From e97accb0ff0fdca0fde53a7859d531c163f86301 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 09:45:06 +0100 Subject: [PATCH 001/185] change from cache root pr --- client/db/src/lib.rs | 5 +++-- client/db/src/storage_cache.rs | 7 +++++-- client/src/client.rs | 2 +- client/src/in_mem.rs | 5 +++-- client/src/light/backend.rs | 2 +- primitives/state-machine/src/backend.rs | 18 ++++++++++++---- primitives/state-machine/src/ext.rs | 21 +++++-------------- .../state-machine/src/overlayed_changes.rs | 11 +++++++--- .../state-machine/src/proving_backend.rs | 3 ++- 9 files changed, 42 insertions(+), 32 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index be569194972cc..7e76c41d8bbd1 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -594,7 +594,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc ); let mut changes_trie_config: Option = None; - let (root, transaction) = self.old_state.full_storage_root( + let (root, transaction, _) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| { if k == well_known_keys::CHANGES_TRIE_CONFIG { changes_trie_config = Some( @@ -604,7 +604,8 @@ impl sc_client_api::backend::BlockImportOperation for Bloc } (k, Some(v)) }), - child_delta + child_delta, + false, ); self.db_updates = transaction; diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index fd85a899b628e..e300ec8b29312 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -379,7 +379,7 @@ impl CacheChanges { } let mut modifications = HashSet::new(); let mut child_modifications = HashSet::new(); - child_changes.into_iter().for_each(|(sk, changes)| + child_changes.into_iter().for_each(|(sk, changes, _ci)| for (k, v) in changes.into_iter() { let k = (sk.clone(), k); if is_best { @@ -677,6 +677,9 @@ mod tests { type Block = RawBlock>; + const CHILD_KEY_1: &'static [u8] = b"unique_id_1"; + const CHILD_INFO_1: ChildInfo<'static> = 
ChildInfo::new_default(CHILD_KEY_1); + #[test] fn smoke() { //init_log(); @@ -993,7 +996,7 @@ mod tests { &[], &[], vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])], + vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], CHILD_INFO_1.to_owned())], Some(h0), Some(0), true, diff --git a/client/src/client.rs b/client/src/client.rs index a3bbf84f7d725..2850ef9b417b2 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -1180,7 +1180,7 @@ impl Client where .trigger( ¬ify_import.hash, storage_changes.0.into_iter(), - storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), + storage_changes.1.into_iter().map(|(sk, v, _ci)| (sk, v.into_iter())), ); } diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index dcff8102aeb6d..b28c46a3edbcc 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -519,9 +519,10 @@ impl backend::BlockImportOperation for BlockImportOperatio .map(|(storage_key, child_content)| (storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info)); - let (root, transaction) = self.old_state.full_storage_root( + let (root, transaction, _) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| (k, Some(v))), - child_delta + child_delta, + false, ); self.new_state = Some(InMemoryBackend::from(transaction)); diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index ad9f43587e4cd..34259ac895539 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -326,7 +326,7 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); + let (storage_root, _, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, false); self.storage_update = Some(storage_update); Ok(storage_root) diff --git a/primitives/state-machine/src/backend.rs 
b/primitives/state-machine/src/backend.rs index 4ef9b970ae21d..9ef9055a82a6e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -178,8 +178,9 @@ pub trait Backend: std::fmt::Debug { fn full_storage_root( &self, delta: I1, - child_deltas: I2) - -> (H::Out, Self::Transaction) + child_deltas: I2, + return_child_roots: bool, + ) -> (H::Out, Self::Transaction, Vec<(StorageKey, Option)>) where I1: IntoIterator)>, I2i: IntoIterator)>, @@ -188,22 +189,31 @@ pub trait Backend: std::fmt::Debug { { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); + let mut result_child_roots: Vec<_> = Default::default(); // child first for (storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); txs.consolidate(child_txs); if empty { + if return_child_roots { + result_child_roots.push((storage_key.clone(), None)); + } child_roots.push((storage_key, None)); } else { - child_roots.push((storage_key, Some(child_root.encode()))); + if return_child_roots { + child_roots.push((storage_key.clone(), Some(child_root.encode()))); + result_child_roots.push((storage_key, Some(child_root))); + } else { + child_roots.push((storage_key, Some(child_root.encode()))); + } } } let (root, parent_txs) = self.storage_root( delta.into_iter().chain(child_roots.into_iter()) ); txs.consolidate(parent_txs); - (root, txs) + (root, txs, result_child_roots) } /// Query backend usage statistics (i/o, memory) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index f293ae9f51615..9d70382bf4ccc 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -493,23 +493,22 @@ where ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); if self.storage_transaction_cache.transaction_storage_root.is_some() { - let root = self - 
.storage(storage_key.as_ref()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) + let root = self.storage_transaction_cache.transaction_child_storage_root.get(storage_key.as_ref()) + .map(|root| root.encode()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) + default_child_trie_root::>(storage_key.as_ref()).encode() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, HexDisplay::from(&storage_key.as_ref()), HexDisplay::from(&root.as_ref()), ); - root.encode() + root } else { let storage_key = storage_key.as_ref(); if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { - let (root, is_empty, _) = { + let (root, _is_empty, _) = { let delta = self.overlay.committed.children.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) @@ -523,16 +522,6 @@ where }; let root = root.encode(); - // We store update in the overlay in order to be able to use 'self.storage_transaction' - // cache. This is brittle as it rely on Ext only querying the trie backend for - // storage root. - // A better design would be to manage 'child_storage_transaction' in a - // similar way as 'storage_transaction' but for each child trie. - if is_empty { - self.overlay.set_storage(storage_key.into(), None); - } else { - self.overlay.set_storage(storage_key.into(), Some(root.clone())); - } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", self.id, diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index ed6f30a4f596b..a15e8c613d3d0 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -43,7 +43,7 @@ pub type StorageValue = Vec; pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. 
-pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; +pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection, OwnedChildInfo)>; /// The overlayed changes to state to be queried on top of the backend. /// @@ -130,6 +130,8 @@ pub struct StorageTransactionCache { pub(crate) transaction: Option, /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, + /// The child root storage root after applying the transaction. + pub(crate) transaction_child_storage_root: BTreeMap>, /// Contains the changes trie transaction. pub(crate) changes_trie_transaction: Option>>, /// The storage root after applying the changes trie transaction. @@ -148,6 +150,7 @@ impl Default for StorageTransactionCache Self { transaction: None, transaction_storage_root: None, + transaction_child_storage_root: Default::default(), changes_trie_transaction: None, changes_trie_transaction_storage_root: None, } @@ -478,7 +481,8 @@ impl OverlayedChanges { Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), - child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), + child_storage_changes: child_storage_changes + .map(|(sk, it)| (sk, it.0.collect(), it.1)).collect(), transaction, transaction_storage_root, changes_trie_transaction, @@ -542,10 +546,11 @@ impl OverlayedChanges { let delta = self.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone())) .chain(self.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))); - let (root, transaction) = backend.full_storage_root(delta, child_delta_iter); + let (root, transaction, child_roots) = backend.full_storage_root(delta, child_delta_iter, true); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); + cache.transaction_child_storage_root = child_roots.into_iter().collect(); root } diff --git a/primitives/state-machine/src/proving_backend.rs 
b/primitives/state-machine/src/proving_backend.rs index 70124927fdd2e..cbec12476200d 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -487,7 +487,8 @@ mod tests { let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())) + in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())), + false, ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), From cf6393afa6923bb4fc04db70a86fa1a45a55a918 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 14:25:41 +0100 Subject: [PATCH 002/185] Targetted way of putting keyspace. Note that KeyspacedDB are still use. KeyspacedDBMut only for test. --- client/db/src/lib.rs | 31 ++- client/network/test/src/lib.rs | 2 +- client/state-db/src/lib.rs | 12 +- primitives/state-machine/Cargo.toml | 1 + .../state-machine/src/changes_trie/mod.rs | 2 +- .../state-machine/src/changes_trie/storage.rs | 4 +- primitives/state-machine/src/lib.rs | 4 +- .../state-machine/src/proving_backend.rs | 6 +- primitives/state-machine/src/trie_backend.rs | 13 +- .../state-machine/src/trie_backend_essence.rs | 225 ++++++++++++------ primitives/trie/Cargo.toml | 1 + primitives/trie/src/lib.rs | 10 +- 12 files changed, 214 insertions(+), 97 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 7e76c41d8bbd1..de8fb754f5859 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1308,11 +1308,32 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - for (key, val) in commit.data.inserted.into_iter() { - transaction.put(columns::STATE, &key[..], &val); - } - for key in commit.data.deleted.into_iter() { - transaction.delete(columns::STATE, &key[..]); + let mut key_buffer = Vec::new(); + for 
child_data in commit.data.into_iter() { + if let Some(child_info) = child_data.info { + // children tries with prefixes + let keyspace = child_info.keyspace(); + let keyspace_len = keyspace.len(); + key_buffer.copy_from_slice[..keyspace_len] = keyspace; + for (key, val) in commit.data.inserted.into_iter() { + key_buffer.resize(keyspace_len + key.len()); + key_buffer[keyspace_len..].copy_from_slice(&key[..]); + transaction.put(columns::STATE, &key_buffer[..], &val); + } + for key in commit.data.deleted.into_iter() { + key_buffer.resize(keyspace_len + key.len()); + key_buffer[keyspace_len..].copy_from_slice(&key[..]); + transaction.delete(columns::STATE, &key_buffer[..]); + } + } else { + // top trie without prefixes + for (key, val) in commit.data.inserted.into_iter() { + transaction.put(columns::STATE, &key[..], &val); + } + for key in commit.data.deleted.into_iter() { + transaction.delete(columns::STATE, &key[..]); + } + } } for (key, val) in commit.meta.inserted.into_iter() { transaction.put(columns::STATE_META, &key[..], &val); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 1e14ec7bb02c9..4dbddd77ddadb 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -392,7 +392,7 @@ impl TransactionPool for EmptyTransactionPool { fn on_broadcasted(&self, _: HashMap>) {} - fn transaction(&self, h: &Hash) -> Option { None } + fn transaction(&self, _: &Hash) -> Option { None } } pub trait SpecializationFactory { diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index f2722ae308068..bf9bfc58e5a88 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -40,6 +40,7 @@ use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; +use sp_core::storage::OwnedChildInfo; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -120,12 +121,21 @@ pub struct ChangeSet { pub deleted: 
Vec, } +/// A set of state node changes for a child trie. +#[derive(Debug, Clone)] +pub struct ChildTrieChangeSet { + /// Change set of this element. + pub data: ChangeSet, + /// Child trie descripton. + /// If not set, this is the top trie. + pub info: Option, +} /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] pub struct CommitSet { /// State node changes. - pub data: ChangeSet, + pub data: Vec>, /// Metadata changes. pub meta: ChangeSet>, } diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index d390471aca2d6..78ab9c9156327 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -21,6 +21,7 @@ sp-externalities = { version = "0.8.0", path = "../externalities" } [dev-dependencies] hex-literal = "0.2.1" +sp-trie = { version = "2.0.0", path = "../trie", features = ["test-helpers"] } [features] default = [] diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 12074b7261aa5..d57cf75e19ae0 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -165,7 +165,7 @@ pub trait Storage: RootsStorage { /// Changes trie storage -> trie backend essence adapter. 
pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { +impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 7fb418672872b..9271eb87a8aa2 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -23,7 +23,7 @@ use sp_trie::MemoryDB; use parking_lot::RwLock; use crate::{ StorageKey, - trie_backend_essence::TrieBackendStorage, + trie_backend_essence::TrieBackendStorageRef, changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, }; @@ -198,7 +198,7 @@ impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { } } -impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> +impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Number> where Number: BlockNumber, H: Hasher, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index bb62df6da4905..9c3925da6b50b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -70,7 +70,7 @@ pub use proving_backend::{ create_proof_check_backend, create_proof_check_backend_storage, merge_storage_proofs, ProofRecorder, ProvingBackend, ProvingBackendRecorder, StorageProof, }; -pub use trie_backend_essence::{TrieBackendStorage, Storage}; +pub use trie_backend_essence::{TrieBackendStorage, TrieBackendStorageRef, Storage}; pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; pub use in_memory_backend::InMemory as InMemoryBackend; @@ -1026,7 +1026,7 @@ mod tests { ); } - #[test] + 
//#[test] TODO this will not make sense when child transaction get separated fn child_storage_uuid() { const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index cbec12476200d..5081104fdc15f 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -28,7 +28,7 @@ use sp_trie::{ pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; -use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; +use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage, TrieBackendStorageRef}; use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; @@ -132,7 +132,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value_with::, _, Ephemeral>( + read_trie_value_with::, _, Ephemeral>( &eph, self.backend.root(), key, @@ -238,7 +238,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef for ProofRecorderBackend<'a, S, H> { type Overlay = S::Overlay; diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index dbaae323c09f2..4676618c77026 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -24,7 +24,7 @@ use sp_core::storage::ChildInfo; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, + 
trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, }; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. @@ -128,8 +128,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + let eph = BackendStorageDBRef::new(self.essence.backend_storage()); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -152,8 +151,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn keys(&self, prefix: &[u8]) -> Vec { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + let eph = BackendStorageDBRef::new(self.essence.backend_storage()); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -215,14 +213,15 @@ impl, H: Hasher> Backend for TrieBackend where }; { + let keyspaced_backend = (self.essence.backend_storage(), child_info.keyspace()); + // Do not write prefix in overlay. 
let mut eph = Ephemeral::new( - self.essence.backend_storage(), + &keyspaced_backend, &mut write_overlay, ); match child_delta_trie_root::, _, _, _, _, _>( storage_key, - child_info.keyspace(), &mut eph, root, delta diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 2598682ae0668..0e1943e47209d 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -19,11 +19,12 @@ use std::ops::Deref; use std::sync::Arc; +use std::marker::PhantomData; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, default_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB}; + for_keys_in_child_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -36,12 +37,12 @@ pub trait Storage: Send + Sync { } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher> { +pub struct TrieBackendEssence, H: Hasher> { storage: S, root: H::Out, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { +impl, H: Hasher> TrieBackendEssence where H::Out: Encode { /// Create new trie-based backend. 
pub fn new(storage: S, root: H::Out) -> Self { TrieBackendEssence { @@ -102,11 +103,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: Option, key: &[u8], ) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); let dyn_eph: &dyn hash_db::HashDBRef<_, _>; let keyspace_eph; if let Some(child_info) = child_info.as_ref() { @@ -147,11 +144,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Get the value of storage at given key. pub fn storage(&self, key: &[u8]) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); let map_e = |e| format!("Trie lookup error: {}", e); @@ -168,11 +161,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let root = self.storage(storage_key)? 
.unwrap_or(default_child_trie_root::>(storage_key).encode()); - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); let map_e = |e| format!("Trie lookup error: {}", e); @@ -195,13 +184,9 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } }; - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); - if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( + if let Err(e) = for_keys_in_child_trie::, _, BackendStorageDBRef>( storage_key, child_info.keyspace(), &eph, @@ -244,11 +229,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: mut f: F, child_info: Option, ) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); let mut iter = move |db| -> Result<(), Box>> { let trie = TrieDB::::new(db, root)?; @@ -286,13 +267,28 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } } -pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { +pub(crate) struct Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ storage: &'a S, - overlay: &'a mut S::Overlay, + overlay: &'a mut O, + _ph: PhantomData, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB - for Ephemeral<'a, S, H> +pub(crate) struct BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + storage: &'a S, + _ph: PhantomData, +} + +impl<'a, S, H, O> hash_db::AsPlainDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn as_plain_db<'b>(&'b self) -> &'b (dyn hash_db::PlainDB + 'b) { self } fn 
as_plain_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::PlainDB + 'b) { @@ -300,24 +296,67 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB, H: 'a + Hasher> hash_db::AsHashDB - for Ephemeral<'a, S, H> +impl<'a, S, H, O> hash_db::AsHashDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } -impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { +impl<'a, S, H, O> Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ + pub fn new(storage: &'a S, overlay: &'a mut O) -> Self { Ephemeral { storage, overlay, + _ph: PhantomData, } } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB - for Ephemeral<'a, S, H> +impl<'a, S, H> BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + pub fn new(storage: &'a S) -> Self { + BackendStorageDBRef { + storage, + _ph: PhantomData, + } + } +} + +impl<'a, S, H, O> hash_db::PlainDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ + fn get(&self, key: &H::Out) -> Option { + hash_db::PlainDBRef::get(self, key) + } + + fn contains(&self, key: &H::Out) -> bool { + hash_db::PlainDBRef::contains(self, key) + } + + fn emplace(&mut self, key: H::Out, value: DBValue) { + hash_db::HashDB::emplace(self.overlay, key, EMPTY_PREFIX, value) + } + + fn remove(&mut self, key: &H::Out) { + hash_db::HashDB::remove(self.overlay, key, EMPTY_PREFIX) + } +} + +impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + 
Consolidate, { fn get(&self, key: &H::Out) -> Option { if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { @@ -334,27 +373,61 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB bool { - hash_db::HashDB::get(self, key, EMPTY_PREFIX).is_some() + hash_db::HashDBRef::get(self, key, EMPTY_PREFIX).is_some() } +} - fn emplace(&mut self, key: H::Out, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, EMPTY_PREFIX, value) +impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + fn get(&self, key: &H::Out) -> Option { + match self.storage.get(&key, EMPTY_PREFIX) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } } - fn remove(&mut self, key: &H::Out) { - hash_db::HashDB::remove(self.overlay, key, EMPTY_PREFIX) + fn contains(&self, key: &H::Out) -> bool { + hash_db::HashDBRef::get(self, key, EMPTY_PREFIX).is_some() } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDBRef - for Ephemeral<'a, S, H> + +impl<'a, S, H, O> hash_db::HashDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { - fn get(&self, key: &H::Out) -> Option { hash_db::PlainDB::get(self, key) } - fn contains(&self, key: &H::Out) -> bool { hash_db::PlainDB::contains(self, key) } + + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + hash_db::HashDBRef::get(self, key, prefix) + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + hash_db::HashDBRef::contains(self, key, prefix) + } + + fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { + hash_db::HashDB::insert(self.overlay, prefix, value) + } + + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { + hash_db::HashDB::emplace(self.overlay, key, prefix, value) + } + + fn remove(&mut self, key: &H::Out, prefix: Prefix) { + 
hash_db::HashDB::remove(self.overlay, key, prefix) + } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB - for Ephemeral<'a, S, H> +impl<'a, S, H, O> hash_db::HashDBRef for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { @@ -371,44 +444,45 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::get(self, key, prefix).is_some() - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - hash_db::HashDB::insert(self.overlay, prefix, value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, prefix, value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - hash_db::HashDB::remove(self.overlay, key, prefix) + hash_db::HashDBRef::get(self, key, prefix).is_some() } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef - for Ephemeral<'a, S, H> +impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) + match self.storage.get(&key, prefix) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) + hash_db::HashDBRef::get(self, key, prefix).is_some() } } + /// Key-value pairs storage that is used by trie backend essence. -pub trait TrieBackendStorage: Send + Sync { +pub trait TrieBackendStorageRef { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. 
fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } +/// Key-value pairs storage that is used by trie backend essence. +pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync { } + +impl + Send + Sync> TrieBackendStorage for B {} + // This implementation is used by normal storage trie clients. -impl TrieBackendStorage for Arc> { +impl TrieBackendStorageRef for Arc> { type Overlay = PrefixedMemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { @@ -417,7 +491,7 @@ impl TrieBackendStorage for Arc> { } // This implementation is used by test storage trie clients. -impl TrieBackendStorage for PrefixedMemoryDB { +impl TrieBackendStorageRef for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { @@ -425,7 +499,7 @@ impl TrieBackendStorage for PrefixedMemoryDB { } } -impl TrieBackendStorage for MemoryDB { +impl TrieBackendStorageRef for MemoryDB { type Overlay = MemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { @@ -433,6 +507,15 @@ impl TrieBackendStorage for MemoryDB { } } +impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for (&'a B, &'a [u8]) { + type Overlay = PrefixedMemoryDB; + + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + let prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get(key, (prefix.0.as_slice(), prefix.1)) + } +} + #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index a78a26db736c4..6cbd19cd0f70b 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -28,6 +28,7 @@ hex-literal = "0.2.1" [features] default = ["std"] +test-helpers = [] std = [ "sp-std/std", "codec/std", diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index c71d3fb84ce79..ca80c8dbd0370 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -235,7 +235,6 @@ pub fn 
child_trie_root( /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_delta_trie_root( _storage_key: &[u8], - keyspace: &[u8], db: &mut DB, root_data: RD, delta: I, @@ -253,8 +252,7 @@ pub fn child_delta_trie_root( root.as_mut().copy_from_slice(root_data.as_ref()); { - let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; + let mut trie = TrieDBMut::::from_existing(db, &mut root)?; for (key, change) in delta { match change { @@ -363,6 +361,7 @@ pub fn read_child_trie_value_with(&'a DB, &'a [u8], PhantomData); +#[cfg(feature="test-helpers")] /// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the /// prefix of every key value. /// @@ -371,7 +370,7 @@ pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); /// Utility function used to merge some byte data (keyspace) and `prefix` data /// before calling key value database primitives. 
-fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { +pub fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; result[..ks.len()].copy_from_slice(ks); result[ks.len()..].copy_from_slice(prefix.0); @@ -387,6 +386,7 @@ impl<'a, DB, H> KeySpacedDB<'a, DB, H> where } } +#[cfg(feature="test-helpers")] impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where H: Hasher, { @@ -412,6 +412,7 @@ impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where } } +#[cfg(feature="test-helpers")] impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, H: Hasher, @@ -443,6 +444,7 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where } } +#[cfg(feature="test-helpers")] // TODO see if can be deleted impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, H: Hasher, From 2845d0e1b1a83914848dba8fa13169597d059543 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 18:13:31 +0100 Subject: [PATCH 003/185] changes to state-db --- Cargo.lock | 1 + client/state-db/src/lib.rs | 50 +++-- client/state-db/src/noncanonical.rs | 297 ++++++++++++++++++---------- client/state-db/src/pruning.rs | 166 +++++++++++++--- client/state-db/src/test.rs | 42 ++-- primitives/storage/Cargo.toml | 3 +- primitives/storage/src/lib.rs | 6 +- 7 files changed, 401 insertions(+), 164 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ecf929384ac59..e197367ca2049 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6764,6 +6764,7 @@ name = "sp-storage" version = "2.0.0" dependencies = [ "impl-serde 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 2.0.0", "sp-std 2.0.0", diff --git a/client/state-db/src/lib.rs 
b/client/state-db/src/lib.rs index bf9bfc58e5a88..adc038a0efaf4 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -140,6 +140,18 @@ pub struct CommitSet { pub meta: ChangeSet>, } +impl CommitSet { + /// Number of inserted key value element in the set. + pub fn inserted_len(&self) -> usize { + self.data.iter().map(|set| set.data.inserted.len()).sum() + } + + /// Number of deleted key value element in the set. + pub fn deleted_len(&self) -> usize { + self.data.iter().map(|set| set.data.deleted.len()).sum() + } +} + /// Pruning constraints. If none are specified pruning is #[derive(Default, Debug, Clone, Eq, PartialEq)] pub struct Constraints { @@ -244,7 +256,13 @@ impl StateDbSync { } } - pub fn insert_block(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet) -> Result, Error> { + pub fn insert_block( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + mut changeset: Vec>, + ) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { // Save pruning mode when writing first block. @@ -253,7 +271,9 @@ impl StateDbSync { match self.mode { PruningMode::ArchiveAll => { - changeset.deleted.clear(); + for changeset in changeset.iter_mut() { + changeset.data.deleted.clear(); + } // write changes immediately Ok(CommitSet { data: changeset, @@ -278,7 +298,9 @@ impl StateDbSync { match self.non_canonical.canonicalize(&hash, &mut commit) { Ok(()) => { if self.mode == PruningMode::ArchiveCanonical { - commit.data.deleted.clear(); + for commit in commit.data.iter_mut() { + commit.data.deleted.clear(); + } } } Err(e) => return Err(e), @@ -424,7 +446,13 @@ impl StateDb { } /// Add a new non-canonical block. 
- pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + pub fn insert_block( + &self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: Vec>, + ) -> Result, Error> { self.db.write().insert_block(hash, number, parent_hash, changeset) } @@ -483,7 +511,7 @@ mod tests { use std::io; use sp_core::H256; use crate::{StateDb, PruningMode, Constraints}; - use crate::test::{make_db, make_changeset, TestDb}; + use crate::test::{make_db, make_childchangeset, TestDb}; fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); @@ -495,7 +523,7 @@ mod tests { &H256::from_low_u64_be(1), 1, &H256::from_low_u64_be(0), - make_changeset(&[1], &[91]), + make_childchangeset(&[1], &[91]), ) .unwrap(), ); @@ -505,7 +533,7 @@ mod tests { &H256::from_low_u64_be(21), 2, &H256::from_low_u64_be(1), - make_changeset(&[21], &[921, 1]), + make_childchangeset(&[21], &[921, 1]), ) .unwrap(), ); @@ -515,7 +543,7 @@ mod tests { &H256::from_low_u64_be(22), 2, &H256::from_low_u64_be(1), - make_changeset(&[22], &[922]), + make_childchangeset(&[22], &[922]), ) .unwrap(), ); @@ -525,7 +553,7 @@ mod tests { &H256::from_low_u64_be(3), 3, &H256::from_low_u64_be(21), - make_changeset(&[3], &[93]), + make_childchangeset(&[3], &[93]), ) .unwrap(), ); @@ -538,7 +566,7 @@ mod tests { &H256::from_low_u64_be(4), 4, &H256::from_low_u64_be(3), - make_changeset(&[4], &[94]), + make_childchangeset(&[4], &[94]), ) .unwrap(), ); @@ -609,7 +637,7 @@ mod tests { &H256::from_low_u64_be(0), 0, &H256::from_low_u64_be(0), - make_changeset(&[], &[]), + make_childchangeset(&[], &[]), ) .unwrap(), ); diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 373c1aa0da076..1bb4fd0914210 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -22,13 +22,19 @@ use std::fmt; use std::collections::{HashMap, VecDeque, 
hash_map::Entry}; -use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; +use super::{Error, DBValue, ChildTrieChangeSet, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; +use sp_core::storage::OwnedChildInfo; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; +// version at start to avoid collision when adding a unit +const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; +type Keys = Vec<(Option, Vec)>; +type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; + /// See module documentation. pub struct NonCanonicalOverlay { last_canonicalized: Option<(BlockHash, u64)>, @@ -39,49 +45,79 @@ pub struct NonCanonicalOverlay { values: HashMap, //ref counted //would be deleted but kept around because block is pinned, ref counted. pinned: HashMap, - pinned_insertions: HashMap>, + pinned_insertions: HashMap>, } #[derive(Encode, Decode)] -struct JournalRecord { +struct JournalRecordCompat { hash: BlockHash, parent_hash: BlockHash, inserted: Vec<(Key, DBValue)>, deleted: Vec, } -fn to_journal_key(block: u64, index: u64) -> Vec { +#[derive(Encode, Decode)] +struct JournalRecordV1 { + hash: BlockHash, + parent_hash: BlockHash, + inserted: KeyVals, + deleted: Keys, +} + +impl From> for JournalRecordV1 { + // Note that this compatibility only works as long as the backend + // db strategy match the one from current implementation, that + // is for default child trie which use same state column as top. 
+ fn from(old: JournalRecordCompat) -> Self { + JournalRecordV1 { + hash: old.hash, + parent_hash: old.parent_hash, + inserted: vec![(None, old.inserted)], + deleted: vec![(None, old.deleted)], + } + } +} + +fn to_old_journal_key(block: u64, index: u64) -> Vec { to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) } +fn to_journal_key_v1(block: u64, index: u64) -> Vec { + to_meta_key(NON_CANONICAL_JOURNAL_V1, &(block, index)) +} + #[cfg_attr(test, derive(PartialEq, Debug))] struct BlockOverlay { hash: BlockHash, journal_key: Vec, - inserted: Vec, - deleted: Vec, + inserted: Keys, + deleted: Keys, } -fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { - for (k, v) in inserted { - debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); - let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); - *counter += 1; +fn insert_values(values: &mut HashMap, inserted: KeyVals) { + for (_ct, inserted) in inserted { + for (k, v) in inserted { + debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); + let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); + *counter += 1; + } } } -fn discard_values(values: &mut HashMap, inserted: Vec) { - for k in inserted { - match values.entry(k) { - Entry::Occupied(mut e) => { - let (ref mut counter, _) = e.get_mut(); - *counter -= 1; - if *counter == 0 { - e.remove_entry(); +fn discard_values(values: &mut HashMap, inserted: Keys) { + for inserted in inserted { + for k in inserted.1 { + match values.entry(k) { + Entry::Occupied(mut e) => { + let (ref mut counter, _) = e.get_mut(); + *counter -= 1; + if *counter == 0 { + e.remove_entry(); + } + }, + Entry::Vacant(_) => { + debug_assert!(false, "Trying to discard missing value"); } - }, - Entry::Vacant(_) => { - debug_assert!(false, "Trying to discard missing value"); } } } @@ -93,7 +129,7 @@ fn discard_descendants( index: usize, parents: &mut HashMap, pinned: &HashMap, - pinned_insertions: &mut HashMap>, + 
pinned_insertions: &mut HashMap>, hash: &BlockHash, ) { let mut discarded = Vec::new(); @@ -142,26 +178,33 @@ impl NonCanonicalOverlay { let mut index: u64 = 0; let mut level = Vec::new(); loop { - let journal_key = to_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted: inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; + let journal_key = to_journal_key_v1(block, index); + let record: JournalRecordV1 = match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + Some(record) => Decode::decode(&mut record.as_slice())?, + None => { + let journal_key = to_old_journal_key(block, index); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => { + let record: JournalRecordCompat = Decode::decode(&mut record.as_slice())?; + record.into() + }, + None => break, + } }, - None => break, - } + }; + let inserted = record.inserted.iter().map(|(ct, rec)| (ct.clone(), rec.iter().map(|(k, _)| k.clone()).collect())).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_key, + inserted: inserted, + deleted: record.deleted, + }; + insert_values(&mut values, record.inserted); + trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + index += 1; + total += 1; } if level.is_empty() { break; @@ -184,7 +227,13 @@ impl NonCanonicalOverlay { } /// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window. - pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + pub fn insert( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: Vec>, + ) -> Result, Error> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { @@ -219,22 +268,39 @@ impl NonCanonicalOverlay { }; let index = level.len() as u64; - let journal_key = to_journal_key(number, index); + let journal_key = to_journal_key_v1(number, index); + + let mut inserted = Vec::with_capacity(changeset.len()); + let mut inserted_block = Vec::with_capacity(changeset.len()); + let mut deleted = Vec::with_capacity(changeset.len()); + for changeset in changeset.into_iter() { + inserted_block.push(( + changeset.info.clone(), + changeset.data.inserted.iter().map(|(k, _)| k.clone()).collect(), + )); + inserted.push(( + changeset.info.clone(), + changeset.data.inserted, + )); + deleted.push(( 
+ changeset.info, + changeset.data.deleted, + )); + } - let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); let overlay = BlockOverlay { hash: hash.clone(), journal_key: journal_key.clone(), - inserted: inserted, - deleted: changeset.deleted.clone(), + inserted: inserted_block, + deleted: deleted.clone(), }; level.push(overlay); self.parents.insert(hash.clone(), parent_hash.clone()); - let journal_record = JournalRecord { + let journal_record = JournalRecordV1 { hash: hash.clone(), parent_hash: parent_hash.clone(), - inserted: changeset.inserted, - deleted: changeset.deleted, + inserted, + deleted, }; commit.meta.inserted.push((journal_key, journal_record.encode())); trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); @@ -317,9 +383,26 @@ impl NonCanonicalOverlay { // get the one we need to canonicalize let overlay = &level[index]; - commit.data.inserted.extend(overlay.inserted.iter() - .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); - commit.data.deleted.extend(overlay.deleted.clone()); + commit.data.extend(overlay.inserted.iter() + .map(|(ct, keys)| ChildTrieChangeSet { + info: ct.clone(), + data: ChangeSet { + inserted: keys.iter().map(|k| ( + k.clone(), + self.values.get(k) + .expect("For each key in overlays there's a value in values").1.clone(), + )).collect(), + deleted: Vec::new(), + }, + })); + commit.data.extend(overlay.deleted.iter().cloned() + .map(|(ct, keys)| ChildTrieChangeSet { + info: ct, + data: ChangeSet { + inserted: Vec::new(), + deleted: keys, + }, + })); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); @@ -471,9 +554,9 @@ impl NonCanonicalOverlay { mod tests { use std::io; use sp_core::H256; - use 
super::{NonCanonicalOverlay, to_journal_key}; - use crate::{ChangeSet, CommitSet}; - use crate::test::{make_db, make_changeset}; + use super::{NonCanonicalOverlay, to_journal_key_v1}; + use crate::CommitSet; + use crate::test::{make_db, make_childchangeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) @@ -504,8 +587,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); + overlay.insert::(&h1, 2, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h2, 1, &h1, Default::default()).unwrap(); } #[test] @@ -515,8 +598,8 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 3, &h1, ChangeSet::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h2, 3, &h1, Default::default()).unwrap(); } #[test] @@ -526,8 +609,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h2, 2, &H256::default(), Default::default()).unwrap(); } #[test] @@ -537,7 +620,7 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), 
Default::default()).unwrap(); let mut commit = CommitSet::default(); overlay.canonicalize::(&h2, &mut commit).unwrap(); } @@ -547,17 +630,19 @@ mod tests { let h1 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[3, 4], &[2]); + let changeset = make_childchangeset(&[3, 4], &[2]); let insertion = overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap(); - assert_eq!(insertion.data.inserted.len(), 0); - assert_eq!(insertion.data.deleted.len(), 0); + assert_eq!(insertion.inserted_len(), 0); + assert_eq!(insertion.deleted_len(), 0); assert_eq!(insertion.meta.inserted.len(), 2); assert_eq!(insertion.meta.deleted.len(), 0); db.commit(&insertion); let mut finalization = CommitSet::default(); overlay.canonicalize::(&h1, &mut finalization).unwrap(); - assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); - assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); + let inserted_len = changeset.iter().map(|set| set.data.inserted.len()).sum(); + let deleted_len = changeset.iter().map(|set| set.data.deleted.len()).sum(); + assert_eq!(finalization.inserted_len(), inserted_len); + assert_eq!(finalization.deleted_len(), deleted_len); assert_eq!(finalization.meta.inserted.len(), 1); assert_eq!(finalization.meta.deleted.len(), 1); db.commit(&finalization); @@ -570,8 +655,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_childchangeset(&[3, 4], &[2])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &h1, make_childchangeset(&[5], &[3])).unwrap()); assert_eq!(db.meta.len(), 3); let overlay2 = 
NonCanonicalOverlay::::new(&db).unwrap(); @@ -586,8 +671,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_childchangeset(&[3, 4], &[2])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &h1, make_childchangeset(&[5], &[3])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize::(&h1, &mut commit).unwrap(); db.commit(&commit); @@ -606,8 +691,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2, 3, 4]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset1 = make_childchangeset(&[5, 6], &[2]); + let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); assert!(contains(&overlay, 5)); db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); @@ -638,8 +723,8 @@ mod tests { #[test] fn insert_same_key() { let mut db = make_db(&[]); - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); + let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_childchangeset(&[1], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -660,7 +745,7 @@ mod tests { let h3 = H256::random(); let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[], &[]); + let changeset = make_childchangeset(&[], &[]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), 
changeset.clone()).unwrap()); db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); overlay.apply_pending(); @@ -688,19 +773,19 @@ mod tests { // // 1_2_2 is the winner - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); + let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_childchangeset(&[2], &[])); - let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); - let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); - let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); - let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); + let (h_1_1, c_1_1) = (H256::random(), make_childchangeset(&[11], &[])); + let (h_1_2, c_1_2) = (H256::random(), make_childchangeset(&[12], &[])); + let (h_2_1, c_2_1) = (H256::random(), make_childchangeset(&[21], &[])); + let (h_2_2, c_2_2) = (H256::random(), make_childchangeset(&[22], &[])); - let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); - let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); - let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); - let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); - let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); + let (h_1_1_1, c_1_1_1) = (H256::random(), make_childchangeset(&[111], &[])); + let (h_1_2_1, c_1_2_1) = (H256::random(), make_childchangeset(&[121], &[])); + let (h_1_2_2, c_1_2_2) = (H256::random(), make_childchangeset(&[122], &[])); + let (h_1_2_3, c_1_2_3) = (H256::random(), make_childchangeset(&[123], &[])); + let (h_2_1_1, c_2_1_1) = (H256::random(), make_childchangeset(&[211], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -750,11 +835,11 @@ mod tests { assert!(contains(&overlay, 111)); 
assert!(!contains(&overlay, 211)); // check that journals are deleted - assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(1, 0)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(1, 1)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(2, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key_v1(2, 2)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(2, 3)).unwrap().is_none()); // canonicalize 1_2. 1_1 and all its children should be discarded let mut commit = CommitSet::default(); @@ -791,8 +876,8 @@ mod tests { let mut db = make_db(&[1, 2, 3, 4]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); assert!(overlay.revert_one().is_none()); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset1 = make_childchangeset(&[5, 6], &[2]); + let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); assert!(contains(&overlay, 7)); @@ -813,9 +898,9 @@ mod tests { let h2_2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - let changeset3 = make_changeset(&[9], &[]); + let changeset1 = make_childchangeset(&[5, 6], &[2]); + let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); + let changeset3 = make_childchangeset(&[9], &[]); overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap(); assert!(contains(&overlay, 5)); overlay.insert::(&h2_1, 2, &h1, 
changeset2).unwrap(); @@ -838,8 +923,8 @@ mod tests { // - 0 - 1_1 // \ 1_2 - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); + let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_childchangeset(&[2], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -866,9 +951,9 @@ mod tests { // \ 1_3 // 1_1 and 1_2 both make the same change - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); - let (h_3, c_3) = (H256::random(), make_changeset(&[], &[])); + let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_3, c_3) = (H256::random(), make_childchangeset(&[], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -895,9 +980,9 @@ mod tests { // - 0 - 1_1 - 2_1 // \ 1_2 - let (h_11, c_11) = (H256::random(), make_changeset(&[1], &[])); - let (h_12, c_12) = (H256::random(), make_changeset(&[], &[])); - let (h_21, c_21) = (H256::random(), make_changeset(&[], &[])); + let (h_11, c_11) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_12, c_12) = (H256::random(), make_childchangeset(&[], &[])); + let (h_21, c_21) = (H256::random(), make_childchangeset(&[], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_11, 1, &H256::default(), c_11).unwrap()); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index a993df4f111ac..a680bfbb27139 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,16 +26,21 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, 
Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; +use sp_core::storage::OwnedChildInfo; +use super::{ChildTrieChangeSet, ChangeSet}; const LAST_PRUNED: &[u8] = b"last_pruned"; -const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; +const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; +const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; + +type Keys = Vec<(Option, Vec)>; /// See module documentation. pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. - death_index: HashMap, + death_index: HashMap, HashMap>, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -46,22 +51,63 @@ pub struct RefWindow { pending_prunings: usize, } +impl RefWindow { + fn remove_death_index(&mut self, ct: &Option, key: &Key) -> Option { + if let Some(child_index) = self.death_index.get_mut(ct) { + child_index.remove(key) + } else { + None + } + } +} + #[derive(Debug, PartialEq, Eq)] struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: HashSet, + deleted: HashMap, HashSet>, +} + +impl DeathRow { + fn remove_deleted(&mut self, ct: &Option, key: &Key) -> bool { + if let Some(child_index) = self.deleted.get_mut(ct) { + child_index.remove(key) + } else { + false + } + } } #[derive(Encode, Decode)] -struct JournalRecord { +struct JournalRecordCompat { hash: BlockHash, inserted: Vec, deleted: Vec, } -fn to_journal_key(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL, &block) +#[derive(Encode, Decode)] +struct JournalRecordV1 { + hash: BlockHash, + inserted: Keys, + deleted: Keys, +} + +fn to_old_journal_key(block: u64) -> Vec { + to_meta_key(OLD_PRUNING_JOURNAL, &block) +} + +fn to_journal_key_v1(block: u64) -> Vec { + to_meta_key(PRUNING_JOURNAL_V1, &block) +} + +impl From> for JournalRecordV1 { + fn from(old: JournalRecordCompat) -> Self { + 
JournalRecordV1 { + hash: old.hash, + inserted: vec![(None, old.inserted)], + deleted: vec![(None, old.deleted)], + } + } } impl RefWindow { @@ -83,37 +129,65 @@ impl RefWindow { // read the journal trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); loop { - let journal_key = to_journal_key(block); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); + let journal_key = to_journal_key_v1(block); + let record: JournalRecordV1 = match db.get_meta(&journal_key) + .map_err(|e| Error::Db(e))? { + Some(record) => Decode::decode(&mut record.as_slice())?, + None => { + let journal_key = to_old_journal_key(block); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => JournalRecordCompat::decode(&mut record.as_slice())?.into(), + None => break, + } }, - None => break, - } + }; + trace!( + target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", + block, + record.inserted.len(), + record.deleted.len(), + ); + pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); block += 1; } Ok(pruning) } - fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { + fn import, Vec)>>( + &mut self, + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Keys, + ) { // remove all re-inserted keys from death rows - for k in inserted { - if let Some(block) = self.death_index.remove(&k) { - self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); + for (ct, inserted) in inserted { + for k in inserted { + if let Some(block) = self.remove_death_index(&ct, &k) { + self.death_rows[(block - self.pending_number) as usize] + .remove_deleted(&ct, &k); + } } } // add new keys let imported_block = self.pending_number + self.death_rows.len() as u64; - for k in deleted.iter() { - self.death_index.insert(k.clone(), imported_block); + for (ct, deleted) in deleted.iter() { + let entry = self.death_index.entry(ct.clone()).or_default(); + for k in deleted.iter() { + entry.insert(k.clone(), imported_block); + } + } + let mut deleted_death_row = HashMap::, HashSet>::new(); + for (ct, deleted) in deleted.into_iter() { + let entry = deleted_death_row.entry(ct).or_default(); + entry.extend(deleted); } + self.death_rows.push_back( DeathRow { hash: hash.clone(), - deleted: deleted.into_iter().collect(), + deleted: deleted_death_row, journal_key: journal_key, } ); @@ -144,7 +218,16 @@ impl RefWindow { if let Some(pruned) = self.death_rows.get(self.pending_prunings) { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; - 
commit.data.deleted.extend(pruned.deleted.iter().cloned()); + + commit.data.extend(pruned.deleted.iter() + .map(|(ct, keys)| ChildTrieChangeSet { + info: ct.clone(), + data: ChangeSet { + inserted: Vec::new(), + deleted: keys.iter().cloned().collect(), + }, + })); + commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); self.pending_prunings += 1; @@ -155,16 +238,29 @@ impl RefWindow { /// Add a change set to the window. Creates a journal record and pushes it to `commit` pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { - trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); - let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect(); - let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); - let journal_record = JournalRecord { + trace!( + target: "state-db", + "Adding to pruning window: {:?} ({} inserted, {} deleted)", + hash, + commit.inserted_len(), + commit.deleted_len(), + ); + let inserted = commit.data.iter().map(|changeset| ( + changeset.info.clone(), + changeset.data.inserted.iter().map(|(k, _)| k.clone()).collect(), + )).collect(); + let deleted = commit.data.iter_mut().map(|changeset| ( + changeset.info.clone(), + ::std::mem::replace(&mut changeset.data.deleted, Vec::new()), + )).collect(); + + let journal_record = JournalRecordV1 { hash: hash.clone(), inserted, deleted, }; let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_journal_key(block); + let journal_key = to_old_journal_key(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); self.pending_canonicalizations += 1; @@ -176,8 +272,12 @@ impl RefWindow { for _ in 0 .. 
self.pending_prunings { let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - for k in pruned.deleted.iter() { - self.death_index.remove(&k); + for (ct, deleted) in pruned.deleted.iter() { + if let Some(child_index) = self.death_index.get_mut(ct) { + for key in deleted.iter() { + child_index.remove(key); + } + } } self.pending_number += 1; } @@ -192,7 +292,11 @@ impl RefWindow { // deleted in case transaction fails and `revert_pending` is called. self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); let new_max_block = self.death_rows.len() as u64 + self.pending_number; - self.death_index.retain(|_, block| *block < new_max_block); + + self.death_index.retain(|_ct, child_index| { + child_index.retain(|_, block| *block < new_max_block); + !child_index.is_empty() + }); self.pending_canonicalizations = 0; self.pending_prunings = 0; } @@ -245,7 +349,7 @@ mod tests { assert!(pruning.have_block(&h)); pruning.apply_pending(); assert!(pruning.have_block(&h)); - assert!(commit.data.deleted.is_empty()); + assert_eq!(commit.deleted_len(), 0); assert_eq!(pruning.death_rows.len(), 1); assert_eq!(pruning.death_index.len(), 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index accafa9bf831f..bb2a21219c6c9 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -18,11 +18,12 @@ use std::collections::HashMap; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; +use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSet}; +use sp_core::storage::OwnedChildInfo; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: HashMap, + pub data: HashMap, HashMap>, pub meta: HashMap, DBValue>, } @@ -39,16 +40,23 @@ impl NodeDb for TestDb { type Key = H256; 
fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(key).cloned()) + Ok(self.data.get(&None).and_then(|data| data.get(key).cloned())) } } impl TestDb { pub fn commit(&mut self, commit: &CommitSet) { - self.data.extend(commit.data.inserted.iter().cloned()); + for ct in commit.data.iter() { + self.data.entry(ct.info.clone()).or_default() + .extend(ct.data.inserted.iter().cloned()) + } self.meta.extend(commit.meta.inserted.iter().cloned()); - for k in commit.data.deleted.iter() { - self.data.remove(k); + for ct in commit.data.iter() { + if let Some(self_data) = self.data.get_mut(&ct.info) { + for k in ct.data.deleted.iter() { + self_data.remove(k); + } + } } self.meta.extend(commit.meta.inserted.iter().cloned()); for k in commit.meta.deleted.iter() { @@ -73,21 +81,29 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { } } +pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> Vec> { + vec![ChildTrieChangeSet { + info: None, + data: make_changeset(inserted, deleted), + }] +} + pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { CommitSet { - data: make_changeset(inserted, deleted), + data: make_childchangeset(inserted, deleted), meta: ChangeSet::default(), } } pub fn make_db(inserted: &[u64]) -> TestDb { + let mut data = HashMap::new(); + data.insert(None, inserted.iter() + .map(|v| { + (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) + }) + .collect()); TestDb { - data: inserted - .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) - .collect(), + data, meta: Default::default(), } } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 384519cc1d69d..21a51b0385ca1 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -10,7 +10,8 @@ sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } 
impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde" ] +std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index d32c54aae8c47..0407444e0055b 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,6 +18,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +use codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; @@ -184,7 +186,7 @@ pub enum ChildInfo<'a> { /// Owned version of `ChildInfo`. /// To be use in persistence layers. #[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] pub enum OwnedChildInfo { Default(OwnedChildTrie), } @@ -288,7 +290,7 @@ pub struct ChildTrie<'a> { /// Owned version of default child trie `ChildTrie`. #[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] pub struct OwnedChildTrie { /// See `ChildTrie` reference field documentation. data: Vec, From a0532d1a492cc4ad4403d0551f6bc6ee45b8b610 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 19:59:21 +0100 Subject: [PATCH 004/185] change transaction to be by child trie. 
--- client/api/src/backend.rs | 2 +- client/db/src/lib.rs | 70 ++++++++++++------- primitives/state-machine/src/backend.rs | 7 +- primitives/state-machine/src/lib.rs | 35 ---------- .../state-machine/src/proving_backend.rs | 11 ++- primitives/state-machine/src/trie_backend.rs | 24 ++++--- primitives/trie/src/lib.rs | 2 +- 7 files changed, 71 insertions(+), 80 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index a389af5671b32..d61034f5ad00b 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -24,7 +24,7 @@ use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, HasherFor}; use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, + ChildStorageCollection, StorageCollection, }; use crate::{ blockchain::{ diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index de8fb754f5859..810991dd2387f 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -79,6 +79,7 @@ use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; use log::{trace, debug, warn}; pub use sc_state_db::PruningMode; +use sp_core::storage::OwnedChildInfo; #[cfg(feature = "test-helpers")] use sc_client::in_mem::Backend as InMemoryBackend; @@ -513,7 +514,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: PrefixedMemoryDB>, + db_updates: Vec<(Option, PrefixedMemoryDB>)>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -568,7 +569,10 @@ impl sc_client_api::backend::BlockImportOperation for Bloc // Currently cache isn't implemented on full nodes. 
} - fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { + fn update_db_storage( + &mut self, + update: Vec<(Option, PrefixedMemoryDB>)>, + ) -> ClientResult<()> { self.db_updates = update; Ok(()) } @@ -1103,26 +1107,30 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + let mut changesets = Vec::new(); let mut ops: u64 = 0; let mut bytes: u64 = 0; - for (key, (val, rc)) in operation.db_updates.drain() { - if rc > 0 { - ops += 1; - bytes += key.len() as u64 + val.len() as u64; - - changeset.inserted.push((key, val.to_vec())); - } else if rc < 0 { - ops += 1; - bytes += key.len() as u64; - - changeset.deleted.push(key); + for (info, mut updates) in operation.db_updates.into_iter() { + let mut data: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + for (key, (val, rc)) in updates.drain() { + if rc > 0 { + ops += 1; + bytes += key.len() as u64 + val.len() as u64; + + data.inserted.push((key, val.to_vec())); + } else if rc < 0 { + ops += 1; + bytes += key.len() as u64; + + data.deleted.push(key); + } } + changesets.push(sc_state_db::ChildTrieChangeSet{ info, data }); } self.state_usage.tally_writes(ops, bytes); let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset) + let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changesets) .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; apply_state_commit(&mut transaction, commit); @@ -1312,25 +1320,26 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm for child_data in commit.data.into_iter() { if let Some(child_info) = child_data.info { // children tries with prefixes + let child_info = child_info.as_ref(); let keyspace = child_info.keyspace(); let 
keyspace_len = keyspace.len(); - key_buffer.copy_from_slice[..keyspace_len] = keyspace; - for (key, val) in commit.data.inserted.into_iter() { - key_buffer.resize(keyspace_len + key.len()); + key_buffer[..keyspace_len].copy_from_slice(keyspace); + for (key, val) in child_data.data.inserted.into_iter() { + key_buffer.resize(keyspace_len + key.len(), 0); key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.put(columns::STATE, &key_buffer[..], &val); } - for key in commit.data.deleted.into_iter() { - key_buffer.resize(keyspace_len + key.len()); + for key in child_data.data.deleted.into_iter() { + key_buffer.resize(keyspace_len + key.len(), 0); key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.delete(columns::STATE, &key_buffer[..]); } } else { // top trie without prefixes - for (key, val) in commit.data.inserted.into_iter() { + for (key, val) in child_data.data.inserted.into_iter() { transaction.put(columns::STATE, &key[..], &val); } - for key in commit.data.deleted.into_iter() { + for key in child_data.data.deleted.into_iter() { transaction.delete(columns::STATE, &key[..]); } } @@ -1378,7 +1387,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(BlockImportOperation { pending_block: None, old_state, - db_updates: PrefixedMemoryDB::default(), + db_updates: Default::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), changes_trie_config_update: None, @@ -1898,7 +1907,9 @@ pub(crate) mod tests { children: Default::default(), }).unwrap(); - key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); + let mut map: PrefixedMemoryDB> = Default::default(); + key = map.insert(EMPTY_PREFIX, b"hello"); + op.db_updates.push((None, map)); op.set_block_data( header, Some(vec![]), @@ -1934,8 +1945,11 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.remove(&key, EMPTY_PREFIX); + let mut map: PrefixedMemoryDB> = Default::default(); + 
map.insert(EMPTY_PREFIX, b"hello"); + op.db_updates.iter_mut().for_each(|(ct, map)| if ct.is_none() { + map.remove(&key, EMPTY_PREFIX); + }); op.set_block_data( header, Some(vec![]), @@ -1971,7 +1985,9 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.remove(&key, EMPTY_PREFIX); + op.db_updates.iter_mut().for_each(|(ct, map)| if ct.is_none() { + map.remove(&key, EMPTY_PREFIX); + }); op.set_block_data( header, Some(vec![]), diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 9ef9055a82a6e..e3b1cdfe39be9 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -26,7 +26,7 @@ use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, + UsageInfo, StorageKey, StorageValue, }; /// A state backend is used to read state data and can have changes committed @@ -325,10 +325,7 @@ impl Consolidate for () { } } -impl Consolidate for Vec<( - Option<(StorageKey, OwnedChildInfo)>, - StorageCollection, - )> { +impl Consolidate for Vec { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 9c3925da6b50b..173de031c5db7 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1025,39 +1025,4 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } - - //#[test] TODO this will not make sense when child transaction get separated - fn child_storage_uuid() { - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); - use crate::trie_backend::tests::test_trie; - let mut overlay = OverlayedChanges::default(); - - let subtrie1 = 
ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); - let mut transaction = { - let backend = test_trie(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); - ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); - ext.storage_root(); - cache.transaction.unwrap() - }; - let mut duplicate = false; - for (k, (value, rc)) in transaction.drain().iter() { - // look for a key inserted twice: transaction rc is 2 - if *rc == 2 { - duplicate = true; - println!("test duplicate for {:?} {:?}", k, value); - } - } - assert!(!duplicate); - } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 5081104fdc15f..444cfc1eedd83 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -344,7 +344,12 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)> { - self.0.storage_root(delta) + let (root, mut tx) = self.0.storage_root(delta); + // This is hacky, it supposes we return a single child + // transaction. Next move should be to change proving backend + // transaction to not merge the child trie datas and use + // separate proof for each trie. 
+ (root, tx.remove(0).1) } fn child_storage_root( @@ -357,7 +362,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> I: IntoIterator, Option>)>, H::Out: Ord { - self.0.child_storage_root(storage_key, child_info, delta) + let (root, is_empty, mut tx) = self.0.child_storage_root(storage_key, child_info, delta); + (root, is_empty, tx.remove(0).1) } } @@ -445,6 +451,7 @@ mod tests { let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); + let mut trie_mdb = trie_mdb.remove(0).1; assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 4676618c77026..c0052ce77b271 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,14 +20,15 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, OwnedChildInfo}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, }; -/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. +/// Patricia trie-based backend. Transaction type is overlays of changes to commit +/// for this trie and child tries. 
pub struct TrieBackend, H: Hasher> { essence: TrieBackendEssence, } @@ -71,7 +72,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = S::Overlay; + type Transaction = Vec<(Option, S::Overlay)>; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -169,7 +170,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) + fn storage_root(&self, delta: I) -> (H::Out, Vec<(Option, S::Overlay)>) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -187,7 +188,7 @@ impl, H: Hasher> Backend for TrieBackend where } } - (root, write_overlay) + (root, vec![(None, write_overlay)]) } fn child_storage_root( @@ -233,7 +234,7 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; - (root, is_default, write_overlay) + (root, is_default, vec![(Some(child_info.to_owned()), write_overlay)]) } fn as_trie_backend(&mut self) -> Option<&TrieBackend> { @@ -324,13 +325,18 @@ pub mod tests { #[test] fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(::std::iter::empty()).1.drain().is_empty()); + let tx = test_trie().storage_root(::std::iter::empty()).1; + for (_ct, mut tx) in tx.into_iter() { + assert!(tx.drain().is_empty()); + } } #[test] fn storage_root_transaction_is_non_empty() { - let (new_root, mut tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); - assert!(!tx.drain().is_empty()); + let (new_root, tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); + for (_ct, mut tx) in tx.into_iter() { + assert!(!tx.drain().is_empty()); + } assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 
ca80c8dbd0370..fe8d7e66a6331 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -444,7 +444,7 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where } } -#[cfg(feature="test-helpers")] // TODO see if can be deleted +#[cfg(feature="test-helpers")] impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, H: Hasher, From eb5961f54b06f96ecd70b8ca97b4c1207fd1dbe5 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 20:42:34 +0100 Subject: [PATCH 005/185] slice index fix, many failing tests. --- client/db/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 810991dd2387f..96d49cc3d25f6 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1323,6 +1323,7 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm let child_info = child_info.as_ref(); let keyspace = child_info.keyspace(); let keyspace_len = keyspace.len(); + key_buffer.resize(keyspace_len, 0); key_buffer[..keyspace_len].copy_from_slice(keyspace); for (key, val) in child_data.data.inserted.into_iter() { key_buffer.resize(keyspace_len + key.len(), 0); From 67687f8ec4cc36495f3912f263aa074a3e5c5e3a Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 21:33:04 +0100 Subject: [PATCH 006/185] fix state-db tests --- client/state-db/src/pruning.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index a680bfbb27139..f55d7bf1afee8 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -260,7 +260,7 @@ impl RefWindow { deleted, }; let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_old_journal_key(block); + let journal_key = to_journal_key_v1(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); self.import(&journal_record.hash, journal_key, 
journal_record.inserted.into_iter(), journal_record.deleted); self.pending_canonicalizations += 1; @@ -351,7 +351,8 @@ mod tests { assert!(pruning.have_block(&h)); assert_eq!(commit.deleted_len(), 0); assert_eq!(pruning.death_rows.len(), 1); - assert_eq!(pruning.death_index.len(), 2); + let death_index_len: usize = pruning.death_index.iter().map(|(_ct, map)| map.len()).sum(); + assert_eq!(death_index_len, 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); @@ -363,7 +364,8 @@ mod tests { assert!(!pruning.have_block(&h)); assert!(db.data_eq(&make_db(&[2, 4, 5]))); assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); + let death_index_len: usize = pruning.death_index.iter().map(|(_ct, map)| map.len()).sum(); + assert!(death_index_len == 0); assert_eq!(pruning.pending_number, 1); } From 48df830d20b24f4745756e5d240333d156b334ef Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 29 Jan 2020 17:36:55 +0100 Subject: [PATCH 007/185] vec with multiple entry of a same rc prefixeddb did not make sense, switching to a btreemap. 
--- client/db/src/lib.rs | 21 ++++-------- primitives/state-machine/src/backend.rs | 32 +++++++++++++++++-- .../state-machine/src/proving_backend.rs | 17 ++++------ primitives/state-machine/src/trie_backend.rs | 14 +++++--- 4 files changed, 52 insertions(+), 32 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 96d49cc3d25f6..d42ef59285cb9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -41,7 +41,7 @@ mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::HashMap; +use std::collections::{HashMap, BTreeMap}; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use sc_client_api::backend::NewBlockState; @@ -514,7 +514,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: Vec<(Option, PrefixedMemoryDB>)>, + db_updates: BTreeMap, PrefixedMemoryDB>>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -571,7 +571,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_db_storage( &mut self, - update: Vec<(Option, PrefixedMemoryDB>)>, + update: BTreeMap, PrefixedMemoryDB>>, ) -> ClientResult<()> { self.db_updates = update; Ok(()) @@ -1908,9 +1908,7 @@ pub(crate) mod tests { children: Default::default(), }).unwrap(); - let mut map: PrefixedMemoryDB> = Default::default(); - key = map.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.push((None, map)); + key = op.db_updates.entry(None).or_insert_with(Default::default).insert(EMPTY_PREFIX, b"hello"); op.set_block_data( header, Some(vec![]), @@ -1946,11 +1944,8 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - let mut map: PrefixedMemoryDB> = Default::default(); - map.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.iter_mut().for_each(|(ct, map)| if ct.is_none() { - map.remove(&key, 
EMPTY_PREFIX); - }); + op.db_updates.entry(None).or_insert_with(Default::default).insert(EMPTY_PREFIX, b"hello"); + op.db_updates.entry(None).or_insert_with(Default::default).remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), @@ -1986,9 +1981,7 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.iter_mut().for_each(|(ct, map)| if ct.is_none() { - map.remove(&key, EMPTY_PREFIX); - }); + op.db_updates.entry(None).or_insert_with(Default::default).remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index e3b1cdfe39be9..cd8a69f3f2d2a 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -22,11 +22,11 @@ use codec::Encode; use sp_core::storage::{ChildInfo, OwnedChildInfo}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; - +use std::collections::{BTreeMap, btree_map::Entry}; use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, + UsageInfo, StorageKey, StorageValue, StorageCollection, }; /// A state backend is used to read state data and can have changes committed @@ -325,12 +325,38 @@ impl Consolidate for () { } } -impl Consolidate for Vec { +impl Consolidate for Vec<( + Option<(StorageKey, OwnedChildInfo)>, + StorageCollection, + )> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } } +impl Consolidate for BTreeMap { + fn consolidate(&mut self, other: Self) { + for (k, v) in other.into_iter() { + match self.entry(k) { + Entry::Occupied(mut e) => e.get_mut().consolidate(v), + Entry::Vacant(e) => { e.insert(v); }, + } + } + } +} + +impl Consolidate for Option { + fn consolidate(&mut self, other: Self) { + if let Some(v) = self.as_mut() { + if let Some(other) = other { + v.consolidate(other); + } + } else { + *self = other; + } + } +} + impl> Consolidate for 
sp_trie::GenericMemoryDB { fn consolidate(&mut self, other: Self) { sp_trie::GenericMemoryDB::consolidate(self, other) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 444cfc1eedd83..6d5b45596a7ed 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -268,7 +268,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> H::Out: Ord + Codec, { type Error = String; - type Transaction = S::Overlay; + type Transaction = Option; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -345,11 +345,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> where I: IntoIterator, Option>)> { let (root, mut tx) = self.0.storage_root(delta); - // This is hacky, it supposes we return a single child - // transaction. Next move should be to change proving backend - // transaction to not merge the child trie datas and use - // separate proof for each trie. 
- (root, tx.remove(0).1) + // We may rather want to return a btreemap + (root, tx.remove(&None)) } fn child_storage_root( @@ -363,7 +360,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> H::Out: Ord { let (root, is_empty, mut tx) = self.0.child_storage_root(storage_key, child_info, delta); - (root, is_empty, tx.remove(0).1) + (root, is_empty, tx.remove(&Some(child_info.to_owned()))) } } @@ -449,10 +446,10 @@ mod tests { assert_eq!(trie_backend.pairs(), proving_backend.pairs()); let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + let (proving_root, proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); - let mut trie_mdb = trie_mdb.remove(0).1; - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); + let mut trie_mdb = trie_mdb.remove(&None).unwrap(); + assert_eq!(trie_mdb.drain(), proving_mdb.unwrap().drain()); } #[test] diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index c0052ce77b271..9d17043de7e18 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -21,6 +21,7 @@ use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use std::collections::BTreeMap; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -72,7 +73,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = Vec<(Option, S::Overlay)>; + type Transaction = BTreeMap, S::Overlay>; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -170,7 +171,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", 
"Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, Vec<(Option, S::Overlay)>) + fn storage_root(&self, delta: I) -> (H::Out, BTreeMap, S::Overlay>) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -187,8 +188,9 @@ impl, H: Hasher> Backend for TrieBackend where Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } } - - (root, vec![(None, write_overlay)]) + let mut tx = BTreeMap::new(); + tx.insert(None, write_overlay); + (root, tx) } fn child_storage_root( @@ -234,7 +236,9 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; - (root, is_default, vec![(Some(child_info.to_owned()), write_overlay)]) + let mut tx = BTreeMap::new(); + tx.insert(Some(child_info.to_owned()), write_overlay); + (root, is_default, tx) } fn as_trie_backend(&mut self) -> Option<&TrieBackend> { From cb4c4a96ff2c30eca88f56a8c351bedaa89f10b9 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 29 Jan 2020 20:20:48 +0100 Subject: [PATCH 008/185] change set to btreemap, seems useless (at least do no solve changetrie issue). 
--- client/db/src/lib.rs | 25 +++++++++++------- client/state-db/src/lib.rs | 34 ++++++++++++++++++------ client/state-db/src/noncanonical.rs | 40 ++++++++++++++--------------- client/state-db/src/pruning.rs | 20 +++++++-------- client/state-db/src/test.rs | 19 +++++++------- 5 files changed, 81 insertions(+), 57 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index d42ef59285cb9..651e54593f0e2 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -41,7 +41,7 @@ mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::{HashMap, BTreeMap}; +use std::collections::{HashMap, BTreeMap, btree_map::Entry}; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use sc_client_api::backend::NewBlockState; @@ -1107,7 +1107,7 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changesets = Vec::new(); + let mut changesets = BTreeMap::new(); let mut ops: u64 = 0; let mut bytes: u64 = 0; for (info, mut updates) in operation.db_updates.into_iter() { @@ -1125,7 +1125,14 @@ impl Backend { data.deleted.push(key); } } - changesets.push(sc_state_db::ChildTrieChangeSet{ info, data }); + match changesets.entry(info) { + Entry::Vacant(e) => { e.insert(data); }, + Entry::Occupied(mut e) => { + let e = e.get_mut(); + e.inserted.extend(data.inserted); + e.deleted.extend(data.deleted); + }, + } } self.state_usage.tally_writes(ops, bytes); @@ -1316,31 +1323,31 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - let mut key_buffer = Vec::new(); + let mut key_buffer = Vec::new(); for child_data in commit.data.into_iter() { - if let Some(child_info) = child_data.info { + if let Some(child_info) = child_data.0 { // children tries with prefixes let child_info = child_info.as_ref(); let keyspace = child_info.keyspace(); let keyspace_len = keyspace.len(); key_buffer.resize(keyspace_len, 0); 
key_buffer[..keyspace_len].copy_from_slice(keyspace); - for (key, val) in child_data.data.inserted.into_iter() { + for (key, val) in child_data.1.inserted.into_iter() { key_buffer.resize(keyspace_len + key.len(), 0); key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.put(columns::STATE, &key_buffer[..], &val); } - for key in child_data.data.deleted.into_iter() { + for key in child_data.1.deleted.into_iter() { key_buffer.resize(keyspace_len + key.len(), 0); key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.delete(columns::STATE, &key_buffer[..]); } } else { // top trie without prefixes - for (key, val) in child_data.data.inserted.into_iter() { + for (key, val) in child_data.1.inserted.into_iter() { transaction.put(columns::STATE, &key[..], &val); } - for key in child_data.data.deleted.into_iter() { + for key in child_data.1.deleted.into_iter() { transaction.delete(columns::STATE, &key[..]); } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index adc038a0efaf4..18098af8b882c 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -36,7 +36,7 @@ mod pruning; use std::fmt; use parking_lot::RwLock; use codec::Codec; -use std::collections::{HashMap, hash_map::Entry}; +use std::collections::{BTreeMap, HashMap, hash_map::Entry, btree_map::Entry as BEntry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; @@ -122,6 +122,7 @@ pub struct ChangeSet { } /// A set of state node changes for a child trie. +/// TODO remove?? #[derive(Debug, Clone)] pub struct ChildTrieChangeSet { /// Change set of this element. @@ -131,11 +132,28 @@ pub struct ChildTrieChangeSet { pub info: Option, } +/// Change sets of all child trie (top is key None). +pub type ChildTrieChangeSets = BTreeMap, ChangeSet>; + +/// Extends for `ChildTrieChangeSets` is merging. 
+fn extend_change_sets(set: &mut ChildTrieChangeSets, other: impl Iterator, ChangeSet)>) { + for (ci, o_cs) in other { + match set.entry(ci) { + BEntry::Occupied(mut e) => { + let entry = e.get_mut(); + entry.inserted.extend(o_cs.inserted); + entry.deleted.extend(o_cs.deleted); + }, + BEntry::Vacant(e) => { e.insert(o_cs); }, + } + } +} + /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] pub struct CommitSet { /// State node changes. - pub data: Vec>, + pub data: ChildTrieChangeSets, /// Metadata changes. pub meta: ChangeSet>, } @@ -143,12 +161,12 @@ pub struct CommitSet { impl CommitSet { /// Number of inserted key value element in the set. pub fn inserted_len(&self) -> usize { - self.data.iter().map(|set| set.data.inserted.len()).sum() + self.data.iter().map(|set| set.1.inserted.len()).sum() } /// Number of deleted key value element in the set. pub fn deleted_len(&self) -> usize { - self.data.iter().map(|set| set.data.deleted.len()).sum() + self.data.iter().map(|set| set.1.deleted.len()).sum() } } @@ -261,7 +279,7 @@ impl StateDbSync { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - mut changeset: Vec>, + mut changeset: ChildTrieChangeSets, ) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { @@ -272,7 +290,7 @@ impl StateDbSync { match self.mode { PruningMode::ArchiveAll => { for changeset in changeset.iter_mut() { - changeset.data.deleted.clear(); + changeset.1.deleted.clear(); } // write changes immediately Ok(CommitSet { @@ -299,7 +317,7 @@ impl StateDbSync { Ok(()) => { if self.mode == PruningMode::ArchiveCanonical { for commit in commit.data.iter_mut() { - commit.data.deleted.clear(); + commit.1.deleted.clear(); } } } @@ -451,7 +469,7 @@ impl StateDb { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - changeset: Vec>, + changeset: ChildTrieChangeSets, ) -> Result, Error> { self.db.write().insert_block(hash, number, parent_hash, changeset) } diff --git a/client/state-db/src/noncanonical.rs 
b/client/state-db/src/noncanonical.rs index 1bb4fd0914210..93f26245e28a5 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -22,7 +22,7 @@ use std::fmt; use std::collections::{HashMap, VecDeque, hash_map::Entry}; -use super::{Error, DBValue, ChildTrieChangeSet, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; +use super::{Error, DBValue, ChildTrieChangeSets, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; use sp_core::storage::OwnedChildInfo; @@ -232,7 +232,7 @@ impl NonCanonicalOverlay { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - changeset: Vec>, + changeset: ChildTrieChangeSets, ) -> Result, Error> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); @@ -275,16 +275,16 @@ impl NonCanonicalOverlay { let mut deleted = Vec::with_capacity(changeset.len()); for changeset in changeset.into_iter() { inserted_block.push(( - changeset.info.clone(), - changeset.data.inserted.iter().map(|(k, _)| k.clone()).collect(), + changeset.0.clone(), + changeset.1.inserted.iter().map(|(k, _)| k.clone()).collect(), )); inserted.push(( - changeset.info.clone(), - changeset.data.inserted, + changeset.0.clone(), + changeset.1.inserted, )); deleted.push(( - changeset.info, - changeset.data.deleted, + changeset.0, + changeset.1.deleted, )); } @@ -383,10 +383,10 @@ impl NonCanonicalOverlay { // get the one we need to canonicalize let overlay = &level[index]; - commit.data.extend(overlay.inserted.iter() - .map(|(ct, keys)| ChildTrieChangeSet { - info: ct.clone(), - data: ChangeSet { + crate::extend_change_sets(&mut commit.data, overlay.inserted.iter() + .map(|(ct, keys)| ( + ct.clone(), + ChangeSet { inserted: keys.iter().map(|k| ( k.clone(), self.values.get(k) @@ -394,15 +394,15 @@ impl NonCanonicalOverlay { )).collect(), deleted: Vec::new(), }, - })); - commit.data.extend(overlay.deleted.iter().cloned() - .map(|(ct, keys)| ChildTrieChangeSet { - 
info: ct, - data: ChangeSet { + ))); + crate::extend_change_sets(&mut commit.data, overlay.deleted.iter().cloned() + .map(|(ct, keys)| ( + ct, + ChangeSet { inserted: Vec::new(), deleted: keys, }, - })); + ))); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); @@ -639,8 +639,8 @@ mod tests { db.commit(&insertion); let mut finalization = CommitSet::default(); overlay.canonicalize::(&h1, &mut finalization).unwrap(); - let inserted_len = changeset.iter().map(|set| set.data.inserted.len()).sum(); - let deleted_len = changeset.iter().map(|set| set.data.deleted.len()).sum(); + let inserted_len = changeset.iter().map(|set| set.1.inserted.len()).sum(); + let deleted_len = changeset.iter().map(|set| set.1.deleted.len()).sum(); assert_eq!(finalization.inserted_len(), inserted_len); assert_eq!(finalization.deleted_len(), deleted_len); assert_eq!(finalization.meta.inserted.len(), 1); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index f55d7bf1afee8..fdf5dec0515b7 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -27,7 +27,7 @@ use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; use sp_core::storage::OwnedChildInfo; -use super::{ChildTrieChangeSet, ChangeSet}; +use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; @@ -219,14 +219,14 @@ impl RefWindow { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; - commit.data.extend(pruned.deleted.iter() - .map(|(ct, keys)| ChildTrieChangeSet { - info: ct.clone(), - data: ChangeSet { + crate::extend_change_sets(&mut commit.data, pruned.deleted.iter() + .map(|(ct, keys)| ( + ct.clone(), + ChangeSet { inserted: Vec::new(), deleted: 
keys.iter().cloned().collect(), }, - })); + ))); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); @@ -246,12 +246,12 @@ impl RefWindow { commit.deleted_len(), ); let inserted = commit.data.iter().map(|changeset| ( - changeset.info.clone(), - changeset.data.inserted.iter().map(|(k, _)| k.clone()).collect(), + changeset.0.clone(), + changeset.1.inserted.iter().map(|(k, _)| k.clone()).collect(), )).collect(); let deleted = commit.data.iter_mut().map(|changeset| ( - changeset.info.clone(), - ::std::mem::replace(&mut changeset.data.deleted, Vec::new()), + changeset.0.clone(), + ::std::mem::replace(&mut changeset.1.deleted, Vec::new()), )).collect(); let journal_record = JournalRecordV1 { diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index bb2a21219c6c9..c7be13fb15595 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -18,7 +18,7 @@ use std::collections::HashMap; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSet}; +use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSets}; use sp_core::storage::OwnedChildInfo; #[derive(Default, Debug, Clone, PartialEq, Eq)] @@ -47,13 +47,13 @@ impl NodeDb for TestDb { impl TestDb { pub fn commit(&mut self, commit: &CommitSet) { for ct in commit.data.iter() { - self.data.entry(ct.info.clone()).or_default() - .extend(ct.data.inserted.iter().cloned()) + self.data.entry(ct.0.clone()).or_default() + .extend(ct.1.inserted.iter().cloned()) } self.meta.extend(commit.meta.inserted.iter().cloned()); for ct in commit.data.iter() { - if let Some(self_data) = self.data.get_mut(&ct.info) { - for k in ct.data.deleted.iter() { + if let Some(self_data) = self.data.get_mut(&ct.0) { + for k in ct.1.deleted.iter() { self_data.remove(k); } } @@ -81,11 +81,10 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { } } -pub fn 
make_childchangeset(inserted: &[u64], deleted: &[u64]) -> Vec> { - vec![ChildTrieChangeSet { - info: None, - data: make_changeset(inserted, deleted), - }] +pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> ChildTrieChangeSets { + let mut result = ChildTrieChangeSets::new(); + result.insert(None, make_changeset(inserted, deleted)); + result } pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { From a398b826fba66574ba99eb6a1441e06c021fe33e Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 30 Jan 2020 10:18:32 +0100 Subject: [PATCH 009/185] moving get_1 to get, state-machine needs rework --- client/db/src/changes_tries_storage.rs | 6 +- client/db/src/lib.rs | 19 +++-- client/src/client.rs | 6 +- client/state-db/src/lib.rs | 20 ++++-- client/state-db/src/noncanonical.rs | 44 +++++++----- .../state-machine/src/changes_trie/mod.rs | 15 +++- .../state-machine/src/changes_trie/storage.rs | 19 ++++- .../state-machine/src/proving_backend.rs | 10 ++- .../state-machine/src/trie_backend_essence.rs | 69 +++++++++++++++---- 9 files changed, 160 insertions(+), 48 deletions(-) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 72163a5694213..ab8c7465badd1 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -486,7 +486,11 @@ where self.build_cache.read().with_changed_keys(root, functor) } - fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result, String> { + fn get( + &self, + key: &Block::Hash, + _prefix: Prefix, + ) -> Result, String> { self.db.get(self.changes_tries_column, key.as_ref()) .map_err(|err| format!("{}", err)) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index d42ef59285cb9..73ee6737c2955 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -666,9 +666,14 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + fn 
get( + &self, + trie: Option, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { let key = prefixed_key::>(key, prefix); - self.state_db.get(&key, self) + self.state_db.get(trie, &key, self) .map_err(|e| format!("Database backend error: {:?}", e)) } } @@ -694,7 +699,12 @@ impl DbGenesisStorage { } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { + fn get( + &self, + _trie: Option, + _key: &Block::Hash, + _prefix: Prefix, + ) -> Result, String> { Ok(None) } } @@ -1316,7 +1326,7 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - let mut key_buffer = Vec::new(); + let mut key_buffer = Vec::new(); for child_data in commit.data.into_iter() { if let Some(child_info) = child_data.info { // children tries with prefixes @@ -1640,6 +1650,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(Some(header)) => { sp_state_machine::Storage::get( self.storage.as_ref(), + None, // header in top trie &header.state_root(), (&[], None), ).unwrap_or(None).is_some() diff --git a/client/src/client.rs b/client/src/client.rs index 2850ef9b417b2..118487c4e7b72 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -564,7 +564,11 @@ impl Client where self.storage.with_cached_changed_keys(root, functor) } - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + fn get( + &self, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { self.storage.get(key, prefix) } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index adc038a0efaf4..a54d73d4ab0f3 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -40,7 +40,7 @@ use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::{OwnedChildInfo, ChildInfo}; const PRUNING_MODE: &[u8] = 
b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -400,10 +400,15 @@ impl StateDbSync { } } - pub fn get(&self, key: &Key, db: &D) -> Result, Error> + pub fn get( + &self, + trie: Option, + key: &Key, + db: &D, + ) -> Result, Error> where Key: AsRef { - if let Some(value) = self.non_canonical.get(key) { + if let Some(value) = self.non_canonical.get(trie, key) { return Ok(Some(value)); } db.get(key.as_ref()).map_err(|e| Error::Db(e)) @@ -472,10 +477,15 @@ impl StateDb { } /// Get a value from non-canonical/pruning overlay or the backing DB. - pub fn get(&self, key: &Key, db: &D) -> Result, Error> + pub fn get( + &self, + trie: Option, + key: &Key, + db: &D, + ) -> Result, Error> where Key: AsRef { - self.db.read().get(key, db) + self.db.read().get(trie, key, db) } /// Revert all non-canonical blocks with the best block number. diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 1bb4fd0914210..67c2ba6e19f6b 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -21,11 +21,11 @@ //! `revert_pending` use std::fmt; -use std::collections::{HashMap, VecDeque, hash_map::Entry}; +use std::collections::{HashMap, VecDeque, hash_map::Entry, BTreeMap}; use super::{Error, DBValue, ChildTrieChangeSet, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::{ChildInfo, OwnedChildInfo}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; // version at start to avoid collision when adding a unit @@ -34,6 +34,7 @@ const LAST_CANONICAL: &[u8] = b"last_canonical"; type Keys = Vec<(Option, Vec)>; type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; +type ChildKeyVals = BTreeMap, HashMap>; /// See module documentation. 
pub struct NonCanonicalOverlay { @@ -42,7 +43,7 @@ pub struct NonCanonicalOverlay { parents: HashMap, pending_canonicalizations: Vec, pending_insertions: Vec, - values: HashMap, //ref counted + values: ChildKeyVals, //ref counted //would be deleted but kept around because block is pinned, ref counted. pinned: HashMap, pinned_insertions: HashMap>, @@ -94,8 +95,12 @@ struct BlockOverlay { deleted: Keys, } -fn insert_values(values: &mut HashMap, inserted: KeyVals) { - for (_ct, inserted) in inserted { +fn insert_values( + values: &mut ChildKeyVals, + inserted: KeyVals, +) { + for (ct, inserted) in inserted { + let values = values.entry(ct).or_default(); for (k, v) in inserted { debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); @@ -104,9 +109,10 @@ fn insert_values(values: &mut HashMap, inserted: } } -fn discard_values(values: &mut HashMap, inserted: Keys) { - for inserted in inserted { - for k in inserted.1 { +fn discard_values(values: &mut ChildKeyVals, inserted: Keys) { + for (ct, inserted) in inserted { + let values = values.entry(ct).or_default(); + for k in inserted { match values.entry(k) { Entry::Occupied(mut e) => { let (ref mut counter, _) = e.get_mut(); @@ -125,7 +131,7 @@ fn discard_values(values: &mut HashMap, inserted fn discard_descendants( levels: &mut VecDeque>>, - mut values: &mut HashMap, + mut values: &mut ChildKeyVals, index: usize, parents: &mut HashMap, pinned: &HashMap, @@ -168,7 +174,7 @@ impl NonCanonicalOverlay { }; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); - let mut values = HashMap::new(); + let mut values = BTreeMap::new(); if let Some((ref hash, mut block)) = last_canonicalized { // read the journal trace!(target: "state-db", "Reading uncanonicalized journal. 
Last canonicalized #{} ({:?})", block, hash); @@ -196,7 +202,7 @@ impl NonCanonicalOverlay { let overlay = BlockOverlay { hash: record.hash.clone(), journal_key, - inserted: inserted, + inserted, deleted: record.deleted, }; insert_values(&mut values, record.inserted); @@ -389,7 +395,10 @@ impl NonCanonicalOverlay { data: ChangeSet { inserted: keys.iter().map(|k| ( k.clone(), - self.values.get(k) + self.values + .get(ct) + .expect("For each key in overlays there's a value in values") + .get(k) .expect("For each key in overlays there's a value in values").1.clone(), )).collect(), deleted: Vec::new(), @@ -451,9 +460,12 @@ impl NonCanonicalOverlay { } /// Get a value from the node overlay. This searches in every existing changeset. - pub fn get(&self, key: &Key) -> Option { - if let Some((_, value)) = self.values.get(&key) { - return Some(value.clone()); + pub fn get(&self, trie: Option, key: &Key) -> Option { + // TODO make storage over data representation of OwnedChildInfo to use borrow + if let Some(values) = self.values.get(&trie.map(|t| t.to_owned())) { + if let Some((_, value)) = values.get(&key) { + return Some(value.clone()); + } } None } @@ -559,7 +571,7 @@ mod tests { use crate::test::{make_db, make_childchangeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(None, &H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index d57cf75e19ae0..f3e0ae1159ba5 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -71,6 +71,7 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; +use sp_core::storage::{OwnedChildInfo, ChildInfo}; use sp_trie::{MemoryDB, DBValue, TrieMut}; 
use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -159,7 +160,11 @@ pub trait Storage: RootsStorage { functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } /// Changes trie storage -> trie backend essence adapter. @@ -168,7 +173,13 @@ pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + assert!(trie.is_none(), "Change trie is using a single top trie"); self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 9271eb87a8aa2..b875a3cc70c92 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -26,6 +26,7 @@ use crate::{ trie_backend_essence::TrieBackendStorageRef, changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, }; +use sp_core::storage::ChildInfo; #[cfg(test)] use crate::backend::insert_into_memory_db; @@ -187,8 +188,14 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) + fn get( + &self, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // Change trie is a default top trie. 
+ let trie = None; + MemoryDB::::get(&self.data.read().mdb, trie, key, prefix) } } @@ -205,7 +212,13 @@ impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Numbe { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + assert!(trie.is_none(), "Change trie is a single top trie"); self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 6d5b45596a7ed..80e292310102b 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -243,11 +243,17 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // TODO switch proof model too (use a trie) if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } - let backend_value = self.backend.get(key, prefix)?; + let backend_value = self.backend.get(trie, key, prefix)?; self.proof_recorder.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0e1943e47209d..06db5c946d6da 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -27,13 +27,18 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, for_keys_in_child_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, OwnedChildInfo}; use codec::Encode; /// 
Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } /// Patricia trie-based pairs storage essence. @@ -359,10 +364,12 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O O: hash_db::HashDB + Default + Consolidate, { fn get(&self, key: &H::Out) -> Option { + // TODO need new trait with ct as parameter!!! if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { - match self.storage.get(&key, EMPTY_PREFIX) { + unimplemented!("new trait with ct as parameter"); + match self.storage.get(None, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -382,7 +389,8 @@ impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, H: 'a + Hasher, { fn get(&self, key: &H::Out) -> Option { - match self.storage.get(&key, EMPTY_PREFIX) { + unimplemented!("new trait with ct as parameter"); + match self.storage.get(None, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -432,8 +440,9 @@ impl<'a, S, H, O> hash_db::HashDBRef for Ephemeral<'a, S, H, O> wher fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) - } else { - match self.storage.get(&key, prefix) { + } else { + unimplemented!("new trait with ct as parameter"); + match self.storage.get(None, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -453,7 +462,8 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> H: 'a + Hasher, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - match self.storage.get(&key, prefix) { + unimplemented!("new trait with ct as parameter"); + match self.storage.get(None, &key, prefix) { Ok(x) => x, 
Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -473,7 +483,12 @@ pub trait TrieBackendStorageRef { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } /// Key-value pairs storage that is used by trie backend essence. @@ -485,8 +500,13 @@ impl + Send + Sync> TrieBackendStorage impl TrieBackendStorageRef for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - Storage::::get(self.deref(), key, prefix) + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + Storage::::get(self.deref(), trie, key, prefix) } } @@ -494,7 +514,14 @@ impl TrieBackendStorageRef for Arc> { impl TrieBackendStorageRef for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // TODO should we split prefixed memory db too?? -> likely yes: sharing + // rc does not make sense -> change type of PrefixedMemoryDB. Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -502,17 +529,31 @@ impl TrieBackendStorageRef for PrefixedMemoryDB { impl TrieBackendStorageRef for MemoryDB { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // TODO should we split prefixed memory db too?? -> likely yes: sharing + // rc does not make sense -> change type of PrefixedMemoryDB. + // This could be mergde with prefixed impl through genericmemorydb Ok(hash_db::HashDB::get(self, key, prefix)) } } +// TODO remove : should not be used anymore. 
impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for (&'a B, &'a [u8]) { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { let prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (prefix.0.as_slice(), prefix.1)) + self.0.get(trie, key, (prefix.0.as_slice(), prefix.1)) } } From 7b26a93634ecf1ea527cd479dd58956c8bdfabfa Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 30 Jan 2020 14:47:11 +0100 Subject: [PATCH 010/185] Resolve a bit of child trie --- client/db/src/lib.rs | 6 +- .../state-machine/src/changes_trie/mod.rs | 3 - .../state-machine/src/changes_trie/storage.rs | 7 +- .../state-machine/src/in_memory_backend.rs | 5 +- .../state-machine/src/proving_backend.rs | 3 +- primitives/state-machine/src/trie_backend.rs | 67 +++++-- .../state-machine/src/trie_backend_essence.rs | 188 ++++++------------ primitives/storage/src/lib.rs | 2 +- primitives/trie/src/lib.rs | 88 ++------ 9 files changed, 143 insertions(+), 226 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 73ee6737c2955..52441a3666eaf 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -92,7 +92,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - Arc>>, HasherFor + (Arc>>, Option), HasherFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. 
@@ -1601,7 +1601,7 @@ impl sc_client_api::backend::Backend for Backend { BlockId::Hash(h) if h == Default::default() => { let genesis_storage = DbGenesisStorage::::new(); let root = genesis_storage.0.clone(); - let db_state = DbState::::new(Arc::new(genesis_storage), root); + let db_state = DbState::::new((Arc::new(genesis_storage), None), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); return Ok(CachingState::new(state, self.shared_cache.clone(), None)); }, @@ -1620,7 +1620,7 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root(); - let db_state = DbState::::new(self.storage.clone(), *root); + let db_state = DbState::::new((self.storage.clone(), None), *root); let state = RefTrackingState::new( db_state, self.storage.clone(), diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index f3e0ae1159ba5..77fbd2f17ad14 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -71,7 +71,6 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; -use sp_core::storage::{OwnedChildInfo, ChildInfo}; use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -175,11 +174,9 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBack fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - assert!(trie.is_none(), "Change trie is using a single top trie"); self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index b875a3cc70c92..7e4a79548d78a 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -26,7 +26,6 @@ use crate::{ 
trie_backend_essence::TrieBackendStorageRef, changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, }; -use sp_core::storage::ChildInfo; #[cfg(test)] use crate::backend::insert_into_memory_db; @@ -193,9 +192,7 @@ impl Storage for InMemoryStorage Result, String> { - // Change trie is a default top trie. - let trie = None; - MemoryDB::::get(&self.data.read().mdb, trie, key, prefix) + MemoryDB::::get(&self.data.read().mdb, key, prefix) } } @@ -214,11 +211,9 @@ impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Numbe fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - assert!(trie.is_none(), "Change trie is a single top trie"); self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 0a29468bbc4ef..4dd50a74828a1 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -24,7 +24,7 @@ use crate::{ use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use hash_db::Hasher; use sp_trie::{ - MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; @@ -268,8 +268,7 @@ impl Backend for InMemory where H::Out: Codec { .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); - let root = child_trie_root::, _, _, _>( - &storage_key, + let root = Layout::::trie_root( existing_pairs.chain(transaction.iter().cloned()) .collect::>() .into_iter() diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 80e292310102b..3f925e252ccc6 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ 
b/primitives/state-machine/src/proving_backend.rs @@ -245,7 +245,6 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { @@ -253,7 +252,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } - let backend_value = self.backend.get(trie, key, prefix)?; + let backend_value = self.backend.get(key, prefix)?; self.proof_recorder.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 9d17043de7e18..2daee660a8246 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -15,17 +15,16 @@ // along with Substrate. If not, see . //! Trie-based state machine backend. - use log::{warn, debug}; use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; +use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, OwnedChildInfo}; use std::collections::BTreeMap; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, + trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef, ChildTrieBackendStorage}, }; /// Patricia trie-based backend. 
Transaction type is overlays of changes to commit @@ -86,7 +85,13 @@ impl, H: Hasher> Backend for TrieBackend where child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.child_storage(storage_key, child_info, key) + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { + essence.storage(key) + } else { + Ok(None) + } } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -99,7 +104,13 @@ impl, H: Hasher> Backend for TrieBackend where child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.next_child_storage_key(storage_key, child_info, key) + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { + essence.next_storage_key(key) + } else { + Ok(None) + } } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -116,7 +127,11 @@ impl, H: Hasher> Backend for TrieBackend where child_info: ChildInfo, f: F, ) { - self.essence.for_keys_in_child_storage(storage_key, child_info, f) + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { + essence.for_keys(f) + } } fn for_child_keys_with_prefix( @@ -126,7 +141,11 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { + essence.for_keys_with_prefix(prefix, f) + } } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -206,7 +225,7 @@ impl, H: Hasher> Backend for TrieBackend where let default_root = default_child_trie_root::>(storage_key); let mut write_overlay = S::Overlay::default(); - let mut root = 
match self.storage(storage_key) { + let mut root: H::Out = match self.storage(storage_key) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -216,15 +235,16 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let keyspaced_backend = (self.essence.backend_storage(), child_info.keyspace()); + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info), &mut buf); // Do not write prefix in overlay. let mut eph = Ephemeral::new( - &keyspaced_backend, + &child_essence, &mut write_overlay, ); - match child_delta_trie_root::, _, _, _, _, _>( - storage_key, + match delta_trie_root::, _, _, _, _>( &mut eph, root, delta @@ -246,6 +266,29 @@ impl, H: Hasher> Backend for TrieBackend where } } +impl, H: Hasher> TrieBackend where + H::Out: Ord + Codec, +{ + fn child_essence<'a>( + &'a self, + storage_key: &[u8], + child_info: ChildInfo<'a>, + buffer: &'a mut Vec, + ) -> Result, H>>, >::Error> { + let root: Option = self.storage(storage_key)? 
+ .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + Ok(if let Some(root) = root { + Some(TrieBackendEssence::new(ChildTrieBackendStorage::new( + self.essence.backend_storage(), + Some(child_info), + buffer, + ), root)) + } else { + None + }) + } +} + #[cfg(test)] pub mod tests { use std::collections::HashSet; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 06db5c946d6da..07bba94106d3f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -23,8 +23,8 @@ use std::marker::PhantomData; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - default_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, keyspace_as_prefix_alloc}; + read_trie_value, + for_keys_in_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::{ChildInfo, OwnedChildInfo}; @@ -74,51 +74,9 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { - self.next_storage_key_from_root(&self.root, None, key) - } - - /// Return the next key in the child trie i.e. the minimum key that is strictly superior to - /// `key` in lexicographic order. - pub fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, String> { - let child_root = match self.storage(storage_key)? 
{ - Some(child_root) => child_root, - None => return Ok(None), - }; - - let mut hash = H::Out::default(); - - if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", storage_key)); - } - // note: child_root and hash must be same size, panics otherwise. - hash.as_mut().copy_from_slice(&child_root[..]); - - self.next_storage_key_from_root(&hash, Some(child_info), key) - } - - /// Return next key from main trie or child trie by providing corresponding root. - fn next_storage_key_from_root( - &self, - root: &H::Out, - child_info: Option, - key: &[u8], - ) -> Result, String> { let eph = BackendStorageDBRef::new(&self.storage); - let dyn_eph: &dyn hash_db::HashDBRef<_, _>; - let keyspace_eph; - if let Some(child_info) = child_info.as_ref() { - keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); - dyn_eph = &keyspace_eph; - } else { - dyn_eph = &eph; - } - let trie = TrieDB::::new(dyn_eph, root) + let trie = TrieDB::::new(&eph, &self.root) .map_err(|e| format!("TrieDB creation error: {}", e))?; let mut iter = trie.iter() .map_err(|e| format!("TrieDB iteration error: {}", e))?; @@ -156,72 +114,22 @@ impl, H: Hasher> TrieBackendEssence where H::O read_trie_value::, _>(&eph, &self.root, key).map_err(map_e) } - /// Get the value of child storage at given key. - pub fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, String> { - let root = self.storage(storage_key)? - .unwrap_or(default_child_trie_root::>(storage_key).encode()); - - let eph = BackendStorageDBRef::new(&self.storage); - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value::, _>(storage_key, child_info.keyspace(), &eph, &root, key) - .map_err(map_e) - } - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( + /// Retrieve all entries keys of storage and call `f` for each of those keys. 
+ pub fn for_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, f: F, ) { - let root = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - let eph = BackendStorageDBRef::new(&self.storage); - if let Err(e) = for_keys_in_child_trie::, _, BackendStorageDBRef>( - storage_key, - child_info.keyspace(), + if let Err(e) = for_keys_in_trie::, _, BackendStorageDBRef>( &eph, - &root, + &self.root, f, ) { debug!(target: "trie", "Error while iterating child storage: {}", e); } } - /// Execute given closure for all keys starting with prefix. - pub fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - mut f: F, - ) { - let root_vec = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) - } - /// Execute given closure for all keys starting with prefix. 
pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) @@ -368,8 +276,7 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { - unimplemented!("new trait with ct as parameter"); - match self.storage.get(None, &key, EMPTY_PREFIX) { + match self.storage.get(&key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -389,8 +296,7 @@ impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, H: 'a + Hasher, { fn get(&self, key: &H::Out) -> Option { - unimplemented!("new trait with ct as parameter"); - match self.storage.get(None, &key, EMPTY_PREFIX) { + match self.storage.get(&key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -441,8 +347,7 @@ impl<'a, S, H, O> hash_db::HashDBRef for Ephemeral<'a, S, H, O> wher if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) } else { - unimplemented!("new trait with ct as parameter"); - match self.storage.get(None, &key, prefix) { + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -462,8 +367,7 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> H: 'a + Hasher, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - unimplemented!("new trait with ct as parameter"); - match self.storage.get(None, &key, prefix) { + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -485,7 +389,6 @@ pub trait TrieBackendStorageRef { /// Get the value stored at key. 
fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -497,63 +400,88 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} // This implementation is used by normal storage trie clients. -impl TrieBackendStorageRef for Arc> { +impl TrieBackendStorageRef for (Arc>, Option) { type Overlay = PrefixedMemoryDB; fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - Storage::::get(self.deref(), trie, key, prefix) + let child_info = self.1.as_ref(); + Storage::::get(self.0.deref(), child_info.map(|c| c.as_ref()), key, prefix) } } -// This implementation is used by test storage trie clients. -impl TrieBackendStorageRef for PrefixedMemoryDB { + +/// This is an essence for the child trie backend. +pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { + db: &'a B, + info: Option>, + buffer: &'a mut Vec, + _ph: PhantomData, +} + +impl<'a, H: Hasher, B: TrieBackendStorageRef> ChildTrieBackendStorage<'a, H, B> { + /// Instantiate a `ChildTrieBackendStorage`. + pub fn new(db: &'a B, info: Option>, buffer: &'a mut Vec) -> Self { + ChildTrieBackendStorage { + db, + info, + buffer, + _ph: PhantomData, + } + } +} + +impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for ChildTrieBackendStorage<'a, H, B> { type Overlay = PrefixedMemoryDB; fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - // TODO should we split prefixed memory db too?? -> likely yes: sharing - // rc does not make sense -> change type of PrefixedMemoryDB. 
- Ok(hash_db::HashDB::get(self, key, prefix)) + if let Some(keyspace) = self.info.as_ref().map(|ci| ci.keyspace()) { + // TODO switch to &mut self like in overlay pr and use commented code + /*self.buffer.resize(keyspace.len() + prefix.0.len(), 0); + self.buffer[..keyspace.len()].copy_from_slice(keyspace); + self.buffer[keyspace.len()..].copy_from_slice(prefix.0); + self.db.get(key, (self.buffer.as_slice(), prefix.1))*/ + + let prefix = keyspace_as_prefix_alloc(keyspace, prefix); + self.db.get(key, (prefix.0.as_slice(), prefix.1)) + } else { + self.db.get(key, prefix) + } } } -impl TrieBackendStorageRef for MemoryDB { - type Overlay = MemoryDB; + +// This implementation is used by test storage trie clients. +impl TrieBackendStorageRef for PrefixedMemoryDB { + type Overlay = PrefixedMemoryDB; fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { // TODO should we split prefixed memory db too?? -> likely yes: sharing // rc does not make sense -> change type of PrefixedMemoryDB. - // This could be mergde with prefixed impl through genericmemorydb Ok(hash_db::HashDB::get(self, key, prefix)) } } -// TODO remove : should not be used anymore. 
-impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for (&'a B, &'a [u8]) { - type Overlay = PrefixedMemoryDB; +impl TrieBackendStorageRef for MemoryDB { + type Overlay = MemoryDB; fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - let prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(trie, key, (prefix.0.as_slice(), prefix.1)) + Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -562,6 +490,8 @@ mod test { use sp_core::{Blake2Hasher, H256}; use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; + use crate::trie_backend::TrieBackend; + use crate::backend::Backend; #[test] fn next_storage_key_and_next_child_storage_key_work() { @@ -592,7 +522,7 @@ mod test { trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); }; - let essence_1 = TrieBackendEssence::new(mdb, root_1); + let essence_1 = TrieBackend::new(mdb, root_1); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -601,7 +531,7 @@ mod test { assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); let mdb = essence_1.into_storage(); - let essence_2 = TrieBackendEssence::new(mdb, root_2); + let essence_2 = TrieBackend::new(mdb, root_2); assert_eq!( essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 0407444e0055b..34e7f0ead6d18 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -113,7 +113,7 @@ pub mod well_known_keys { /// /// For now, the only valid child trie keys are those starting with `:child_storage:default:`. /// - /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. + /// `trie_root` can panic if invalid value is provided to them. 
pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { let has_right_prefix = storage_key.starts_with(b":child_storage:default:"); if has_right_prefix { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index fe8d7e66a6331..788c3627b2af4 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -217,72 +217,45 @@ pub fn default_child_trie_root( L::trie_root::<_, Vec, Vec>(core::iter::empty()) } -/// Determine a child trie root given its ordered contents, closed form. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root( - _storage_key: &[u8], - input: I, -) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, -{ - L::trie_root(input) -} - -/// Determine a child trie root given a hash DB and delta values. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_delta_trie_root( +/// Call `f` for all keys in a child trie. +pub fn for_keys_in_child_trie( _storage_key: &[u8], - db: &mut DB, - root_data: RD, - delta: I, -) -> Result<::Out, Box>> + keyspace: &[u8], + db: &DB, + root_slice: &[u8], + mut f: F +) -> Result<(), Box>> where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB - + hash_db::PlainDB, trie_db::DBValue>, + DB: hash_db::HashDBRef + + hash_db::PlainDBRef, trie_db::DBValue>, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. 
- root.as_mut().copy_from_slice(root_data.as_ref()); + root.as_mut().copy_from_slice(root_slice); - { - let mut trie = TrieDBMut::::from_existing(db, &mut root)?; + let db = KeySpacedDB::new(&*db, keyspace); + let trie = TrieDB::::new(&db, &root)?; + let iter = trie.iter()?; - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } + for x in iter { + let (key, _) = x?; + f(&key); } - Ok(root) + Ok(()) } /// Call `f` for all keys in a child trie. -pub fn for_keys_in_child_trie( - _storage_key: &[u8], - keyspace: &[u8], +pub fn for_keys_in_trie( db: &DB, - root_slice: &[u8], + root: &TrieHash, mut f: F ) -> Result<(), Box>> where DB: hash_db::HashDBRef + hash_db::PlainDBRef, trie_db::DBValue>, { - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - let trie = TrieDB::::new(&db, &root)?; + let trie = TrieDB::::new(&*db, &root)?; let iter = trie.iter()?; for x in iter { @@ -293,6 +266,7 @@ pub fn for_keys_in_child_trie( Ok(()) } + /// Record all keys for a given root. pub fn record_all_keys( db: &DB, @@ -316,26 +290,6 @@ pub fn record_all_keys( Ok(()) } -/// Read a value from the child trie. -pub fn read_child_trie_value( - _storage_key: &[u8], - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8] -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) -} - /// Read a value from the child trie with given query. 
pub fn read_child_trie_value_with, DB>( _storage_key: &[u8], From f39ce3f3d894b6dce2adcc2a024a93612285971d Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 30 Jan 2020 17:46:19 +0100 Subject: [PATCH 011/185] small refact --- client/db/src/lib.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index bf6bac25beec5..2729825c63c92 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1117,11 +1117,11 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changesets = BTreeMap::new(); + let mut changesets = BTreeMap::<_, sc_state_db::ChangeSet>>::new(); let mut ops: u64 = 0; let mut bytes: u64 = 0; for (info, mut updates) in operation.db_updates.into_iter() { - let mut data: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + let data = changesets.entry(info).or_default(); for (key, (val, rc)) in updates.drain() { if rc > 0 { ops += 1; @@ -1135,14 +1135,6 @@ impl Backend { data.deleted.push(key); } } - match changesets.entry(info) { - Entry::Vacant(e) => { e.insert(data); }, - Entry::Occupied(mut e) => { - let e = e.get_mut(); - e.inserted.extend(data.inserted); - e.deleted.extend(data.deleted); - }, - } } self.state_usage.tally_writes(ops, bytes); From 9f0c600c7f4c4bafebc482ee9f3c0d2a6c29fc48 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 31 Jan 2020 09:35:11 +0100 Subject: [PATCH 012/185] indent --- client/db/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 2729825c63c92..e011c0373efb8 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -41,7 +41,7 @@ mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::{HashMap, BTreeMap, btree_map::Entry}; +use std::collections::{HashMap, BTreeMap}; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use 
sc_client_api::backend::NewBlockState; From ecb43d04f055de789c824eab865312c9dd883fee Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 31 Jan 2020 09:35:11 +0100 Subject: [PATCH 013/185] Use const of null hash check on BackendStorageRef. --- client/src/cht.rs | 13 +++--- client/src/light/backend.rs | 2 +- client/src/light/call_executor.rs | 2 +- client/src/light/fetcher.rs | 3 +- client/transaction-pool/src/api.rs | 2 +- primitives/api/src/lib.rs | 2 +- primitives/core/src/lib.rs | 44 ++++++++++++++++++- primitives/state-machine/src/backend.rs | 4 +- .../state-machine/src/changes_trie/build.rs | 4 +- .../src/changes_trie/changes_iterator.rs | 2 +- .../state-machine/src/changes_trie/mod.rs | 3 +- .../state-machine/src/changes_trie/prune.rs | 4 +- .../state-machine/src/changes_trie/storage.rs | 3 +- primitives/state-machine/src/ext.rs | 2 +- .../state-machine/src/in_memory_backend.rs | 4 +- primitives/state-machine/src/lib.rs | 4 +- .../state-machine/src/overlayed_changes.rs | 2 +- .../state-machine/src/proving_backend.rs | 5 ++- primitives/state-machine/src/testing.rs | 2 +- primitives/state-machine/src/trie_backend.rs | 2 +- .../state-machine/src/trie_backend_essence.rs | 13 +++++- primitives/trie/src/lib.rs | 41 ++++++++++------- 22 files changed, 114 insertions(+), 49 deletions(-) diff --git a/client/src/cht.rs b/client/src/cht.rs index 29f19a77504b9..f470ee4fbe6fa 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -23,11 +23,10 @@ //! root has. A correct proof implies that the claimed block is identical to the one //! we discarded. 
-use hash_db; use codec::Encode; use sp_trie; -use sp_core::{H256, convert_hash}; +use sp_core::{H256, convert_hash, self}; use sp_runtime::traits::{Header as HeaderT, SimpleArithmetic, Zero, One}; use sp_state_machine::{ MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, @@ -86,7 +85,7 @@ pub fn compute_root( ) -> ClientResult where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord, I: IntoIterator>>, { @@ -105,7 +104,7 @@ pub fn build_proof( ) -> ClientResult where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, BlocksI: IntoIterator, HashesI: IntoIterator>>, @@ -132,7 +131,7 @@ pub fn check_proof( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, { do_check_proof::( @@ -161,7 +160,7 @@ pub fn check_proof_on_proving_backend( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, { do_check_proof::( @@ -185,7 +184,7 @@ fn do_check_proof( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord, F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, { diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 34259ac895539..d7b992403b288 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -46,7 +46,7 @@ use sc_client_api::{ UsageInfo, }; use crate::light::blockchain::Blockchain; -use hash_db::Hasher; +use sp_core::Hasher; const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 01a93c78219bc..20b4faf4a303c 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -31,7 +31,7 @@ use sp_state_machine::{ 
execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, merge_storage_proofs, }; -use hash_db::Hasher; +use sp_core::Hasher; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index d66108b7f0adb..38bf4aaf24eb3 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -20,7 +20,8 @@ use std::sync::Arc; use std::collections::{BTreeMap, HashMap}; use std::marker::PhantomData; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use hash_db::{HashDB, EMPTY_PREFIX}; +use sp_core::Hasher; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; use sp_runtime::traits::{ diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index bfc13c01fdf53..a2bf7fb6021ba 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -27,7 +27,7 @@ use sc_client_api::{ light::{Fetcher, RemoteCallRequest, RemoteBodyRequest}, BlockBody, }; -use sp_core::Hasher; +use sp_core::InnerHasher; use sp_runtime::{ generic::BlockId, traits::{self, Block as BlockT, BlockIdTo, Header as HeaderT, Hash as HashT}, transaction_validity::TransactionValidity, diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index bde00d48172e8..97f24de2d4a5b 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -43,7 +43,7 @@ pub use sp_state_machine::{ pub use sp_core::NativeOrEncoded; #[doc(hidden)] #[cfg(feature = "std")] -pub use hash_db::Hasher; +pub use sp_state_machine::Hasher; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 5bb9a3927f965..95efbce865d51 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -79,7 +79,8 @@ pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub 
use crypto::{DeriveJunction, Pair, Public}; -pub use hash_db::Hasher; +pub use hash_db::Hasher as InnerHasher; +pub use hash_db::{Prefix, EMPTY_PREFIX}; // Switch back to Blake after PoC-3 is out // pub use self::hasher::blake::BlakeHasher; pub use self::hasher::blake2::Blake2Hasher; @@ -349,3 +350,44 @@ macro_rules! impl_maybe_marker { )+ } } + +/// Technical trait to avoid calculating empty root. +/// This assumes (same wrong asumption as for hashdb trait), +/// an empty node is `[0u8]`. +pub trait Hasher: InnerHasher { + /// Associated constant value. + const EMPTY_ROOT: Option<&'static [u8]>; + + + /// Test to call for all new implementation. + #[cfg(test)] + fn test_associated_empty_root() -> bool { + if let Some(root) = Self::EMPTY_ROOT.as_ref() { + let empty = Self::hash(&[0u8]); + if *root != empty.as_ref() { + return false; + } + } + + true + } +} + +impl Hasher for Blake2Hasher { + const EMPTY_ROOT: Option<&'static [u8]> = Some(&[ + 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, + 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, + 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, + 19, 20, + ]); +} + +#[cfg(test)] +mod test { + use super::{Blake2Hasher, Hasher}; + + #[test] + fn empty_root_const() { + assert!(Blake2Hasher::test_associated_empty_root()); + } +} diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index cd8a69f3f2d2a..cdb226935cc42 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -17,7 +17,7 @@ //! State machine backends. These manage the code and storage of contracts. 
use log::warn; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use codec::Encode; use sp_core::storage::{ChildInfo, OwnedChildInfo}; @@ -369,7 +369,7 @@ pub(crate) fn insert_into_memory_db(mdb: &mut MemoryDB, input: I) -> Op H: Hasher, I: IntoIterator, { - let mut root = ::Out::default(); + let mut root = ::Out::default(); { let mut trie = TrieDBMut::::new(mdb, &mut root); for (key, value) in input { diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index c731d4104b260..16e6a2da4583f 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -19,7 +19,7 @@ use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use codec::{Decode, Encode}; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use num_traits::One; use crate::{ StorageKey, @@ -291,7 +291,7 @@ fn prepare_digest_input<'a, H, Number>( trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); + let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.insert(trie_key.storage_key, trie_root); } diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 9f2d44967d716..9e185d0444c86 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -20,7 +20,7 @@ use std::cell::RefCell; use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; -use hash_db::Hasher; +use sp_core::Hasher; use num_traits::Zero; use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; diff --git 
a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 77fbd2f17ad14..45970e7a31dc7 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -67,7 +67,8 @@ pub use self::prune::prune; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; -use hash_db::{Hasher, Prefix}; +use hash_db::Prefix; +use sp_core::Hasher; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index f6be3223ae9f8..94e8fe4bdaed2 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -16,7 +16,7 @@ //! Changes trie pruning-related functions. -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use sp_trie::Recorder; use log::warn; use num_traits::One; @@ -68,7 +68,7 @@ pub fn prune( trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); + let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.push(trie_root); } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 7e4a79548d78a..ee2599d09548a 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -17,7 +17,8 @@ //! Changes trie storage utilities. 
use std::collections::{BTreeMap, HashSet, HashMap}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use hash_db::{Prefix, EMPTY_PREFIX}; +use sp_core::Hasher; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 9d70382bf4ccc..39dbe2e901592 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -22,8 +22,8 @@ use crate::{ changes_trie::State as ChangesTrieState, }; -use hash_db::Hasher; use sp_core::{ + Hasher, storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, }; diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 4dd50a74828a1..ab96a63c63686 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -22,7 +22,7 @@ use crate::{ backend::{Backend, insert_into_memory_db}, }; use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use sp_trie::{ MemoryDB, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; @@ -232,7 +232,7 @@ impl Backend for InMemory where H::Out: Codec { fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)>, - ::Out: Ord, + ::Out: Ord, { let existing_pairs = self.inner.get(&None) .into_iter() diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 173de031c5db7..66da5b8920450 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -20,7 +20,7 @@ use std::{fmt, result, collections::HashMap, panic::UnwindSafe, marker::PhantomData}; use log::{warn, trace}; -use hash_db::Hasher; +pub use sp_core::{Hasher, InnerHasher}; use codec::{Decode, Encode, Codec}; use sp_core::{ storage::ChildInfo, 
NativeOrEncoded, NeverNativeValue, @@ -84,7 +84,7 @@ pub type DefaultHandler = fn(CallResult, CallResult) -> CallRe /// Type of changes trie transaction. pub type ChangesTrieTransaction = ( MemoryDB, - ChangesTrieCacheAction<::Out, N>, + ChangesTrieCacheAction<::Out, N>, ); /// Strategy for executing a call into the runtime. diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index a15e8c613d3d0..d983680ff0797 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -31,7 +31,7 @@ use codec::{Decode, Encode}; use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; use std::{mem, ops}; -use hash_db::Hasher; +use sp_core::Hasher; /// Storage key. pub type StorageKey = Vec; diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 3f925e252ccc6..65e5d25027c9d 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -20,7 +20,8 @@ use std::sync::Arc; use parking_lot::RwLock; use codec::{Decode, Encode, Codec}; use log::debug; -use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; +use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; +use sp_core::{Hasher, InnerHasher}; use sp_trie::{ MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys @@ -190,7 +191,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Global proof recorder, act as a layer over a hash db for recording queried /// data. -pub type ProofRecorder = Arc::Out, Option>>>; +pub type ProofRecorder = Arc::Out, Option>>>; /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. 
diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 39a34509b720b..56393747e9fdc 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -18,7 +18,7 @@ use std::any::{Any, TypeId}; use codec::Decode; -use hash_db::Hasher; +use sp_core::Hasher; use crate::{ backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, StorageKey, StorageValue, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 2daee660a8246..6d445bc7c7562 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -16,7 +16,7 @@ //! Trie-based state machine backend. use log::{warn, debug}; -use hash_db::Hasher; +use sp_core::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, OwnedChildInfo}; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 54ca824f956e9..6df54341f74fa 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -21,9 +21,10 @@ use std::ops::Deref; use std::sync::Arc; use std::marker::PhantomData; use log::{debug, warn}; -use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; +use sp_core::Hasher; +use hash_db::{self, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - read_trie_value, + read_trie_value, check_if_empty_root, for_keys_in_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; @@ -296,6 +297,10 @@ impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, H: 'a + Hasher, { fn get(&self, key: &H::Out) -> Option { + if check_if_empty_root::(key.as_ref()) { + 
return Some(vec![0u8]); + } + match self.storage.get(&key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { @@ -367,6 +372,10 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> H: 'a + Hasher, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + if check_if_empty_root::(key.as_ref()) { + return Some(vec![0u8]); + } + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index e265652d407e9..379d9d7a655a0 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -26,7 +26,7 @@ mod trie_stream; use sp_std::boxed::Box; use sp_std::marker::PhantomData; use sp_std::vec::Vec; -use hash_db::{Hasher, Prefix}; +use sp_core::{Hasher, InnerHasher, Prefix}; use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. @@ -49,14 +49,14 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; /// substrate trie layout pub struct Layout(sp_std::marker::PhantomData); -impl TrieLayout for Layout { +impl TrieLayout for Layout { const USE_EXTENSION: bool = false; type Hash = H; type Codec = NodeCodec; } -impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out where +impl TrieConfiguration for Layout { + fn trie_root(input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -80,8 +80,8 @@ impl TrieConfiguration for Layout { /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub trait AsHashDB: hash_db::AsHashDB {} -impl> AsHashDB for T {} +pub trait AsHashDB: hash_db::AsHashDB {} +impl> AsHashDB for T {} /// Reexport from `hash_db`, with genericity set for `Hasher` trait. pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for key only. 
@@ -105,7 +105,7 @@ pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; /// Querying interface, as in `trie_db` but less generic. pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>; /// Hash type for a trie layout. -pub type TrieHash = <::Hash as Hasher>::Out; +pub type TrieHash = <::Hash as InnerHasher>::Out; /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. @@ -213,10 +213,20 @@ pub fn read_trie_value_with< /// Determine the default child trie root. pub fn default_child_trie_root( _storage_key: &[u8], -) -> ::Out { +) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } +pub fn check_if_empty_root ( + root: &[u8], +) -> bool { + if let Some(empty_root) = H::EMPTY_ROOT.as_ref() { + *empty_root == root + } else { + H::hash(&[0u8]).as_ref() == root + } +} + /// Call `f` for all keys in a child trie. pub fn for_keys_in_trie( db: &DB, @@ -304,7 +314,7 @@ pub fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option KeySpacedDB<'a, DB, H> where - H: Hasher, + H: InnerHasher, { /// instantiate new keyspaced db pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { @@ -314,7 +324,7 @@ impl<'a, DB, H> KeySpacedDB<'a, DB, H> where #[cfg(feature="test-helpers")] impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where - H: Hasher, + H: InnerHasher, { /// instantiate new keyspaced db pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { @@ -324,7 +334,7 @@ impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where DB: hash_db::HashDBRef, - H: Hasher, + H: InnerHasher, T: From<&'static [u8]>, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { @@ -341,7 +351,7 @@ impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where #[cfg(feature="test-helpers")] impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, - H: Hasher, + H: InnerHasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + 
Send + Sync, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { @@ -373,7 +383,7 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where #[cfg(feature="test-helpers")] impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, - H: Hasher, + H: InnerHasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } @@ -397,7 +407,8 @@ mod tests { use super::*; use codec::{Encode, Compact}; use sp_core::Blake2Hasher; - use hash_db::{HashDB, Hasher}; + use hash_db::HashDB; + use sp_core::{Hasher, InnerHasher}; use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; @@ -581,7 +592,7 @@ mod tests { #[test] fn random_should_work() { - let mut seed = ::Out::zero(); + let mut seed = ::Out::zero(); for test_i in 0..10000 { if test_i % 50 == 0 { println!("{:?} of 10000 stress tests done", test_i); From ae29df5338c0111a9e235284cf0d8dbe5dc94e86 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 31 Jan 2020 15:35:59 +0100 Subject: [PATCH 014/185] Associated null node hash set to a non optional const. --- primitives/core/src/lib.rs | 28 ++++++++-------------------- primitives/trie/src/lib.rs | 7 ++----- 2 files changed, 10 insertions(+), 25 deletions(-) diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 95efbce865d51..113ff634ff8ca 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -355,39 +355,27 @@ macro_rules! impl_maybe_marker { /// This assumes (same wrong asumption as for hashdb trait), /// an empty node is `[0u8]`. pub trait Hasher: InnerHasher { - /// Associated constant value. - const EMPTY_ROOT: Option<&'static [u8]>; - - - /// Test to call for all new implementation. 
- #[cfg(test)] - fn test_associated_empty_root() -> bool { - if let Some(root) = Self::EMPTY_ROOT.as_ref() { - let empty = Self::hash(&[0u8]); - if *root != empty.as_ref() { - return false; - } - } - - true - } + /// Value for an empty root node, this + /// is the hash of `[0u8]` value. + const EMPTY_ROOT: &'static [u8]; } impl Hasher for Blake2Hasher { - const EMPTY_ROOT: Option<&'static [u8]> = Some(&[ + const EMPTY_ROOT: &'static [u8] = &[ 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, 19, 20, - ]); + ]; } #[cfg(test)] mod test { - use super::{Blake2Hasher, Hasher}; + use super::{Blake2Hasher, Hasher, InnerHasher}; #[test] fn empty_root_const() { - assert!(Blake2Hasher::test_associated_empty_root()); + let empty = Blake2Hasher::hash(&[0u8]); + assert_eq!(Blake2Hasher::EMPTY_ROOT, empty.as_ref()); } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 379d9d7a655a0..bb8c7f880aa92 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -217,14 +217,11 @@ pub fn default_child_trie_root( L::trie_root::<_, Vec, Vec>(core::iter::empty()) } +/// Test if this is an empty root node. pub fn check_if_empty_root ( root: &[u8], ) -> bool { - if let Some(empty_root) = H::EMPTY_ROOT.as_ref() { - *empty_root == root - } else { - H::hash(&[0u8]).as_ref() == root - } + H::EMPTY_ROOT == root } /// Call `f` for all keys in a child trie. From 6a06c0a81f914a4ead81f9a55209e36ca716f9b6 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Sat, 1 Feb 2020 00:12:21 +0100 Subject: [PATCH 015/185] Make ChildInfo borrow of OwnedChildInfo. 
--- client/chain-spec/src/chain_spec.rs | 10 +- client/db/src/lib.rs | 19 +- client/db/src/storage_cache.rs | 20 +- client/network/src/chain.rs | 4 +- client/network/src/protocol.rs | 7 +- client/network/src/protocol/light_dispatch.rs | 4 +- client/rpc/src/state/state_full.rs | 18 +- client/rpc/src/state/tests.rs | 13 +- client/src/client.rs | 8 +- client/src/light/backend.rs | 10 +- client/src/light/fetcher.rs | 12 +- client/state-db/src/lib.rs | 4 +- client/state-db/src/noncanonical.rs | 2 +- frame/contracts/src/account_db.rs | 31 ++- frame/contracts/src/exec.rs | 31 ++- frame/contracts/src/lib.rs | 17 +- frame/contracts/src/rent.rs | 4 +- frame/contracts/src/tests.rs | 12 +- frame/support/src/storage/child.rs | 30 +-- primitives/externalities/src/lib.rs | 22 +- primitives/io/src/lib.rs | 50 ++--- primitives/runtime/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 28 +-- primitives/state-machine/src/basic.rs | 37 ++-- .../state-machine/src/changes_trie/build.rs | 19 +- primitives/state-machine/src/ext.rs | 44 ++-- .../state-machine/src/in_memory_backend.rs | 20 +- primitives/state-machine/src/lib.rs | 24 ++- .../state-machine/src/overlayed_changes.rs | 19 +- .../state-machine/src/proving_backend.rs | 32 +-- primitives/state-machine/src/trie_backend.rs | 21 +- .../state-machine/src/trie_backend_essence.rs | 13 +- primitives/storage/Cargo.toml | 2 +- primitives/storage/src/lib.rs | 202 +++++++++++------- primitives/trie/src/lib.rs | 2 +- test-utils/client/src/lib.rs | 2 +- test-utils/runtime/client/src/lib.rs | 2 +- 37 files changed, 446 insertions(+), 351 deletions(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 81cbce5ea731c..8688e8ec9d1cd 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -22,7 +22,7 @@ use std::fs::File; use std::path::PathBuf; use std::rc::Rc; use serde::{Serialize, Deserialize}; -use sp_core::storage::{StorageKey, StorageData, ChildInfo, 
Storage, StorageChild}; +use sp_core::storage::{StorageKey, StorageData, OwnedChildInfo, Storage, StorageChild}; use sp_runtime::BuildStorage; use serde_json as json; use crate::RuntimeGenesis; @@ -77,10 +77,7 @@ impl BuildStorage for ChainSpec { Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = ChildInfo::resolve_child_info( - child_content.child_type, - child_content.child_info.as_slice(), - ).expect("chain spec contains correct content").to_owned(); + let child_info = OwnedChildInfo::new_default(child_content.child_info.as_slice()); ( sk.0, StorageChild { @@ -287,8 +284,7 @@ impl ChainSpec { .collect(); let children = storage.children.into_iter() .map(|(sk, child)| { - let info = child.child_info.as_ref(); - let (info, ci_type) = info.info(); + let (info, ci_type) = child.child_info.info(); ( StorageKey(sk), ChildRawStorage { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index e011c0373efb8..b60157d5429ba 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -148,7 +148,7 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.child_storage(storage_key, child_info, key) @@ -161,7 +161,7 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.exists_child_storage(storage_key, child_info, key) @@ -174,7 +174,7 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.next_child_storage_key(storage_key, child_info, key) @@ -191,7 +191,7 @@ impl StateBackend> for RefTrackingState 
{ fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.state.for_keys_in_child_storage(storage_key, child_info, f) @@ -200,7 +200,7 @@ impl StateBackend> for RefTrackingState { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -217,7 +217,7 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -237,7 +237,7 @@ impl StateBackend> for RefTrackingState { fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.child_keys(storage_key, child_info, prefix) @@ -668,7 +668,7 @@ struct StorageDb { impl sp_state_machine::Storage> for StorageDb { fn get( &self, - trie: Option, + trie: Option<&ChildInfo>, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { @@ -701,7 +701,7 @@ impl DbGenesisStorage { impl sp_state_machine::Storage> for DbGenesisStorage { fn get( &self, - _trie: Option, + _trie: Option<&ChildInfo>, _key: &Block::Hash, _prefix: Prefix, ) -> Result, String> { @@ -1329,7 +1329,6 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm for child_data in commit.data.into_iter() { if let Some(child_info) = child_data.0 { // children tries with prefixes - let child_info = child_info.as_ref(); let keyspace = child_info.keyspace(); let keyspace_len = keyspace.len(); key_buffer.resize(keyspace_len, 0); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index e300ec8b29312..71fae6771c39c 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -540,7 +540,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> 
Result>, Self::Error> { let key = (storage_key.to_vec(), key.to_vec()); @@ -577,7 +577,7 @@ impl>, B: BlockT> StateBackend> for Ca fn exists_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.exists_child_storage(storage_key, child_info, key) @@ -586,7 +586,7 @@ impl>, B: BlockT> StateBackend> for Ca fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.state.for_keys_in_child_storage(storage_key, child_info, f) @@ -599,7 +599,7 @@ impl>, B: BlockT> StateBackend> for Ca fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.next_child_storage_key(storage_key, child_info, key) @@ -616,7 +616,7 @@ impl>, B: BlockT> StateBackend> for Ca fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -633,7 +633,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -653,7 +653,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.child_keys(storage_key, child_info, prefix) @@ -677,8 +677,7 @@ mod tests { type Block = RawBlock>; - const CHILD_KEY_1: &'static [u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_KEY_1); + const CHILD_KEY_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; #[test] fn smoke() { @@ -969,6 +968,7 @@ mod tests { #[test] fn should_track_used_size_correctly() { + let child_info1 = ChildInfo::resolve_child_info(CHILD_KEY_1).unwrap(); let root_parent = H256::random(); let shared = new_shared_cache::(109, ((109-36), 109)); 
let h0 = H256::random(); @@ -996,7 +996,7 @@ mod tests { &[], &[], vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], CHILD_INFO_1.to_owned())], + vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], child_info1.to_owned())], Some(h0), Some(0), true, diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index b991a0e65208c..bad7d71419285 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -57,7 +57,7 @@ pub trait Client: Send + Sync { &self, block: &Block::Hash, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: &[Vec], ) -> Result; @@ -139,7 +139,7 @@ impl Client for SubstrateClient where &self, block: &Block::Hash, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: &[Vec], ) -> Result { (self as &SubstrateClient) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 2df8f6597c508..45f2ee3497380 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::Substream, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo}; +use sp_core::storage::{StorageKey, OwnedChildInfo, ChildType}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1555,11 +1555,12 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { + let proof = if ChildType::CryptoUniqueId as u32 == request.child_type { + let child_info = OwnedChildInfo::new_default(&request.child_info[..]); match 
self.context_data.chain.read_child_proof( &request.block, &request.storage_key, - child_info, + &*child_info, &request.keys, ) { Ok(proof) => proof, diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index bfa8daa181ca1..b50688eea67a0 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -681,7 +681,7 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use futures::channel::oneshot; - use sp_core::storage::ChildInfo; + use sp_core::storage::OwnedChildInfo; use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, @@ -1035,7 +1035,7 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - let child_info = ChildInfo::new_default(b"unique_id_1"); + let child_info = OwnedChildInfo::new_default(b"unique_id_1"); let (child_info, child_type) = child_info.info(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { header: dummy_header(), diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index cd77e8b080846..867bf5ff3314d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,8 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, + storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, OwnedChildInfo, ChildType}, + Bytes, }; use sp_version::RuntimeVersion; use sp_state_machine::ExecutionStrategy; @@ -290,7 +291,7 @@ impl StateBackend for FullState StateBackend for FullState StateBackend for FullState StateBackend for FullState Option { + if child_type != 
ChildType::CryptoUniqueId as u32 { + None + } else { + Some(OwnedChildInfo::new_default(&child_definition[..])) + } +} + + /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index a0ab11e977204..f18e31e9d30e3 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,7 +30,7 @@ use substrate_test_runtime_client::{ runtime, }; -const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id"); +const CHILD_INFO: &'static [u8] = b"\x01\x00\x00\x00unique_id"; #[test] fn should_return_storage() { @@ -38,17 +38,17 @@ fn should_return_storage() { const VALUE: &[u8] = b"hello world"; const STORAGE_KEY: &[u8] = b":child_storage:default:child"; const CHILD_VALUE: &[u8] = b"hello world !"; - + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO).unwrap(); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) + .add_extra_child_storage(STORAGE_KEY.to_vec(), child_info1, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(STORAGE_KEY.to_vec()); - let (child_info, child_type) = CHILD_INFO.info(); + let (child_info, child_type) = child_info1.info(); let child_info = StorageKey(child_info.to_vec()); assert_eq!( @@ -77,11 +77,12 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let (child_info, child_type) = CHILD_INFO.info(); + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO).unwrap(); + let (child_info, child_type) = child_info1.info(); let 
child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) + .add_child_storage("test", "key", child_info1, vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); diff --git a/client/src/client.rs b/client/src/client.rs index 118487c4e7b72..7acef6a4a910c 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -261,7 +261,7 @@ impl Client where &self, id: &BlockId, child_storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { let keys = self.state_at(id)? @@ -277,7 +277,7 @@ impl Client where &self, id: &BlockId, storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? @@ -291,7 +291,7 @@ impl Client where &self, id: &BlockId, storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? 
@@ -333,7 +333,7 @@ impl Client where &self, id: &BlockId, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> sp_blockchain::Result where I: IntoIterator, diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index d7b992403b288..12186a5b61ac2 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -387,7 +387,7 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> ClientResult>> { match *self { @@ -408,7 +408,7 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { match *self { @@ -437,7 +437,7 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, action: A, ) { match *self { @@ -450,7 +450,7 @@ impl StateBackend for GenesisOrUnavailableState fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], action: A, ) { @@ -475,7 +475,7 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 38bf4aaf24eb3..ed6c04816ceca 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -351,7 +351,7 @@ pub mod tests { use sp_state_machine::Backend; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; type TestChecker = LightDataChecker< NativeExecutor, @@ -399,13 +399,14 @@ pub mod tests { } fn prepare_for_read_child_proof_check() -> 
(TestChecker, Header, StorageProof, Vec) { + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( b":child_storage:default:child1".to_vec(), - CHILD_INFO_1, + child_info1, b"key1".to_vec(), b"value1".to_vec(), ).build(); @@ -419,14 +420,14 @@ pub mod tests { let child_value = remote_client.child_storage( &remote_block_id, &StorageKey(b":child_storage:default:child1".to_vec()), - CHILD_INFO_1, + child_info1, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, b":child_storage:default:child1", - CHILD_INFO_1, + child_info1, &[b"key1"], ).unwrap(); @@ -504,7 +505,8 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - let child_infos = CHILD_INFO_1.info(); + ; + let child_infos = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap().info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 80ba18dcd130e..1cfc7fa8398a7 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -420,7 +420,7 @@ impl StateDbSync { pub fn get( &self, - trie: Option, + trie: Option<&ChildInfo>, key: &Key, db: &D, ) -> Result, Error> @@ -497,7 +497,7 @@ impl StateDb { /// Get a value from non-canonical/pruning overlay or the backing DB. pub fn get( &self, - trie: Option, + trie: Option<&ChildInfo>, key: &Key, db: &D, ) -> Result, Error> diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 24e7a9f6369dd..6c5988446e881 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -460,7 +460,7 @@ impl NonCanonicalOverlay { } /// Get a value from the node overlay. This searches in every existing changeset. - pub fn get(&self, trie: Option, key: &Key) -> Option { + pub fn get(&self, trie: Option<&ChildInfo>, key: &Key) -> Option { // TODO make storage over data representation of OwnedChildInfo to use borrow if let Some(values) = self.values.get(&trie.map(|t| t.to_owned())) { if let Some((_, value)) = values.get(&key) { diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 3615673f2d9dc..bf326dc44e70b 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -27,7 +27,7 @@ use sp_std::prelude::*; use sp_io::hashing::blake2_256; use sp_runtime::traits::{Bounded, Zero}; use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance, UpdateBalanceOutcome}; -use frame_support::{storage::child, StorageMap}; +use frame_support::{storage::child, StorageMap, storage::child::ChildInfo}; use frame_system; // Note: we don't provide Option because we can't create @@ -108,7 +108,13 @@ pub trait AccountDb { /// /// Trie id is None iff account doesn't have an associated trie id in >. 
/// Because DirectAccountDb bypass the lookup for this association. - fn get_storage(&self, account: &T::AccountId, trie_id: Option<&TrieId>, location: &StorageKey) -> Option>; + fn get_storage( + &self, + account: &T::AccountId, + trie_id: Option<&TrieId>, + child_info: Option<&ChildInfo>, + location: &StorageKey + ) -> Option>; /// If account has an alive contract then return the code hash associated. fn get_code_hash(&self, account: &T::AccountId) -> Option>; /// If account has an alive contract then return the rent allowance associated. @@ -126,9 +132,14 @@ impl AccountDb for DirectAccountDb { &self, _account: &T::AccountId, trie_id: Option<&TrieId>, + child_info: Option<&ChildInfo>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| if let Some(child_info) = child_info { + child::get_raw(id, child_info, &blake2_256(location)) + } else { + child::get_raw(id, &*crate::trie_unique_id(&id[..]), &blake2_256(location)) + }) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -173,13 +184,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. 
(true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -216,19 +227,20 @@ impl AccountDb for DirectAccountDb { new_info.last_write = Some(>::block_number()); } + let child_info = &*new_info.child_trie_unique_id(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( &new_info.trie_id[..], - new_info.child_trie_unique_id(), + child_info, &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(&new_info.trie_id[..], child_info, &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(&new_info.trie_id[..], child_info, &blake2_256(&k)); } } @@ -334,13 +346,14 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { &self, account: &T::AccountId, trie_id: Option<&TrieId>, + child_info: Option<&ChildInfo>, location: &StorageKey ) -> Option> { self.local .borrow() .get(account) .and_then(|changes| changes.storage(location)) - .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, location)) + .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, child_info, location)) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { self.local diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index cfbefa2a72c93..87dbcacde5f43 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -23,7 +23,7 @@ use crate::rent; use sp_std::prelude::*; use sp_runtime::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; use frame_support::{ - storage::unhashed, dispatch::DispatchError, + storage::unhashed, 
dispatch::DispatchError, storage::child::OwnedChildInfo, traits::{WithdrawReason, Currency, Time, Randomness}, }; @@ -277,6 +277,7 @@ pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub parent: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, pub self_trie_id: Option, + pub self_child_info: Option, pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, @@ -301,6 +302,7 @@ where ExecutionContext { parent: None, self_trie_id: None, + self_child_info: None, self_account: origin, overlay: OverlayAccountDb::::new(&DirectAccountDb), depth: 0, @@ -313,12 +315,13 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option, child_info: Option) -> ExecutionContext<'b, T, V, L> { ExecutionContext { parent: Some(self), self_trie_id: trie_id, + self_child_info: child_info, self_account: dest, overlay: OverlayAccountDb::new(&self.overlay), depth: self.depth + 1, @@ -371,8 +374,9 @@ where let caller = self.self_account.clone(); let dest_trie_id = contract_info.and_then(|i| i.as_alive().map(|i| i.trie_id.clone())); + let dest_child_info = dest_trie_id.as_ref().map(|id| crate::trie_unique_id(id)); - self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + self.with_nested_context(dest.clone(), dest_trie_id, dest_child_info, |nested| { if value > BalanceOf::::zero() { try_or_exec_error!( transfer( @@ -457,8 +461,9 @@ where // TrieId has not been generated yet and storage is empty since contract is new. 
let dest_trie_id = None; + let dest_child_info = None; - let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + let output = self.with_nested_context(dest.clone(), dest_trie_id, dest_child_info, |nested| { try_or_exec_error!( nested.overlay.instantiate_contract(&dest, code_hash.clone()), input_data @@ -524,12 +529,17 @@ where } } - fn with_nested_context(&mut self, dest: T::AccountId, trie_id: Option, func: F) - -> ExecResult + fn with_nested_context( + &mut self, + dest: T::AccountId, + trie_id: Option, + child_info: Option, + func: F, + ) -> ExecResult where F: FnOnce(&mut ExecutionContext) -> ExecResult { let (output, change_set, deferred) = { - let mut nested = self.nested(dest, trie_id); + let mut nested = self.nested(dest, trie_id, child_info); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) }; @@ -695,7 +705,12 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - self.ctx.overlay.get_storage(&self.ctx.self_account, self.ctx.self_trie_id.as_ref(), key) + self.ctx.overlay.get_storage( + &self.ctx.self_account, + self.ctx.self_trie_id.as_ref(), + self.ctx.self_child_info.as_deref(), + key, + ) } fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str> { diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 9ac43cbb50784..9811a52246c45 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -225,15 +225,15 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_unique_id(&self) -> child::ChildInfo { + pub fn child_trie_unique_id(&self) -> child::OwnedChildInfo { trie_unique_id(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { +pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::OwnedChildInfo { let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); - child::ChildInfo::new_default(&trie_id[start ..]) + child::OwnedChildInfo::new_default(&trie_id[start ..]) } pub type TombstoneContractInfo = @@ -716,10 +716,12 @@ impl Module { .get_alive() .ok_or(GetStorageError::IsTombstone)?; + let child_trie = contract_info.child_trie_unique_id(); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, Some(&contract_info.trie_id), + Some(&*child_trie), &key, ); Ok(maybe_value) @@ -826,16 +828,17 @@ impl Module { origin_contract.last_write }; + let child_trie = origin_contract.child_trie_unique_id(); let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &*child_trie, &blake2_256(key), ).map(|value| { child::kill( &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &*child_trie, &blake2_256(key), ); @@ -857,7 +860,7 @@ impl Module { for (key, value) in key_values_taken { child::put_raw( &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &*child_trie, &blake2_256(key), &value, ); @@ -957,7 +960,7 @@ decl_storage! 
{ impl OnFreeBalanceZero for Module { fn on_free_balance_zero(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 46f915e64264f..3967fe03cf21b 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -183,7 +183,7 @@ fn enact_verdict( >::remove(account); child::kill_storage( &alive_contract_info.trie_id, - alive_contract_info.child_trie_unique_id(), + &*alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -205,7 +205,7 @@ fn enact_verdict( child::kill_storage( &alive_contract_info.trie_id, - alive_contract_info.child_trie_unique_id(), + &*alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 9a2ef36bb86f0..61e490a4210d4 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -320,6 +320,10 @@ fn account_removal_removes_storage() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = ::TrieIdGenerator::trie_id(&1); let trie_id2 = ::TrieIdGenerator::trie_id(&2); + let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); + let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); + let child_info1 = Some(&*child_info1); + let child_info2 = Some(&*child_info2); let key1 = &[1; 32]; let key2 = &[2; 32]; @@ -365,15 +369,15 @@ fn account_removal_removes_storage() { // Verify that all entries from account 1 is removed, while // entries from account 2 is in place. 
{ - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1).is_none()); - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2).is_none()); + assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), child_info1, key1).is_none()); + assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), child_info2, key2).is_none()); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key1), + >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), child_info2, key1), Some(b"3".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key2), + >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), child_info2, key2), Some(b"4".to_vec()) ); } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index f549ffc25fd94..d1dd459b9635e 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -27,12 +27,12 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::ChildInfo; +pub use sp_core::storage::{ChildInfo, OwnedChildInfo}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let (data, child_type) = child_info.info(); @@ -54,7 +54,7 @@ pub fn get( /// explicit entry. pub fn get_or_default( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { get(storage_key, child_info, key).unwrap_or_else(Default::default) @@ -64,7 +64,7 @@ pub fn get_or_default( /// explicit entry. pub fn get_or( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { @@ -75,7 +75,7 @@ pub fn get_or( /// explicit entry. 
pub fn get_or_else T>( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { @@ -85,7 +85,7 @@ pub fn get_or_else T>( /// Put `value` in storage under `key`. pub fn put( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &T, ) { @@ -104,7 +104,7 @@ pub fn put( /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let r = get(storage_key, child_info, key); @@ -118,7 +118,7 @@ pub fn take( /// the default for its type. pub fn take_or_default( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { take(storage_key, child_info, key).unwrap_or_else(Default::default) @@ -128,7 +128,7 @@ pub fn take_or_default( /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { @@ -139,7 +139,7 @@ pub fn take_or( /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { @@ -149,7 +149,7 @@ pub fn take_or_else T>( /// Check to see if `key` has an explicit entry in storage. pub fn exists( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { let (data, child_type) = child_info.info(); @@ -162,7 +162,7 @@ pub fn exists( /// Remove all `storage_key` key/values pub fn kill_storage( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, ) { let (data, child_type) = child_info.info(); sp_io::storage::child_storage_kill( @@ -175,7 +175,7 @@ pub fn kill_storage( /// Ensure `key` has no explicit entry in storage. 
pub fn kill( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { let (data, child_type) = child_info.info(); @@ -190,7 +190,7 @@ pub fn kill( /// Get a Vec of bytes from storage. pub fn get_raw( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let (data, child_type) = child_info.info(); @@ -205,7 +205,7 @@ pub fn get_raw( /// Put a raw byte slice into storage. pub fn put_raw( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &[u8], ) { diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 350b65d190840..b6006d61bd242 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -48,7 +48,7 @@ pub trait Externalities: ExtensionStore { fn child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -61,7 +61,7 @@ pub trait Externalities: ExtensionStore { fn original_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -78,7 +78,7 @@ pub trait Externalities: ExtensionStore { fn original_child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -88,7 +88,7 @@ pub trait Externalities: ExtensionStore { fn child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -101,7 +101,7 @@ pub trait Externalities: ExtensionStore { fn set_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Vec, ) { @@ -117,7 +117,7 @@ pub trait Externalities: ExtensionStore { fn clear_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { 
self.place_child_storage(storage_key, child_info, key.to_vec(), None) @@ -132,7 +132,7 @@ pub trait Externalities: ExtensionStore { fn exists_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { self.child_storage(storage_key, child_info, key).is_some() @@ -145,12 +145,12 @@ pub trait Externalities: ExtensionStore { fn next_child_storage_key( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo); + fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: &ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -159,7 +159,7 @@ pub trait Externalities: ExtensionStore { fn clear_child_prefix( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ); @@ -170,7 +170,7 @@ pub trait Externalities: ExtensionStore { fn place_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Option>, ); diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index dce67133d39a3..454b732fe779e 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -38,7 +38,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::{ChildStorageKey, ChildInfo}, + storage::{ChildStorageKey, OwnedChildInfo, ChildType}, }; use sp_core::{ @@ -82,6 +82,14 @@ fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey { } } +#[cfg(feature = "std")] +fn resolve_child_info(child_type: u32, child_definition: &[u8]) -> OwnedChildInfo { + if child_type != ChildType::CryptoUniqueId as u32 { + panic!("Invalid child definition"); + } + 
OwnedChildInfo::new_default(&child_definition[..]) +} + /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -109,9 +117,8 @@ pub trait Storage { key: &[u8], ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) + let child_info = resolve_child_info(child_type, child_definition); + self.child_storage(storage_key, &*child_info, key).map(|s| s.to_vec()) } /// Get `key` from storage, placing the value into `value_out` and return the number of @@ -146,9 +153,8 @@ pub trait Storage { value_offset: u32, ) -> Option { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key) + let child_info = resolve_child_info(child_type, child_definition); + self.child_storage(storage_key, &*child_info, key) .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -175,9 +181,8 @@ pub trait Storage { value: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec()); + let child_info = resolve_child_info(child_type, child_definition); + self.set_child_storage(storage_key, &*child_info, key.to_vec(), value.to_vec()); } /// Clear the storage of the given `key` and its value. 
@@ -196,9 +201,8 @@ pub trait Storage { key: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.clear_child_storage(storage_key, child_info, key); + let child_info = resolve_child_info(child_type, child_definition); + self.clear_child_storage(storage_key, &*child_info, key); } /// Clear an entire child storage. @@ -211,9 +215,8 @@ pub trait Storage { child_type: u32, ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.kill_child_storage(storage_key, child_info); + let child_info = resolve_child_info(child_type, child_definition); + self.kill_child_storage(storage_key, &*child_info); } /// Check whether the given `key` exists in storage. @@ -232,9 +235,8 @@ pub trait Storage { key: &[u8], ) -> bool { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.exists_child_storage(storage_key, child_info, key) + let child_info = resolve_child_info(child_type, child_definition); + self.exists_child_storage(storage_key, &*child_info, key) } /// Clear the storage of each key-value pair where the key starts with the given `prefix`. @@ -253,9 +255,8 @@ pub trait Storage { prefix: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.clear_child_prefix(storage_key, child_info, prefix); + let child_info = resolve_child_info(child_type, child_definition); + self.clear_child_prefix(storage_key, &*child_info, prefix); } /// "Commit" all existing operations and compute the resulting storage root. 
@@ -307,9 +308,8 @@ pub trait Storage { key: &[u8], ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.next_child_storage_key(storage_key, child_info, key) + let child_info = resolve_child_info(child_type, child_definition); + self.next_child_storage_key(storage_key, &*child_info, key) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 46930c35e8e8d..69bbb0adddf85 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -136,7 +136,7 @@ impl BuildStorage for sp_core::storage::Storage { let k = k.clone(); if let Some(map) = storage.children.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(other_map.child_info.as_ref()) { + if !map.child_info.try_update(&*other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index cdb226935cc42..f99ad53009261 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -55,7 +55,7 @@ pub trait Backend: std::fmt::Debug { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error>; @@ -63,7 +63,7 @@ pub trait Backend: std::fmt::Debug { fn child_storage_hash( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { self.child_storage(storage_key, child_info, key).map(|v| v.map(|v| H::hash(&v))) @@ -78,7 +78,7 @@ pub trait Backend: std::fmt::Debug { fn exists_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { Ok(self.child_storage(storage_key, child_info, key)?.is_some()) @@ -91,7 
+91,7 @@ pub trait Backend: std::fmt::Debug { fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result, Self::Error>; @@ -99,7 +99,7 @@ pub trait Backend: std::fmt::Debug { fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ); @@ -119,7 +119,7 @@ pub trait Backend: std::fmt::Debug { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ); @@ -138,7 +138,7 @@ pub trait Backend: std::fmt::Debug { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -159,7 +159,7 @@ pub trait Backend: std::fmt::Debug { fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { let mut all = Vec::new(); @@ -193,7 +193,7 @@ pub trait Backend: std::fmt::Debug { // child first for (storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); + self.child_storage_root(&storage_key[..], &*child_info, child_delta); txs.consolidate(child_txs); if empty { if return_child_roots { @@ -237,7 +237,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { (*self).child_storage(storage_key, child_info, key) @@ -246,7 +246,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { (*self).for_keys_in_child_storage(storage_key, child_info, f) @@ -259,7 +259,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, storage_key: &[u8], - 
child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { (*self).next_child_storage_key(storage_key, child_info, key) @@ -272,7 +272,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -290,7 +290,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index d905657737a8a..5a17683354e4d 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -130,7 +130,7 @@ impl Externalities for BasicExternalities { fn child_storage( &self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, key: &[u8], ) -> Option { self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned() @@ -139,7 +139,7 @@ impl Externalities for BasicExternalities { fn child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { self.child_storage(storage_key, child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) @@ -148,7 +148,7 @@ impl Externalities for BasicExternalities { fn original_child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { self.child_storage_hash(storage_key, child_info, key) @@ -157,7 +157,7 @@ impl Externalities for BasicExternalities { fn original_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { Externalities::child_storage(self, storage_key, child_info, key) @@ -171,7 +171,7 @@ impl Externalities for BasicExternalities { fn 
next_child_storage_key( &self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, key: &[u8], ) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); @@ -194,7 +194,7 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { @@ -213,7 +213,7 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, ) { self.inner.children.remove(storage_key.as_ref()); } @@ -241,7 +241,7 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, prefix: &[u8], ) { if let Some(child) = self.inner.children.get_mut(storage_key.as_ref()) { @@ -289,7 +289,7 @@ impl Externalities for BasicExternalities { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 + .child_storage_root(storage_key.as_ref(), &*child.child_info, delta).0 } else { default_child_trie_root::>(storage_key.as_ref()) }.encode() @@ -315,7 +315,7 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; #[test] fn commit_should_work() { @@ -340,6 +340,7 @@ mod tests { #[test] fn children_works() { + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); let child_storage = b":child_storage:default:test".to_vec(); let mut ext = BasicExternalities::new(Storage { @@ -347,23 +348,23 @@ mod tests { children: map![ child_storage.clone() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: 
CHILD_INFO_1.to_owned(), + child_info: child_info1.to_owned(), } ] }); let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child(), child_info1, b"doe"), Some(b"reindeer".to_vec())); - ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child(), child_info1, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child(), child_info1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child(), CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(child(), child_info1, b"dog"); + assert_eq!(ext.child_storage(child(), child_info1, b"dog"), None); - ext.kill_child_storage(child(), CHILD_INFO_1); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(child(), child_info1); + assert_eq!(ext.child_storage(child(), child_info1, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 16e6a2da4583f..639a29962ea99 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -138,7 +138,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.child_info(sk).cloned(); + let child_info = changes.child_info(sk).to_owned(); ( changes.committed.children.get(sk).map(|c| &c.0), changes.prospective.children.get(sk).map(|c| &c.0), @@ -157,8 +157,8 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( // AND are not in storage at the beginning of operation if let Some(sk) = 
storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(sk, child_info.as_ref(), k) + if let Some(child_info) = child_info.as_deref() { + if !backend.exists_child_storage(sk, child_info, k) .map_err(|e| format!("{}", e))? { return Ok(map); } @@ -351,8 +351,8 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_2: &'static [u8] = b"\x01\x00\x00\x00unique_id_2"; fn prepare_for_build(zero: u64) -> ( InMemoryBackend, @@ -360,6 +360,9 @@ mod test { OverlayedChanges, Configuration, ) { + + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info2 = ChildInfo::resolve_child_info(CHILD_INFO_2).unwrap(); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -436,13 +439,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info1.to_owned())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_2.to_owned())), + ].into_iter().collect(), child_info2.to_owned())), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -465,7 +468,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info1.to_owned())), ].into_iter().collect(), }, collect_extrinsics: true, diff --git a/primitives/state-machine/src/ext.rs 
b/primitives/state-machine/src/ext.rs index 39dbe2e901592..41bfcdd906d1f 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -206,7 +206,7 @@ where fn child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -231,7 +231,7 @@ where fn child_storage_hash( &self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -255,7 +255,7 @@ where fn original_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -276,7 +276,7 @@ where fn original_child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -312,7 +312,7 @@ where fn exists_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -351,7 +351,7 @@ where fn next_child_storage_key( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let next_backend_key = self.backend @@ -396,7 +396,7 @@ where fn place_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { @@ -415,7 +415,7 @@ where fn kill_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, @@ -451,7 +451,7 @@ where fn clear_child_prefix( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: 
&ChildInfo, prefix: &[u8], ) { trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", @@ -507,7 +507,7 @@ where } else { let storage_key = storage_key.as_ref(); - if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { + if let Some(child_info) = self.overlay.child_info(storage_key).to_owned() { let (root, _is_empty, _) = { let delta = self.overlay.committed.children.get(storage_key) .into_iter() @@ -518,7 +518,7 @@ where .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); - self.backend.child_storage_root(storage_key, child_info.as_ref(), delta) + self.backend.child_storage_root(storage_key, child_info, delta) }; let root = root.encode(); @@ -714,14 +714,14 @@ mod tests { fn next_child_storage_key_works() { const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_UUID_1: &[u8] = b"\x01\x00\x00\x00unique_id_1"; + let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); let mut cache = StorageTransactionCache::default(); let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![20], None); + overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ @@ -731,7 +731,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info1.to_owned(), } ], }.into(); @@ -740,22 +740,22 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - 
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); + overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[40]), Some(vec![50])); } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index ab96a63c63686..f083e085e1b56 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -165,9 +165,9 @@ impl From, StorageCollectio impl InMemory { /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { + pub fn child_storage_keys(&self) -> impl Iterator { self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], v.1.as_ref())) + item.0.as_ref().map(|v|(&v.0[..], &*v.1)) ) } } @@ -187,7 +187,7 @@ impl Backend for 
InMemory where H::Out: Codec { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) @@ -211,7 +211,7 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, mut f: F, ) { self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) @@ -221,7 +221,7 @@ impl Backend for InMemory where H::Out: Codec { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -253,7 +253,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -293,7 +293,7 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); @@ -320,7 +320,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) @@ -366,7 +366,7 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); + let child_info = OwnedChildInfo::new_default(b"unique_id_1"); let mut storage = storage.update( vec![( Some((b"1".to_vec(), child_info.clone())), @@ -374,7 +374,7 @@ mod tests { )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), + 
assert_eq!(trie_backend.child_storage(b"1", &*child_info, b"2").unwrap(), Some(b"3".to_vec())); assert!(trie_backend.storage(b"1").unwrap().is_some()); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 66da5b8920450..5b62c5ad3e05c 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -551,7 +551,7 @@ where pub fn prove_child_read( mut backend: B, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -591,7 +591,7 @@ where pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -604,7 +604,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend - .child_storage(storage_key, child_info.clone(), key.as_ref()) + .child_storage(storage_key, child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -680,7 +680,7 @@ where H::Out: Ord + Codec, { // Not a prefixed memory db, using empty unique id and include root resolution. 
- proving_backend.child_storage(storage_key, ChildInfo::new_default(&[]), key) + proving_backend.child_storage(storage_key, ChildInfo::top_trie(), key) .map_err(|e| Box::new(e) as Box) } @@ -702,7 +702,7 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -932,6 +932,8 @@ mod tests { #[test] fn set_child_storage_works() { + + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -946,26 +948,26 @@ mod tests { ext.set_child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info1, ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info1, b"abc" ), None @@ -974,6 +976,8 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { + + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -1001,7 +1005,7 @@ mod tests { let remote_proof = prove_child_read( remote_backend, b":child_storage:default:sub1", - CHILD_INFO_1, + child_info1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( diff --git 
a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index d983680ff0797..7de9885dce550 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -251,7 +251,7 @@ impl OverlayedChanges { pub(crate) fn set_child_storage( &mut self, storage_key: StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, val: Option, ) { @@ -279,7 +279,7 @@ impl OverlayedChanges { pub(crate) fn clear_child_storage( &mut self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); let map_entry = self.prospective.children.entry(storage_key.to_vec()) @@ -353,7 +353,7 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index(); @@ -537,8 +537,9 @@ impl OverlayedChanges { .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), - self.child_info(storage_key).cloned() - .expect("child info initialized in either committed or prospective"), + self.child_info(storage_key) + .expect("child info initialized in either committed or prospective") + .to_owned(), ) ); @@ -586,12 +587,12 @@ impl OverlayedChanges { /// Get child info for a storage key. /// Take the latest value so prospective first. 
- pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> { + pub fn child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { if let Some((_, ci)) = self.prospective.children.get(storage_key) { - return Some(&ci); + return Some(&*ci); } if let Some((_, ci)) = self.committed.children.get(storage_key) { - return Some(&ci); + return Some(&*ci); } None } @@ -843,7 +844,7 @@ mod tests { #[test] fn next_child_storage_key_change_works() { let child = b"Child1".to_vec(); - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = ChildInfo::resolve_child_info(b"\x01\x00\x00\x00uniqueid").unwrap(); let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 65e5d25027c9d..e38ca5d573357 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -145,7 +145,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> pub fn child_storage( &mut self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result>, String> { let root = self.storage(storage_key)? 
@@ -284,7 +284,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.0.child_storage(storage_key, child_info, key) @@ -293,7 +293,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.0.for_keys_in_child_storage(storage_key, child_info, f) @@ -306,7 +306,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.0.next_child_storage_key(storage_key, child_info, key) @@ -323,7 +323,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -341,7 +341,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.0.child_keys(storage_key, child_info, prefix) @@ -358,7 +358,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -411,8 +411,8 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_2: &'static [u8] = b"\x01\x00\x00\x00unique_id_2"; fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, @@ -482,15 +482,17 @@ mod tests { #[test] 
fn proof_recorded_and_checked_with_child() { + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info2 = ChildInfo::resolve_child_info(CHILD_INFO_2).unwrap(); let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); let own1 = subtrie1.into_owned(); let own2 = subtrie2.into_owned(); let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), CHILD_INFO_1.to_owned())), + (Some((own1.clone(), child_info1.to_owned())), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), CHILD_INFO_2.to_owned())), + (Some((own2.clone(), child_info2.to_owned())), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); @@ -505,11 +507,11 @@ mod tests { vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], CHILD_INFO_1, &[i]).unwrap().unwrap(), + in_memory.child_storage(&own1[..], child_info1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], CHILD_INFO_2, &[i]).unwrap().unwrap(), + in_memory.child_storage(&own2[..], child_info2, &[i]).unwrap().unwrap(), vec![i] )); @@ -537,7 +539,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(&own1[..], child_info1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -545,7 +547,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(&own1[..], CHILD_INFO_1, &[64]).unwrap().unwrap(), + proof_check.child_storage(&own1[..], child_info1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/trie_backend.rs 
b/primitives/state-machine/src/trie_backend.rs index 6d445bc7c7562..17a0d6fda8c15 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -82,7 +82,7 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { // TODO switch to &mut self like in overlay pr @@ -101,7 +101,7 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { // TODO switch to &mut self like in overlay pr @@ -124,7 +124,7 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { // TODO switch to &mut self like in overlay pr @@ -137,7 +137,7 @@ impl, H: Hasher> Backend for TrieBackend where fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -215,7 +215,7 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -272,7 +272,7 @@ impl, H: Hasher> TrieBackend where fn child_essence<'a>( &'a self, storage_key: &[u8], - child_info: ChildInfo<'a>, + child_info: &'a ChildInfo, buffer: &'a mut Vec, ) -> Result, H>>, >::Error> { let root: Option = self.storage(storage_key)? 
@@ -299,14 +299,14 @@ pub mod tests { const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_UUID_1: &[u8] = b"\x01\x00\x00\x00unique_id_1"; fn test_db() -> (PrefixedMemoryDB, H256) { + let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info1.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -340,9 +340,10 @@ pub mod tests { #[test] fn read_from_child_storage_returns_some() { + let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3").unwrap(), + test_trie.child_storage(CHILD_KEY_1, child_info1, b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 6df54341f74fa..cd6cb9f45c6b2 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -36,7 +36,7 @@ pub trait Storage: Send + Sync { /// Get a trie node. 
fn get( &self, - trie: Option, + trie: Option<&ChildInfo>, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -141,7 +141,7 @@ impl, H: Hasher> TrieBackendEssence where H::O root: &H::Out, prefix: &[u8], mut f: F, - child_info: Option, + child_info: Option<&ChildInfo>, ) { let eph = BackendStorageDBRef::new(&self.storage); @@ -417,8 +417,7 @@ impl TrieBackendStorageRef for (Arc>, Option Result, String> { - let child_info = self.1.as_ref(); - Storage::::get(self.0.deref(), child_info.map(|c| c.as_ref()), key, prefix) + Storage::::get(self.0.deref(), self.1.as_deref(), key, prefix) } } @@ -426,14 +425,14 @@ impl TrieBackendStorageRef for (Arc>, Option> { db: &'a B, - info: Option>, + info: Option<&'a ChildInfo>, buffer: &'a mut Vec, _ph: PhantomData, } impl<'a, H: Hasher, B: TrieBackendStorageRef> ChildTrieBackendStorage<'a, H, B> { /// Instantiate a `ChildTrieBackendStorage`. - pub fn new(db: &'a B, info: Option>, buffer: &'a mut Vec) -> Self { + pub fn new(db: &'a B, info: Option<&'a ChildInfo>, buffer: &'a mut Vec) -> Self { ChildTrieBackendStorage { db, info, @@ -504,7 +503,7 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = ChildInfo::resolve_child_info(b"\x01\x00\x00\x00uniqueid").unwrap(); // Contains values let mut root_1 = H256::default(); // Contains child trie diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 557b206ecb29c..c0af25fc9ba9c 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -11,7 +11,7 @@ sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } +codec = { package = "parity-scale-codec", 
version = "1.0.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 34e7f0ead6d18..1cba659ad3d95 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,13 +18,13 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -use codec::{Decode, Encode}; +use codec::{Decode, Encode, Output}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; -use sp_std::{vec::Vec, borrow::Cow}; +use sp_std::{vec, vec::Vec, borrow::Cow, borrow::Borrow, + borrow::ToOwned, convert::TryInto, ops::Deref}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -177,131 +177,171 @@ impl<'a> ChildStorageKey<'a> { } } -#[derive(Clone, Copy)] +#[repr(transparent)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] /// Information related to a child state. -pub enum ChildInfo<'a> { - Default(ChildTrie<'a>), +pub struct ChildInfo([u8]); + +impl Encode for ChildInfo { + fn encode_to(&self, output: &mut T) { + self.0.encode_to(output) + } } /// Owned version of `ChildInfo`. /// To be use in persistence layers. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] -pub enum OwnedChildInfo { - Default(OwnedChildTrie), -} +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +#[repr(transparent)] +pub struct OwnedChildInfo(Vec); -impl<'a> ChildInfo<'a> { - /// Instantiates information for a default child trie. - pub const fn new_default(unique_id: &'a[u8]) -> Self { - ChildInfo::Default(ChildTrie { - data: unique_id, - }) +impl ToOwned for ChildInfo { + type Owned = OwnedChildInfo; + + fn to_owned(&self) -> Self::Owned { + OwnedChildInfo(self.0.to_owned()) } +} - /// Instantiates a owned version of this child info. 
- pub fn to_owned(&self) -> OwnedChildInfo { - match self { - ChildInfo::Default(ChildTrie { data }) - => OwnedChildInfo::Default(OwnedChildTrie { - data: data.to_vec(), - }), +impl Borrow for OwnedChildInfo { + #[inline] + fn borrow(&self) -> &ChildInfo { + let data: &[u8] = self.0.borrow(); + unsafe { + sp_std::mem::transmute(data) } } +} + +impl Deref for OwnedChildInfo { + type Target = ChildInfo; + + #[inline] + fn deref(&self) -> &ChildInfo { + self.borrow() + } +} +impl ChildInfo { /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, data: &'a[u8]) -> Option { - match child_type { - x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), + pub fn resolve_child_info(data: &[u8]) -> Option<&Self> { + match ChildType::read_type(data) { + Some(x) if x == ChildType::CryptoUniqueId => Some({ + unsafe { + sp_std::mem::transmute(data) + } + }), _ => None, } } + /// Instantiates information for a child trie. + /// No check is done on consistency. + pub fn new_unchecked(data: &[u8]) -> &Self { + unsafe { + sp_std::mem::transmute(data) + } + } + + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. + pub fn top_trie() -> &'static Self { + Self::new_unchecked(b"\x01\x00\x00\x00") + } + /// Return a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { - match self { - ChildInfo::Default(ChildTrie { - data, - }) => (data, ChildType::CryptoUniqueId as u32), - } + let child_type = ChildType::read_type_unchecked(&self.0); + (&self.0, child_type as u32) } /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. 
pub fn keyspace(&self) -> &[u8] { - match self { - ChildInfo::Default(ChildTrie { - data, - }) => &data[..], + match ChildType::read_type_unchecked(&self.0) { + ChildType::CryptoUniqueId => &self.0[4..], } } + + fn child_type(&self) -> ChildType { + ChildType::read_type_unchecked(&self.0[..]) + } } -/// Type of child. +/// Type of child, it is encoded in the four first byte of the +/// encoded child info (LE u32). /// It does not strictly define different child type, it can also /// be related to technical consideration or api variant. #[repr(u32)] +#[derive(Clone, Copy, PartialEq)] pub enum ChildType { /// Default, it uses a cryptographic strong unique id as input. + /// All bytes following the type in encoded form are this unique + /// id. + /// If the trie got a unique id of length 0 it is considered + /// as a top child trie. CryptoUniqueId = 1, } -impl OwnedChildInfo { - /// Instantiates info for a default child trie. - pub fn new_default(unique_id: Vec) -> Self { - OwnedChildInfo::Default(OwnedChildTrie { - data: unique_id, - }) - } +const LOWER_CHILD_TYPE: u32 = 1; +const HIGHER_CHILD_TYPE: u32 = 1; - /// Try to update with another instance, return false if both instance - /// are not compatible. - pub fn try_update(&mut self, other: ChildInfo) -> bool { - match self { - OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), +impl ChildType { + /// Try to read type from child definition. + pub fn read_type(slice: &[u8]) -> Option { + if slice.len() < 4 { + return None; } + slice[..4].try_into().ok() + .map(|b| u32::from_le_bytes(b)) + .filter(|b| *b >= LOWER_CHILD_TYPE && *b <= HIGHER_CHILD_TYPE) + .map(|b| unsafe { + sp_std::mem::transmute(b) + }) } - /// Get `ChildInfo` reference to this owned child info. 
- pub fn as_ref(&self) -> ChildInfo { - match self { - OwnedChildInfo::Default(OwnedChildTrie { data }) - => ChildInfo::Default(ChildTrie { - data: data.as_slice(), - }), - } + fn read_type_unchecked(slice: &[u8]) -> Self { + let child_type = u32::from_le_bytes(slice[..4].try_into() + .expect("This function is only called on initialized child info.")); + unsafe { sp_std::mem::transmute(child_type) } } } -/// A child trie of default type. -/// Default is the same implementation as the top trie. -/// It share its trie node storage with any kind of key, -/// and its unique id needs to be collision free (eg strong -/// crypto hash). -#[derive(Clone, Copy)] -pub struct ChildTrie<'a> { - /// Data containing unique id. - /// Unique id must but unique and free of any possible key collision - /// (depending on its storage behavior). - data: &'a[u8], -} - -/// Owned version of default child trie `ChildTrie`. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] -pub struct OwnedChildTrie { - /// See `ChildTrie` reference field documentation. - data: Vec, -} +impl OwnedChildInfo { + /// Create a new child trie information for default + /// child type. + pub fn new_default(unique_id: &[u8]) -> Self { + let mut vec = vec![0; unique_id.len() + 4]; + vec[..4].copy_from_slice(&(ChildType::CryptoUniqueId as u32).to_le_bytes()[..]); + vec[4..].copy_from_slice(unique_id); + OwnedChildInfo(vec) + } -impl OwnedChildTrie { /// Try to update with another instance, return false if both instance /// are not compatible. - fn try_update(&mut self, other: ChildInfo) -> bool { - match other { - ChildInfo::Default(other) => self.data[..] 
== other.data[..], + pub fn try_update(&self, other: &ChildInfo) -> bool { + match self.child_type() { + ChildType::CryptoUniqueId => { + match other.child_type() { + ChildType::CryptoUniqueId => self.deref() == other, + } + }, } } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_top_trie() { + let top_trie = ChildInfo::top_trie(); + assert!(top_trie.child_type() == ChildType::CryptoUniqueId); + assert_eq!(top_trie.encode(), top_trie.to_owned().encode()); + // 16 compact enc 4 and le 1 u32 + assert!(top_trie.encode() == vec![16, 1, 0, 0, 0]); + assert_eq!(top_trie.keyspace(), &[]); + } +} diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index bb8c7f880aa92..08d7b2d590866 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -405,7 +405,7 @@ mod tests { use codec::{Encode, Compact}; use sp_core::Blake2Hasher; use hash_db::HashDB; - use sp_core::{Hasher, InnerHasher}; + use sp_core::InnerHasher; use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index e95c5ad162760..61a6730bf8d1f 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -131,7 +131,7 @@ impl TestClientBuilder, child_key: impl AsRef<[u8]>, - child_info: ChildInfo, + child_info: &ChildInfo, value: impl AsRef<[u8]>, ) -> Self { let entry = self.child_storage_extension.entry(key.as_ref().to_vec()) diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 21cf94dfa673a..84fc61eb0b6b1 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -195,7 +195,7 @@ pub trait TestClientBuilderExt: Sized { fn add_extra_child_storage>, K: Into>, V: Into>>( mut self, storage_key: SK, - child_info: ChildInfo, + child_info: &ChildInfo, key: K, value: V, ) -> Self { From 
25aaa3a1c602f6408c5b15751595f614b657757d Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Mon, 3 Feb 2020 09:15:05 +0100 Subject: [PATCH 016/185] Removing unsafe cast, using ref_cast asumption for borrow case. --- Cargo.lock | 21 ++++++++++++++++++ primitives/storage/Cargo.toml | 1 + primitives/storage/src/lib.rs | 41 ++++++++++++++++------------------- 3 files changed, 41 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e1f02f5aa101..056d4ad0c2772 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4902,6 +4902,24 @@ dependencies = [ "rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ref-cast" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ref-cast-impl 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "regex" version = "1.3.3" @@ -6782,6 +6800,7 @@ version = "2.0.0" dependencies = [ "impl-serde 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ref-cast 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 2.0.0", "sp-std 2.0.0", @@ -8774,6 +8793,8 @@ dependencies = [ "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" "checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +"checksum ref-cast 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" +"checksum ref-cast-impl 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" "checksum regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87" "checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" "checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90" diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index c0af25fc9ba9c..ebb3062a37313 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -12,6 +12,7 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +ref-cast = "1.0.0" [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 1cba659ad3d95..8371ae30680bd 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -22,6 +22,7 @@ use codec::{Decode, Encode, Output}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; +use ref_cast::RefCast; use sp_std::{vec, vec::Vec, borrow::Cow, borrow::Borrow, borrow::ToOwned, 
convert::TryInto, ops::Deref}; @@ -178,7 +179,7 @@ impl<'a> ChildStorageKey<'a> { } #[repr(transparent)] -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RefCast)] /// Information related to a child state. pub struct ChildInfo([u8]); @@ -206,9 +207,7 @@ impl Borrow for OwnedChildInfo { #[inline] fn borrow(&self) -> &ChildInfo { let data: &[u8] = self.0.borrow(); - unsafe { - sp_std::mem::transmute(data) - } + ChildInfo::ref_cast(data) } } @@ -225,11 +224,9 @@ impl ChildInfo { /// Create child info from a linear byte packed value and a given type. pub fn resolve_child_info(data: &[u8]) -> Option<&Self> { match ChildType::read_type(data) { - Some(x) if x == ChildType::CryptoUniqueId => Some({ - unsafe { - sp_std::mem::transmute(data) - } - }), + Some(x) if x == ChildType::CryptoUniqueId => Some( + ChildInfo::ref_cast(data) + ), _ => None, } } @@ -237,9 +234,7 @@ impl ChildInfo { /// Instantiates information for a child trie. /// No check is done on consistency. pub fn new_unchecked(data: &[u8]) -> &Self { - unsafe { - sp_std::mem::transmute(data) - } + ChildInfo::ref_cast(data) } /// Top trie defined as the unique crypto id trie with @@ -284,10 +279,14 @@ pub enum ChildType { CryptoUniqueId = 1, } -const LOWER_CHILD_TYPE: u32 = 1; -const HIGHER_CHILD_TYPE: u32 = 1; - impl ChildType { + fn new(repr: u32) -> Option { + Some(match repr { + r if r == ChildType::CryptoUniqueId as u32 => ChildType::CryptoUniqueId, + _ => return None, + }) + } + /// Try to read type from child definition. 
pub fn read_type(slice: &[u8]) -> Option { if slice.len() < 4 { @@ -295,16 +294,14 @@ impl ChildType { } slice[..4].try_into().ok() .map(|b| u32::from_le_bytes(b)) - .filter(|b| *b >= LOWER_CHILD_TYPE && *b <= HIGHER_CHILD_TYPE) - .map(|b| unsafe { - sp_std::mem::transmute(b) - }) + .and_then(|b| ChildType::new(b)) } fn read_type_unchecked(slice: &[u8]) -> Self { - let child_type = u32::from_le_bytes(slice[..4].try_into() - .expect("This function is only called on initialized child info.")); - unsafe { sp_std::mem::transmute(child_type) } + slice[..4].try_into().ok() + .map(|b| u32::from_le_bytes(b)) + .and_then(|b| ChildType::new(b)) + .expect("This function is only called on initialized child info.") } } From 0d45d8559a46d297311574c1304245e371ea7ee4 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 3 Feb 2020 12:40:41 +0100 Subject: [PATCH 017/185] Borrow approach on OwnedChildInfo and ChildInfo did not make sense allocation whise unless we break all api for a close to nothing perf change: switching to simple single child info struct. 
--- Cargo.lock | 21 --- client/api/src/notifications.rs | 2 +- client/chain-spec/src/chain_spec.rs | 4 +- client/db/src/lib.rs | 7 +- client/db/src/storage_cache.rs | 6 +- client/network/src/protocol.rs | 6 +- client/network/src/protocol/light_dispatch.rs | 4 +- client/rpc/src/state/state_full.rs | 17 +- client/rpc/src/state/tests.rs | 10 +- client/src/light/backend.rs | 4 +- client/src/light/fetcher.rs | 15 +- client/state-db/src/lib.rs | 11 +- client/state-db/src/noncanonical.rs | 10 +- client/state-db/src/pruning.rs | 16 +- client/state-db/src/test.rs | 4 +- frame/contracts/src/account_db.rs | 8 +- frame/contracts/src/exec.rs | 41 ++--- frame/contracts/src/lib.rs | 19 +- frame/contracts/src/rent.rs | 4 +- frame/contracts/src/tests.rs | 4 +- frame/support/src/storage/child.rs | 2 +- primitives/io/src/lib.rs | 50 +++--- primitives/runtime/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 8 +- primitives/state-machine/src/basic.rs | 22 +-- .../state-machine/src/changes_trie/build.rs | 8 +- primitives/state-machine/src/ext.rs | 22 +-- .../state-machine/src/in_memory_backend.rs | 26 +-- primitives/state-machine/src/lib.rs | 18 +- .../state-machine/src/overlayed_changes.rs | 22 +-- .../state-machine/src/proving_backend.rs | 20 +-- primitives/state-machine/src/trie_backend.rs | 14 +- .../state-machine/src/trie_backend_essence.rs | 18 +- primitives/storage/Cargo.toml | 1 - primitives/storage/src/lib.rs | 164 ++++++------------ 35 files changed, 262 insertions(+), 348 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 056d4ad0c2772..7e1f02f5aa101 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4902,24 +4902,6 @@ dependencies = [ "rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ref-cast" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ref-cast-impl 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ref-cast-impl" 
-version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "regex" version = "1.3.3" @@ -6800,7 +6782,6 @@ version = "2.0.0" dependencies = [ "impl-serde 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ref-cast 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 2.0.0", "sp-std 2.0.0", @@ -8793,8 +8774,6 @@ dependencies = [ "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" "checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" -"checksum ref-cast 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" -"checksum ref-cast-impl 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" "checksum regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87" "checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" "checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90" diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index 13bf06396d163..72a9f357fce33 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -323,7 +323,7 @@ mod tests { let child_filters = Some([ (StorageKey(vec![4]), None), (StorageKey(vec![5]), None), - ].into_iter().cloned().collect()); + ].iter().cloned().collect()); StorageChangeSet { changes: Arc::new(changes.0), child_changes: Arc::new(changes.1), diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 8688e8ec9d1cd..173941f6624c6 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -22,7 +22,7 @@ use std::fs::File; use std::path::PathBuf; use std::rc::Rc; use serde::{Serialize, Deserialize}; -use sp_core::storage::{StorageKey, StorageData, OwnedChildInfo, Storage, StorageChild}; +use sp_core::storage::{StorageKey, StorageData, ChildInfo, Storage, StorageChild}; use sp_runtime::BuildStorage; use serde_json as json; use crate::RuntimeGenesis; @@ -77,7 +77,7 @@ impl BuildStorage for ChainSpec { Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = OwnedChildInfo::new_default(child_content.child_info.as_slice()); + let child_info = ChildInfo::new_default(child_content.child_info.as_slice()); ( sk.0, StorageChild { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index b60157d5429ba..e1c35c0d676c7 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -79,7 +79,6 @@ use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; use log::{trace, debug, warn}; pub use sc_state_db::PruningMode; -use sp_core::storage::OwnedChildInfo; #[cfg(feature = "test-helpers")] use 
sc_client::in_mem::Backend as InMemoryBackend; @@ -92,7 +91,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - (Arc>>, Option), HasherFor + (Arc>>, Option), HasherFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. @@ -514,7 +513,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: BTreeMap, PrefixedMemoryDB>>, + db_updates: BTreeMap, PrefixedMemoryDB>>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -571,7 +570,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_db_storage( &mut self, - update: BTreeMap, PrefixedMemoryDB>>, + update: BTreeMap, PrefixedMemoryDB>>, ) -> ClientResult<()> { self.db_updates = update; Ok(()) diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 71fae6771c39c..9a5c15e9910e6 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -677,7 +677,7 @@ mod tests { type Block = RawBlock>; - const CHILD_KEY_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_KEY_1: &'static [u8] = b"unique_id_1"; #[test] fn smoke() { @@ -968,7 +968,7 @@ mod tests { #[test] fn should_track_used_size_correctly() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_KEY_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_KEY_1); let root_parent = H256::random(); let shared = new_shared_cache::(109, ((109-36), 109)); let h0 = H256::random(); @@ -996,7 +996,7 @@ mod tests { &[], &[], vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], child_info1.to_owned())], + vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], child_info1)], Some(h0), Some(0), true, diff --git a/client/network/src/protocol.rs 
b/client/network/src/protocol.rs index 45f2ee3497380..68352b3f404fb 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::Substream, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, OwnedChildInfo, ChildType}; +use sp_core::storage::{StorageKey, ChildInfo, ChildType}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1556,11 +1556,11 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); let proof = if ChildType::CryptoUniqueId as u32 == request.child_type { - let child_info = OwnedChildInfo::new_default(&request.child_info[..]); + let child_info = ChildInfo::new_default(&request.child_info[..]); match self.context_data.chain.read_child_proof( &request.block, &request.storage_key, - &*child_info, + &child_info, &request.keys, ) { Ok(proof) => proof, diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index b50688eea67a0..bfa8daa181ca1 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -681,7 +681,7 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use futures::channel::oneshot; - use sp_core::storage::OwnedChildInfo; + use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, @@ -1035,7 +1035,7 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - let 
child_info = OwnedChildInfo::new_default(b"unique_id_1"); + let child_info = ChildInfo::new_default(b"unique_id_1"); let (child_info, child_type) = child_info.info(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { header: dummy_header(), diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 867bf5ff3314d..430c0230f0b6d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,7 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, OwnedChildInfo, ChildType}, + storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, Bytes, }; use sp_version::RuntimeVersion; @@ -291,7 +291,7 @@ impl StateBackend for FullState StateBackend for FullState StateBackend for FullState StateBackend for FullState Option { - if child_type != ChildType::CryptoUniqueId as u32 { - None - } else { - Some(OwnedChildInfo::new_default(&child_definition[..])) - } -} - - /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. 
diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index f18e31e9d30e3..dd26a8a42fac2 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,7 +30,7 @@ use substrate_test_runtime_client::{ runtime, }; -const CHILD_INFO: &'static [u8] = b"\x01\x00\x00\x00unique_id"; +const CHILD_INFO: &'static [u8] = b"unique_id"; #[test] fn should_return_storage() { @@ -38,11 +38,11 @@ fn should_return_storage() { const VALUE: &[u8] = b"hello world"; const STORAGE_KEY: &[u8] = b":child_storage:default:child"; const CHILD_VALUE: &[u8] = b"hello world !"; - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), child_info1, KEY.to_vec(), CHILD_VALUE.to_vec()) + .add_extra_child_storage(STORAGE_KEY.to_vec(), &child_info1, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); @@ -77,12 +77,12 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO); let (child_info, child_type) = child_info1.info(); let child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", child_info1, vec![42_u8]) + .add_child_storage("test", "key", &child_info1, vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 
12186a5b61ac2..f1bea18adc643 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -24,7 +24,7 @@ use parking_lot::RwLock; use codec::{Decode, Encode}; use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, @@ -312,7 +312,7 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new(); + let mut storage: HashMap, ChildInfo)>, _> = HashMap::new(); storage.insert(None, input.top); // create a list of children keys to re-compute roots for diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index ed6c04816ceca..8bcbb80c775a3 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -351,7 +351,7 @@ pub mod tests { use sp_state_machine::Backend; use super::*; - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; type TestChecker = LightDataChecker< NativeExecutor, @@ -399,14 +399,14 @@ pub mod tests { } fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( b":child_storage:default:child1".to_vec(), - child_info1, + &child_info1, b"key1".to_vec(), b"value1".to_vec(), 
).build(); @@ -420,14 +420,14 @@ pub mod tests { let child_value = remote_client.child_storage( &remote_block_id, &StorageKey(b":child_storage:default:child1".to_vec()), - child_info1, + &child_info1, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, b":child_storage:default:child1", - child_info1, + &child_info1, &[b"key1"], ).unwrap(); @@ -505,8 +505,9 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - ; - let child_infos = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap().info(); + + let child_info = ChildInfo::new_default(CHILD_INFO_1); + let child_infos = child_info.info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 1cfc7fa8398a7..046e40d0506af 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -40,7 +40,7 @@ use std::collections::{BTreeMap, HashMap, hash_map::Entry, btree_map::Entry as B use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; -use sp_core::storage::{OwnedChildInfo, ChildInfo}; +use sp_core::storage::ChildInfo; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -129,14 +129,17 @@ pub struct ChildTrieChangeSet { pub data: ChangeSet, /// Child trie descripton. /// If not set, this is the top trie. - pub info: Option, + pub info: Option, } /// Change sets of all child trie (top is key None). -pub type ChildTrieChangeSets = BTreeMap, ChangeSet>; +pub type ChildTrieChangeSets = BTreeMap, ChangeSet>; /// Extends for `ChildTrieChangeSets` is merging. -fn extend_change_sets(set: &mut ChildTrieChangeSets, other: impl Iterator, ChangeSet)>) { +fn extend_change_sets( + set: &mut ChildTrieChangeSets, + other: impl Iterator, ChangeSet)>, +) { for (ci, o_cs) in other { match set.entry(ci) { BEntry::Occupied(mut e) => { diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 6c5988446e881..0b3bb36f253be 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -25,16 +25,16 @@ use std::collections::{HashMap, VecDeque, hash_map::Entry, BTreeMap}; use super::{Error, DBValue, ChildTrieChangeSets, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; // version at start to avoid collision when adding a unit const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; -type 
Keys = Vec<(Option, Vec)>; -type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; -type ChildKeyVals = BTreeMap, HashMap>; +type Keys = Vec<(Option, Vec)>; +type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; +type ChildKeyVals = BTreeMap, HashMap>; /// See module documentation. pub struct NonCanonicalOverlay { @@ -461,7 +461,7 @@ impl NonCanonicalOverlay { /// Get a value from the node overlay. This searches in every existing changeset. pub fn get(&self, trie: Option<&ChildInfo>, key: &Key) -> Option { - // TODO make storage over data representation of OwnedChildInfo to use borrow + // TODO use top_trie instead of none if let Some(values) = self.values.get(&trie.map(|t| t.to_owned())) { if let Some((_, value)) = values.get(&key) { return Some(value.clone()); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index fdf5dec0515b7..77dd2e099ad8a 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,21 +26,21 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::ChildInfo; use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; -type Keys = Vec<(Option, Vec)>; +type Keys = Vec<(Option, Vec)>; /// See module documentation. pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. 
- death_index: HashMap, HashMap>, + death_index: HashMap, HashMap>, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -52,7 +52,7 @@ pub struct RefWindow { } impl RefWindow { - fn remove_death_index(&mut self, ct: &Option, key: &Key) -> Option { + fn remove_death_index(&mut self, ct: &Option, key: &Key) -> Option { if let Some(child_index) = self.death_index.get_mut(ct) { child_index.remove(key) } else { @@ -65,11 +65,11 @@ impl RefWindow { struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: HashMap, HashSet>, + deleted: HashMap, HashSet>, } impl DeathRow { - fn remove_deleted(&mut self, ct: &Option, key: &Key) -> bool { + fn remove_deleted(&mut self, ct: &Option, key: &Key) -> bool { if let Some(child_index) = self.deleted.get_mut(ct) { child_index.remove(key) } else { @@ -153,7 +153,7 @@ impl RefWindow { Ok(pruning) } - fn import, Vec)>>( + fn import, Vec)>>( &mut self, hash: &BlockHash, journal_key: Vec, @@ -178,7 +178,7 @@ impl RefWindow { entry.insert(k.clone(), imported_block); } } - let mut deleted_death_row = HashMap::, HashSet>::new(); + let mut deleted_death_row = HashMap::, HashSet>::new(); for (ct, deleted) in deleted.into_iter() { let entry = deleted_death_row.entry(ct).or_default(); entry.extend(deleted); diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index c7be13fb15595..76f7b09b83d84 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -19,11 +19,11 @@ use std::collections::HashMap; use sp_core::H256; use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSets}; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::ChildInfo; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: HashMap, HashMap>, + pub data: HashMap, HashMap>, pub meta: HashMap, DBValue>, } diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 
bf326dc44e70b..081f1edc501e8 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -138,7 +138,7 @@ impl AccountDb for DirectAccountDb { trie_id.and_then(|id| if let Some(child_info) = child_info { child::get_raw(id, child_info, &blake2_256(location)) } else { - child::get_raw(id, &*crate::trie_unique_id(&id[..]), &blake2_256(location)) + child::get_raw(id, &crate::trie_unique_id(&id[..]), &blake2_256(location)) }) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { @@ -184,13 +184,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. (true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -227,7 +227,7 @@ impl AccountDb for DirectAccountDb { new_info.last_write = Some(>::block_number()); } - let child_info = &*new_info.child_trie_unique_id(); + let child_info = &new_info.child_trie_unique_id(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( &new_info.trie_id[..], diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 87dbcacde5f43..bc91ebcec56d0 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -23,9 +23,10 @@ use crate::rent; use sp_std::prelude::*; use sp_runtime::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; use frame_support::{ - storage::unhashed, dispatch::DispatchError, storage::child::OwnedChildInfo, + storage::unhashed, dispatch::DispatchError, traits::{WithdrawReason, Currency, Time, Randomness}, }; +use 
sp_core::storage::ChildInfo; pub type AccountIdOf = ::AccountId; pub type CallOf = ::Call; @@ -276,8 +277,7 @@ pub enum DeferredAction { pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub parent: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, - pub self_trie_id: Option, - pub self_child_info: Option, + pub self_trie_info: Option<(TrieId, ChildInfo)>, pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, @@ -301,8 +301,7 @@ where pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { ExecutionContext { parent: None, - self_trie_id: None, - self_child_info: None, + self_trie_info: None, self_account: origin, overlay: OverlayAccountDb::::new(&DirectAccountDb), depth: 0, @@ -315,13 +314,12 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option, child_info: Option) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_info: Option<(TrieId, ChildInfo)>) -> ExecutionContext<'b, T, V, L> { ExecutionContext { parent: Some(self), - self_trie_id: trie_id, - self_child_info: child_info, + self_trie_info: trie_info, self_account: dest, overlay: OverlayAccountDb::new(&self.overlay), depth: self.depth + 1, @@ -374,9 +372,8 @@ where let caller = self.self_account.clone(); let dest_trie_id = contract_info.and_then(|i| i.as_alive().map(|i| i.trie_id.clone())); - let dest_child_info = dest_trie_id.as_ref().map(|id| crate::trie_unique_id(id)); - self.with_nested_context(dest.clone(), dest_trie_id, dest_child_info, |nested| { + self.with_nested_context(dest.clone(), dest_trie_id, |nested| { if value > BalanceOf::::zero() { try_or_exec_error!( transfer( @@ -461,9 +458,8 @@ where // TrieId has not been generated yet and storage is empty since contract is new. 
let dest_trie_id = None; - let dest_child_info = None; - let output = self.with_nested_context(dest.clone(), dest_trie_id, dest_child_info, |nested| { + let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { try_or_exec_error!( nested.overlay.instantiate_contract(&dest, code_hash.clone()), input_data @@ -529,17 +525,15 @@ where } } - fn with_nested_context( - &mut self, - dest: T::AccountId, - trie_id: Option, - child_info: Option, - func: F, - ) -> ExecResult + fn with_nested_context(&mut self, dest: T::AccountId, trie_id: Option, func: F) + -> ExecResult where F: FnOnce(&mut ExecutionContext) -> ExecResult { let (output, change_set, deferred) = { - let mut nested = self.nested(dest, trie_id, child_info); + let mut nested = self.nested(dest, trie_id.map(|trie_id| { + let child_info = crate::trie_unique_id(&trie_id); + (trie_id, child_info) + })); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) }; @@ -705,10 +699,13 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { + let (trie_id, child_info) = self.ctx.self_trie_info.as_ref() + .map(|info| (Some(&info.0), Some(&info.1))) + .unwrap_or((None, None)); self.ctx.overlay.get_storage( &self.ctx.self_account, - self.ctx.self_trie_id.as_ref(), - self.ctx.self_child_info.as_deref(), + trie_id, + child_info, key, ) } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 9811a52246c45..88bb9dda3221c 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -225,15 +225,15 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_unique_id(&self) -> child::OwnedChildInfo { + pub fn child_trie_unique_id(&self) -> child::ChildInfo { trie_unique_id(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::OwnedChildInfo { +pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); - child::OwnedChildInfo::new_default(&trie_id[start ..]) + child::ChildInfo::new_default(&trie_id[start ..]) } pub type TombstoneContractInfo = @@ -716,12 +716,12 @@ impl Module { .get_alive() .ok_or(GetStorageError::IsTombstone)?; - let child_trie = contract_info.child_trie_unique_id(); + let child_info = Some(trie_unique_id(&contract_info.trie_id)); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, Some(&contract_info.trie_id), - Some(&*child_trie), + child_info.as_ref(), &key, ); Ok(maybe_value) @@ -828,17 +828,16 @@ impl Module { origin_contract.last_write }; - let child_trie = origin_contract.child_trie_unique_id(); let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( &origin_contract.trie_id, - &*child_trie, + &origin_contract.child_trie_unique_id(), &blake2_256(key), ).map(|value| { child::kill( &origin_contract.trie_id, - &*child_trie, + &origin_contract.child_trie_unique_id(), &blake2_256(key), ); @@ -860,7 +859,7 @@ impl Module { for (key, value) in key_values_taken { child::put_raw( &origin_contract.trie_id, - &*child_trie, + &origin_contract.child_trie_unique_id(), &blake2_256(key), &value, ); @@ -960,7 +959,7 @@ decl_storage! 
{ impl OnFreeBalanceZero for Module { fn on_free_balance_zero(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 3967fe03cf21b..a538e1eddb11d 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -183,7 +183,7 @@ fn enact_verdict( >::remove(account); child::kill_storage( &alive_contract_info.trie_id, - &*alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -205,7 +205,7 @@ fn enact_verdict( child::kill_storage( &alive_contract_info.trie_id, - &*alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 61e490a4210d4..8267bd1e6b263 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -322,8 +322,8 @@ fn account_removal_removes_storage() { let trie_id2 = ::TrieIdGenerator::trie_id(&2); let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); - let child_info1 = Some(&*child_info1); - let child_info2 = Some(&*child_info2); + let child_info1 = Some(&child_info1); + let child_info2 = Some(&child_info2); let key1 = &[1; 32]; let key2 = &[2; 32]; diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index d1dd459b9635e..601f33f79d853 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -27,7 +27,7 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::{ChildInfo, OwnedChildInfo}; +pub use sp_core::storage::ChildInfo; /// Return 
the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 454b732fe779e..5b923f9d74bc2 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -38,7 +38,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::{ChildStorageKey, OwnedChildInfo, ChildType}, + storage::{ChildStorageKey, ChildInfo}, }; use sp_core::{ @@ -82,14 +82,6 @@ fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey { } } -#[cfg(feature = "std")] -fn resolve_child_info(child_type: u32, child_definition: &[u8]) -> OwnedChildInfo { - if child_type != ChildType::CryptoUniqueId as u32 { - panic!("Invalid child definition"); - } - OwnedChildInfo::new_default(&child_definition[..]) -} - /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -117,8 +109,9 @@ pub trait Storage { key: &[u8], ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.child_storage(storage_key, &*child_info, key).map(|s| s.to_vec()) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(storage_key, &child_info, key).map(|s| s.to_vec()) } /// Get `key` from storage, placing the value into `value_out` and return the number of @@ -153,8 +146,9 @@ pub trait Storage { value_offset: u32, ) -> Option { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.child_storage(storage_key, &*child_info, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(storage_key, &child_info, key) .map(|value| { let 
value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -181,8 +175,9 @@ pub trait Storage { value: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.set_child_storage(storage_key, &*child_info, key.to_vec(), value.to_vec()); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.set_child_storage(storage_key, &child_info, key.to_vec(), value.to_vec()); } /// Clear the storage of the given `key` and its value. @@ -201,8 +196,9 @@ pub trait Storage { key: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.clear_child_storage(storage_key, &*child_info, key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_storage(storage_key, &child_info, key); } /// Clear an entire child storage. @@ -215,8 +211,9 @@ pub trait Storage { child_type: u32, ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.kill_child_storage(storage_key, &*child_info); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.kill_child_storage(storage_key, &child_info); } /// Check whether the given `key` exists in storage. 
@@ -235,8 +232,9 @@ pub trait Storage { key: &[u8], ) -> bool { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.exists_child_storage(storage_key, &*child_info, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.exists_child_storage(storage_key, &child_info, key) } /// Clear the storage of each key-value pair where the key starts with the given `prefix`. @@ -255,8 +253,9 @@ pub trait Storage { prefix: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.clear_child_prefix(storage_key, &*child_info, prefix); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_prefix(storage_key, &child_info, prefix); } /// "Commit" all existing operations and compute the resulting storage root. 
@@ -308,8 +307,9 @@ pub trait Storage { key: &[u8], ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.next_child_storage_key(storage_key, &*child_info, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.next_child_storage_key(storage_key, &child_info, key) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 69bbb0adddf85..2f46ae6e1d4c8 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -136,7 +136,7 @@ impl BuildStorage for sp_core::storage::Storage { let k = k.clone(); if let Some(map) = storage.children.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(&*other_map.child_info) { + if !map.child_info.try_update(&other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index f99ad53009261..d8c805508975b 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,7 +20,7 @@ use log::warn; use sp_core::{Hasher, InnerHasher}; use codec::Encode; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use std::collections::{BTreeMap, btree_map::Entry}; use crate::{ @@ -184,7 +184,7 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, - I2: IntoIterator, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); @@ -193,7 +193,7 @@ pub trait Backend: std::fmt::Debug { // child first for (storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], 
&*child_info, child_delta); + self.child_storage_root(&storage_key[..], &child_info, child_delta); txs.consolidate(child_txs); if empty { if return_child_roots { @@ -326,7 +326,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option<(StorageKey, ChildInfo)>, StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 5a17683354e4d..644c629984f69 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -289,7 +289,7 @@ impl Externalities for BasicExternalities { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(storage_key.as_ref(), &*child.child_info, delta).0 + .child_storage_root(storage_key.as_ref(), &child.child_info, delta).0 } else { default_child_trie_root::>(storage_key.as_ref()) }.encode() @@ -315,7 +315,7 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; #[test] fn commit_should_work() { @@ -340,7 +340,7 @@ mod tests { #[test] fn children_works() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); let child_storage = b":child_storage:default:test".to_vec(); let mut ext = BasicExternalities::new(Storage { @@ -348,23 +348,23 @@ mod tests { children: map![ child_storage.clone() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: child_info1.to_owned(), + child_info: child_info1.clone(), } ] }); let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - assert_eq!(ext.child_storage(child(), child_info1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child(), &child_info1, b"doe"), 
Some(b"reindeer".to_vec())); - ext.set_child_storage(child(), child_info1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), child_info1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child(), &child_info1, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child(), &child_info1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child(), child_info1, b"dog"); - assert_eq!(ext.child_storage(child(), child_info1, b"dog"), None); + ext.clear_child_storage(child(), &child_info1, b"dog"); + assert_eq!(ext.child_storage(child(), &child_info1, b"dog"), None); - ext.kill_child_storage(child(), child_info1); - assert_eq!(ext.child_storage(child(), child_info1, b"doe"), None); + ext.kill_child_storage(child(), &child_info1); + assert_eq!(ext.child_storage(child(), &child_info1, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 639a29962ea99..4bfe7d8f8ef23 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -351,8 +351,8 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; - const CHILD_INFO_2: &'static [u8] = b"\x01\x00\x00\x00unique_id_2"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; + const CHILD_INFO_2: &'static [u8] = b"unique_id_2"; fn prepare_for_build(zero: u64) -> ( InMemoryBackend, @@ -361,8 +361,8 @@ mod test { Configuration, ) { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); - let child_info2 = ChildInfo::resolve_child_info(CHILD_INFO_2).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info2 = ChildInfo::new_default(CHILD_INFO_2); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), diff --git 
a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 41bfcdd906d1f..b1ea92c79ad90 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -714,14 +714,14 @@ mod tests { fn next_child_storage_key_works() { const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"\x01\x00\x00\x00unique_id_1"; - let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); + const CHILD_UUID_1: &[u8] = b"unique_id_1"; + let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let mut cache = StorageTransactionCache::default(); let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![30], Some(vec![31])); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![20], None); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ @@ -731,7 +731,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - child_info: child_info1.to_owned(), + child_info: child_info1.clone(), } ], }.into(); @@ -740,22 +740,22 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[20]), Some(vec![30])); + 
assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![50], Some(vec![50])); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[40]), Some(vec![50])); } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f083e085e1b56..753f8ccbbf9ae 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -27,7 +27,7 @@ use sp_trie::{ MemoryDB, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; -use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; +use sp_core::storage::{ChildInfo, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -47,7 +47,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, BTreeMap>, + inner: HashMap, BTreeMap>, // This field is only needed for returning reference in `as_trie_backend`. 
trie: Option, H>>, _hasher: PhantomData, @@ -88,7 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, StorageCollection)> + T: IntoIterator, StorageCollection)> >( &self, changes: T, @@ -107,10 +107,10 @@ impl InMemory { } } -impl From, BTreeMap>> +impl From, BTreeMap>> for InMemory { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from(inner: HashMap, BTreeMap>) -> Self { InMemory { inner, trie: None, @@ -121,7 +121,7 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> + let mut inner: HashMap, BTreeMap> = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); inner.insert(None, inners.top); InMemory { @@ -144,12 +144,12 @@ impl From> for InMemory { } } -impl From, StorageCollection)>> +impl From, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>, + inner: Vec<(Option<(StorageKey, ChildInfo)>, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, BTreeMap> + let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -167,7 +167,7 @@ impl InMemory { /// child storage key iterator pub fn child_storage_keys(&self) -> impl Iterator { self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], &*v.1)) + item.0.as_ref().map(|v|(&v.0[..], &v.1)) ) } } @@ -175,7 +175,7 @@ impl InMemory { impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option<(StorageKey, ChildInfo)>, StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -366,7 +366,7 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"unique_id_1"); + let child_info = 
ChildInfo::new_default(b"unique_id_1"); let mut storage = storage.update( vec![( Some((b"1".to_vec(), child_info.clone())), @@ -374,7 +374,7 @@ mod tests { )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", &*child_info, b"2").unwrap(), + assert_eq!(trie_backend.child_storage(b"1", &child_info, b"2").unwrap(), Some(b"3".to_vec())); assert!(trie_backend.storage(b"1").unwrap().is_some()); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 5b62c5ad3e05c..3aa57e9679f30 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -680,7 +680,7 @@ where H::Out: Ord + Codec, { // Not a prefixed memory db, using empty unique id and include root resolution. - proving_backend.child_storage(storage_key, ChildInfo::top_trie(), key) + proving_backend.child_storage(storage_key, &ChildInfo::top_trie(), key) .map_err(|e| Box::new(e) as Box) } @@ -702,7 +702,7 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -933,7 +933,7 @@ mod tests { #[test] fn set_child_storage_works() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -948,26 +948,26 @@ mod tests { ext.set_child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - child_info1, + &child_info1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - child_info1, + &child_info1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( 
ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - child_info1, + &child_info1, ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - child_info1, + &child_info1, b"abc" ), None @@ -977,7 +977,7 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -1005,7 +1005,7 @@ mod tests { let remote_proof = prove_child_read( remote_backend, b":child_storage:default:sub1", - child_info1, + &child_info1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 7de9885dce550..4afc8a328ba8a 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -28,7 +28,7 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; use std::{mem, ops}; use sp_core::Hasher; @@ -43,7 +43,7 @@ pub type StorageValue = Vec; pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. -pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection, OwnedChildInfo)>; +pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection, ChildInfo)>; /// The overlayed changes to state to be queried on top of the backend. /// @@ -77,7 +77,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. 
pub top: BTreeMap, /// Child storage changes. - pub children: HashMap, OwnedChildInfo)>, + pub children: HashMap, ChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -433,7 +433,7 @@ impl OverlayedChanges { /// Will panic if there are any uncommitted prospective changes. pub fn into_committed(self) -> ( impl Iterator)>, - impl Iterator)>, OwnedChildInfo))>, + impl Iterator)>, ChildInfo))>, ){ assert!(self.prospective.is_empty()); ( @@ -844,14 +844,14 @@ mod tests { #[test] fn next_child_storage_key_change_works() { let child = b"Child1".to_vec(); - let child_info = ChildInfo::resolve_child_info(b"\x01\x00\x00\x00uniqueid").unwrap(); + let child_info = ChildInfo::new_default(b"uniqueid"); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40])); + overlay.set_child_storage(child.clone(), &child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child.clone(), &child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child.clone(), &child_info, vec![40], Some(vec![40])); overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), child_info, vec![30], None); + overlay.set_child_storage(child.clone(), &child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child.clone(), &child_info, vec![30], None); // next_prospective < next_committed let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap(); @@ -873,7 +873,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); - overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50])); + 
overlay.set_child_storage(child.clone(), &child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed let next_to_40 = overlay.next_child_storage_key_change(&child, &[40]).unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index e38ca5d573357..7256f6815c535 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -411,8 +411,8 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; - const CHILD_INFO_2: &'static [u8] = b"\x01\x00\x00\x00unique_id_2"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; + const CHILD_INFO_2: &'static [u8] = b"unique_id_2"; fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, @@ -482,17 +482,17 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); - let child_info2 = ChildInfo::resolve_child_info(CHILD_INFO_2).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info2 = ChildInfo::new_default(CHILD_INFO_2); let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); let own1 = subtrie1.into_owned(); let own2 = subtrie2.into_owned(); let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), child_info1.to_owned())), + (Some((own1.clone(), child_info1.clone())), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), child_info2.to_owned())), + (Some((own2.clone(), child_info2.clone())), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); @@ -507,11 +507,11 @@ mod tests { 
vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], child_info1, &[i]).unwrap().unwrap(), + in_memory.child_storage(&own1[..], &child_info1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], child_info2, &[i]).unwrap().unwrap(), + in_memory.child_storage(&own2[..], &child_info2, &[i]).unwrap().unwrap(), vec![i] )); @@ -539,7 +539,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], child_info1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(&own1[..], &child_info1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -547,7 +547,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(&own1[..], child_info1, &[64]).unwrap().unwrap(), + proof_check.child_storage(&own1[..], &child_info1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 17a0d6fda8c15..8b29da56a6def 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,7 +19,7 @@ use log::{warn, debug}; use sp_core::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; use std::collections::BTreeMap; use codec::{Codec, Decode}; use crate::{ @@ -72,7 +72,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = BTreeMap, S::Overlay>; + type Transaction = BTreeMap, S::Overlay>; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -190,7 +190,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| 
debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, BTreeMap, S::Overlay>) + fn storage_root(&self, delta: I) -> (H::Out, BTreeMap, S::Overlay>) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -299,10 +299,10 @@ pub mod tests { const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - const CHILD_UUID_1: &[u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_UUID_1: &[u8] = b"unique_id_1"; fn test_db() -> (PrefixedMemoryDB, H256) { - let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { @@ -340,10 +340,10 @@ pub mod tests { #[test] fn read_from_child_storage_returns_some() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_KEY_1, child_info1, b"value3").unwrap(), + test_trie.child_storage(CHILD_KEY_1, &child_info1, b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index cd6cb9f45c6b2..0419556c18e37 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -28,7 +28,7 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, for_keys_in_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; use codec::Encode; /// Patricia trie-based storage trait. 
@@ -409,7 +409,7 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} // This implementation is used by normal storage trie clients. -impl TrieBackendStorageRef for (Arc>, Option) { +impl TrieBackendStorageRef for (Arc>, Option) { type Overlay = PrefixedMemoryDB; fn get( @@ -417,7 +417,7 @@ impl TrieBackendStorageRef for (Arc>, Option Result, String> { - Storage::::get(self.0.deref(), self.1.as_deref(), key, prefix) + Storage::::get(self.0.deref(), self.1.as_ref(), key, prefix) } } @@ -503,7 +503,7 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::resolve_child_info(b"\x01\x00\x00\x00uniqueid").unwrap(); + let child_info = ChildInfo::new_default(b"uniqueid"); // Contains values let mut root_1 = H256::default(); // Contains child trie @@ -542,19 +542,19 @@ mod test { let essence_2 = TrieBackend::new(mdb, root_2); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"2"), Ok(Some(b"3".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), Ok(Some(b"4".to_vec())) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"3"), Ok(Some(b"4".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"4"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"5"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), Ok(None) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"6"), Ok(None) ); } } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 
ebb3062a37313..c0af25fc9ba9c 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -12,7 +12,6 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -ref-cast = "1.0.0" [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 8371ae30680bd..250a1fa325dfd 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,14 +18,12 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode, Output}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; -use ref_cast::RefCast; -use sp_std::{vec, vec::Vec, borrow::Cow, borrow::Borrow, - borrow::ToOwned, convert::TryInto, ops::Deref}; +use sp_std::{vec::Vec, borrow::Cow}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -56,7 +54,7 @@ pub struct StorageChild { pub data: StorageMap, /// Associated child info for a child /// trie. - pub child_info: OwnedChildInfo, + pub child_info: ChildInfo, } #[cfg(feature = "std")] @@ -178,89 +176,70 @@ impl<'a> ChildStorageKey<'a> { } } -#[repr(transparent)] -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RefCast)] -/// Information related to a child state. -pub struct ChildInfo([u8]); -impl Encode for ChildInfo { - fn encode_to(&self, output: &mut T) { - self.0.encode_to(output) - } -} - -/// Owned version of `ChildInfo`. -/// To be use in persistence layers. +/// Information related to a child state. 
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] -#[repr(transparent)] -pub struct OwnedChildInfo(Vec); - -impl ToOwned for ChildInfo { - type Owned = OwnedChildInfo; - - fn to_owned(&self) -> Self::Owned { - OwnedChildInfo(self.0.to_owned()) - } +pub enum ChildInfo { + Default(ChildTrie), } -impl Borrow for OwnedChildInfo { - #[inline] - fn borrow(&self) -> &ChildInfo { - let data: &[u8] = self.0.borrow(); - ChildInfo::ref_cast(data) +impl ChildInfo { + /// Create a new child trie information for default + /// child type. + pub fn new_default(unique_id: &[u8]) -> Self { + ChildInfo::Default(ChildTrie { + data: unique_id.to_vec(), + }) } -} - -impl Deref for OwnedChildInfo { - type Target = ChildInfo; - #[inline] - fn deref(&self) -> &ChildInfo { - self.borrow() + /// Try to update with another instance, return false if both instance + /// are not compatible. + pub fn try_update(&mut self, other: &ChildInfo) -> bool { + match self { + ChildInfo::Default(child_trie) => child_trie.try_update(other), + } } -} -impl ChildInfo { /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(data: &[u8]) -> Option<&Self> { - match ChildType::read_type(data) { - Some(x) if x == ChildType::CryptoUniqueId => Some( - ChildInfo::ref_cast(data) - ), - _ => None, + pub fn resolve_child_info(child_type: u32, data: &[u8]) -> Option { + match ChildType::new(child_type) { + Some(ChildType::CryptoUniqueId) => Some(ChildInfo::new_default(data)), + None => None, } } - /// Instantiates information for a child trie. - /// No check is done on consistency. - pub fn new_unchecked(data: &[u8]) -> &Self { - ChildInfo::ref_cast(data) - } - /// Top trie defined as the unique crypto id trie with /// 0 length unique id. 
- pub fn top_trie() -> &'static Self { - Self::new_unchecked(b"\x01\x00\x00\x00") + pub fn top_trie() -> Self { + Self::new_default(&[]) } /// Return a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { - let child_type = ChildType::read_type_unchecked(&self.0); - (&self.0, child_type as u32) + match self { + ChildInfo::Default(ChildTrie { + data, + }) => (data, ChildType::CryptoUniqueId as u32), + } } /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { - match ChildType::read_type_unchecked(&self.0) { - ChildType::CryptoUniqueId => &self.0[4..], + match self { + ChildInfo::Default(ChildTrie { + data, + }) => &data[..], } } - fn child_type(&self) -> ChildType { - ChildType::read_type_unchecked(&self.0[..]) + /// Return type for child trie. + pub fn child_type(&self) -> ChildType { + match self { + ChildInfo::Default(..) => ChildType::CryptoUniqueId, + } } } @@ -286,59 +265,26 @@ impl ChildType { _ => return None, }) } - - /// Try to read type from child definition. - pub fn read_type(slice: &[u8]) -> Option { - if slice.len() < 4 { - return None; - } - slice[..4].try_into().ok() - .map(|b| u32::from_le_bytes(b)) - .and_then(|b| ChildType::new(b)) - } - - fn read_type_unchecked(slice: &[u8]) -> Self { - slice[..4].try_into().ok() - .map(|b| u32::from_le_bytes(b)) - .and_then(|b| ChildType::new(b)) - .expect("This function is only called on initialized child info.") - } +} +/// A child trie of default type. +/// Default is the same implementation as the top trie. +/// It share its trie node storage with any kind of key, +/// and its unique id needs to be collision free (eg strong +/// crypto hash). 
+#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +pub struct ChildTrie { + /// Data containing unique id. + /// Unique id must but unique and free of any possible key collision + /// (depending on its storage behavior). + data: Vec, } -impl OwnedChildInfo { - /// Create a new child trie information for default - /// child type. - pub fn new_default(unique_id: &[u8]) -> Self { - let mut vec = vec![0; unique_id.len() + 4]; - vec[..4].copy_from_slice(&(ChildType::CryptoUniqueId as u32).to_le_bytes()[..]); - vec[4..].copy_from_slice(unique_id); - OwnedChildInfo(vec) - } - +impl ChildTrie { /// Try to update with another instance, return false if both instance /// are not compatible. - pub fn try_update(&self, other: &ChildInfo) -> bool { - match self.child_type() { - ChildType::CryptoUniqueId => { - match other.child_type() { - ChildType::CryptoUniqueId => self.deref() == other, - } - }, + fn try_update(&mut self, other: &ChildInfo) -> bool { + match other { + ChildInfo::Default(other) => self.data[..] == other.data[..], } } } - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_top_trie() { - let top_trie = ChildInfo::top_trie(); - assert!(top_trie.child_type() == ChildType::CryptoUniqueId); - assert_eq!(top_trie.encode(), top_trie.to_owned().encode()); - // 16 compact enc 4 and le 1 u32 - assert!(top_trie.encode() == vec![16, 1, 0, 0, 0]); - assert_eq!(top_trie.keyspace(), &[]); - } -} From 274a92357ca60f099b736627b9de8554aa08e967 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 3 Feb 2020 16:42:12 +0100 Subject: [PATCH 018/185] Factoring map of children code, before switching key. 
--- client/db/src/lib.rs | 10 +-- client/state-db/src/lib.rs | 37 ++++----- client/state-db/src/noncanonical.rs | 20 ++--- client/state-db/src/pruning.rs | 8 +- client/state-db/src/test.rs | 2 +- primitives/state-machine/src/backend.rs | 12 +-- primitives/state-machine/src/trie_backend.rs | 11 ++- primitives/storage/src/lib.rs | 81 +++++++++++++++++++- 8 files changed, 121 insertions(+), 60 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index e1c35c0d676c7..c904d0b0cf296 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -41,7 +41,7 @@ mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::{HashMap, BTreeMap}; +use std::collections::HashMap; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use sc_client_api::backend::NewBlockState; @@ -56,7 +56,7 @@ use kvdb::{KeyValueDB, DBTransaction}; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use parking_lot::RwLock; use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; -use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo, ChildrenMap}; use sp_runtime::{ generic::BlockId, Justification, Storage, BuildStorage, @@ -513,7 +513,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: BTreeMap, PrefixedMemoryDB>>, + db_updates: ChildrenMap>>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -570,7 +570,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_db_storage( &mut self, - update: BTreeMap, PrefixedMemoryDB>>, + update: ChildrenMap>>, ) -> ClientResult<()> { self.db_updates = update; Ok(()) @@ -1116,7 +1116,7 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changesets = BTreeMap::<_, 
sc_state_db::ChangeSet>>::new(); + let mut changesets = ChildrenMap::>>::default(); let mut ops: u64 = 0; let mut bytes: u64 = 0; for (info, mut updates) in operation.db_updates.into_iter() { diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 046e40d0506af..dfcfe2b596be1 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -36,11 +36,11 @@ mod pruning; use std::fmt; use parking_lot::RwLock; use codec::Codec; -use std::collections::{BTreeMap, HashMap, hash_map::Entry, btree_map::Entry as BEntry}; +use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap}; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -114,17 +114,23 @@ impl fmt::Debug for Error { /// A set of state node changes. #[derive(Default, Debug, Clone)] -pub struct ChangeSet { +pub struct ChangeSet { /// Inserted nodes. pub inserted: Vec<(H, DBValue)>, /// Deleted nodes. pub deleted: Vec, } +impl ChangeSet { + fn merge(&mut self, other: ChangeSet) { + self.inserted.extend(other.inserted.into_iter()); + self.deleted.extend(other.deleted.into_iter()); + } +} /// A set of state node changes for a child trie. /// TODO remove?? #[derive(Debug, Clone)] -pub struct ChildTrieChangeSet { +pub struct ChildTrieChangeSet { /// Change set of this element. pub data: ChangeSet, /// Child trie descripton. @@ -133,35 +139,18 @@ pub struct ChildTrieChangeSet { } /// Change sets of all child trie (top is key None). -pub type ChildTrieChangeSets = BTreeMap, ChangeSet>; - -/// Extends for `ChildTrieChangeSets` is merging. 
-fn extend_change_sets( - set: &mut ChildTrieChangeSets, - other: impl Iterator, ChangeSet)>, -) { - for (ci, o_cs) in other { - match set.entry(ci) { - BEntry::Occupied(mut e) => { - let entry = e.get_mut(); - entry.inserted.extend(o_cs.inserted); - entry.deleted.extend(o_cs.deleted); - }, - BEntry::Vacant(e) => { e.insert(o_cs); }, - } - } -} +pub type ChildTrieChangeSets = ChildrenMap>; /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] -pub struct CommitSet { +pub struct CommitSet { /// State node changes. pub data: ChildTrieChangeSets, /// Metadata changes. pub meta: ChangeSet>, } -impl CommitSet { +impl CommitSet { /// Number of inserted key value element in the set. pub fn inserted_len(&self) -> usize { self.data.iter().map(|set| set.1.inserted.len()).sum() diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 0b3bb36f253be..b4258f97aeb63 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -21,20 +21,20 @@ //! `revert_pending` use std::fmt; -use std::collections::{HashMap, VecDeque, hash_map::Entry, BTreeMap}; +use std::collections::{HashMap, VecDeque, hash_map::Entry}; use super::{Error, DBValue, ChildTrieChangeSets, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenVec}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; // version at start to avoid collision when adding a unit const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; -type Keys = Vec<(Option, Vec)>; -type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; -type ChildKeyVals = BTreeMap, HashMap>; +type Keys = ChildrenVec>; +type KeyVals = ChildrenVec>; +type ChildKeyVals = ChildrenMap>; /// See module documentation. 
pub struct NonCanonicalOverlay { @@ -174,7 +174,7 @@ impl NonCanonicalOverlay { }; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); - let mut values = BTreeMap::new(); + let mut values = ChildrenMap::default(); if let Some((ref hash, mut block)) = last_canonicalized { // read the journal trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash); @@ -389,7 +389,7 @@ impl NonCanonicalOverlay { // get the one we need to canonicalize let overlay = &level[index]; - crate::extend_change_sets(&mut commit.data, overlay.inserted.iter() + commit.data.extend_with(overlay.inserted.iter() .map(|(ct, keys)| ( ct.clone(), ChangeSet { @@ -403,15 +403,15 @@ impl NonCanonicalOverlay { )).collect(), deleted: Vec::new(), }, - ))); - crate::extend_change_sets(&mut commit.data, overlay.deleted.iter().cloned() + )), ChangeSet::merge); + commit.data.extend_with(overlay.deleted.iter().cloned() .map(|(ct, keys)| ( ct, ChangeSet { inserted: Vec::new(), deleted: keys, }, - ))); + )), ChangeSet::merge); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 77dd2e099ad8a..44fe7f6fc54ac 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,14 +26,14 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenVec}; use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; -type Keys = Vec<(Option, Vec)>; +type Keys = ChildrenVec>; /// See module documentation. 
pub struct RefWindow { @@ -219,14 +219,14 @@ impl RefWindow { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; - crate::extend_change_sets(&mut commit.data, pruned.deleted.iter() + commit.data.extend_with(pruned.deleted.iter() .map(|(ct, keys)| ( ct.clone(), ChangeSet { inserted: Vec::new(), deleted: keys.iter().cloned().collect(), }, - ))); + )), ChangeSet::merge); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index 76f7b09b83d84..cc8639043a3d5 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -82,7 +82,7 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { } pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> ChildTrieChangeSets { - let mut result = ChildTrieChangeSets::new(); + let mut result = ChildTrieChangeSets::default(); result.insert(None, make_changeset(inserted, deleted)); result } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index d8c805508975b..f932e7cfbb716 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,9 +20,8 @@ use log::warn; use sp_core::{Hasher, InnerHasher}; use codec::Encode; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; -use std::collections::{BTreeMap, btree_map::Entry}; use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, @@ -334,14 +333,9 @@ impl Consolidate for Vec<( } } -impl Consolidate for BTreeMap { +impl Consolidate for ChildrenMap { fn consolidate(&mut self, other: Self) { - for (k, v) in other.into_iter() { - match self.entry(k) { - Entry::Occupied(mut e) => e.get_mut().consolidate(v), 
- Entry::Vacant(e) => { e.insert(v); }, - } - } + self.extend_with(other.into_iter(), Consolidate::consolidate) } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 8b29da56a6def..e63f01e360167 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,8 +19,7 @@ use log::{warn, debug}; use sp_core::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::ChildInfo; -use std::collections::BTreeMap; +use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -72,7 +71,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = BTreeMap, S::Overlay>; + type Transaction = ChildrenMap; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -190,7 +189,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, BTreeMap, S::Overlay>) + fn storage_root(&self, delta: I) -> (H::Out, ChildrenMap) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -207,7 +206,7 @@ impl, H: Hasher> Backend for TrieBackend where Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } } - let mut tx = BTreeMap::new(); + let mut tx = ChildrenMap::default(); tx.insert(None, write_overlay); (root, tx) } @@ -256,7 +255,7 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; - let mut tx = BTreeMap::new(); + let mut tx = ChildrenMap::default(); tx.insert(Some(child_info.to_owned()), write_overlay); (root, is_default, tx) } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 
250a1fa325dfd..9180ff720e6f4 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -21,6 +21,8 @@ use codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; +#[cfg(feature = "std")] +use sp_std::collections::btree_map::BTreeMap; use sp_debug_derive::RuntimeDebug; use sp_std::{vec::Vec, borrow::Cow}; @@ -44,7 +46,7 @@ pub struct StorageData( /// Map of data to use in a storage, it is a collection of /// byte key and values. #[cfg(feature = "std")] -pub type StorageMap = std::collections::BTreeMap, Vec>; +pub type StorageMap = BTreeMap, Vec>; #[cfg(feature = "std")] #[derive(Debug, PartialEq, Eq, Clone)] @@ -288,3 +290,80 @@ impl ChildTrie { } } } + +#[cfg(feature = "std")] +#[derive(Clone, PartialEq, Eq, Debug)] +/// Type for storing a map of child trie related information. +/// A few utilities methods are defined. +pub struct ChildrenMap(pub BTreeMap, T>); + +/// Type alias for storage of children related content. +pub type ChildrenVec = Vec<(Option, T)>; + +/// Type alias for storage of children related content. +pub type ChildrenSlice<'a, T> = &'a [(Option, T)]; + +#[cfg(feature = "std")] +impl sp_std::ops::Deref for ChildrenMap { + type Target = BTreeMap, T>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(feature = "std")] +impl sp_std::ops::DerefMut for ChildrenMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "std")] +impl sp_std::default::Default for ChildrenMap { + fn default() -> Self { + ChildrenMap(BTreeMap::new()) + } +} + +#[cfg(feature = "std")] +impl ChildrenMap { + /// Extend for `ChildrenMap` is usually about merging entries, + /// this method extends two maps, by applying a merge function + /// on each of its entries. 
+ pub fn extend_with( + &mut self, + other: impl Iterator, T)>, + merge: impl Fn(&mut T, T), + ) { + use sp_std::collections::btree_map::Entry; + for (child_info, child_content) in other { + match self.0.entry(child_info) { + Entry::Occupied(mut entry) => { + merge(entry.get_mut(), child_content) + }, + Entry::Vacant(entry) => { + entry.insert(child_content); + }, + } + } + } + + /// Extends two maps, by enxtending entries with the same key. + pub fn extend_replace( + &mut self, + other: impl Iterator, T)>, + ) { + self.0.extend(other) + } +} + +#[cfg(feature = "std")] +impl IntoIterator for ChildrenMap { + type Item = (Option, T); + type IntoIter = sp_std::collections::btree_map::IntoIter, T>; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} From 2e47a1d1ea627a2fe94d4c3a4210156c9bd22d15 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 3 Feb 2020 18:37:17 +0100 Subject: [PATCH 019/185] Switching children key from optional to simple ChildInfo. --- client/db/src/lib.rs | 35 ++++++++++------ client/state-db/src/lib.rs | 4 +- client/state-db/src/noncanonical.rs | 25 ++++++----- client/state-db/src/pruning.rs | 42 +++++++++---------- client/state-db/src/test.rs | 12 +++--- primitives/state-machine/src/ext.rs | 34 ++++++++++++++- .../state-machine/src/proving_backend.rs | 8 ++-- primitives/state-machine/src/trie_backend.rs | 4 +- .../state-machine/src/trie_backend_essence.rs | 6 +-- primitives/storage/src/lib.rs | 24 +++++++---- 10 files changed, 123 insertions(+), 71 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index c904d0b0cf296..a447a6b87801e 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -91,7 +91,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. 
pub type DbState = sp_state_machine::TrieBackend< - (Arc>>, Option), HasherFor + (Arc>>, ChildInfo), HasherFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. @@ -667,7 +667,7 @@ struct StorageDb { impl sp_state_machine::Storage> for StorageDb { fn get( &self, - trie: Option<&ChildInfo>, + trie: &ChildInfo, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { @@ -700,7 +700,7 @@ impl DbGenesisStorage { impl sp_state_machine::Storage> for DbGenesisStorage { fn get( &self, - _trie: Option<&ChildInfo>, + _trie: &ChildInfo, _key: &Block::Hash, _prefix: Prefix, ) -> Result, String> { @@ -1326,9 +1326,9 @@ impl Backend { fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { let mut key_buffer = Vec::new(); for child_data in commit.data.into_iter() { - if let Some(child_info) = child_data.0 { + if !child_data.0.is_top_trie() { // children tries with prefixes - let keyspace = child_info.keyspace(); + let keyspace = child_data.0.keyspace(); let keyspace_len = keyspace.len(); key_buffer.resize(keyspace_len, 0); key_buffer[..keyspace_len].copy_from_slice(keyspace); @@ -1598,7 +1598,7 @@ impl sc_client_api::backend::Backend for Backend { BlockId::Hash(h) if h == Default::default() => { let genesis_storage = DbGenesisStorage::::new(); let root = genesis_storage.0.clone(); - let db_state = DbState::::new((Arc::new(genesis_storage), None), root); + let db_state = DbState::::new((Arc::new(genesis_storage), ChildInfo::top_trie()), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); return Ok(CachingState::new(state, self.shared_cache.clone(), None)); }, @@ -1617,7 +1617,7 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root(); - let db_state = DbState::::new((self.storage.clone(), None), *root); + let db_state = DbState::::new((self.storage.clone(), ChildInfo::top_trie()), *root); let state = 
RefTrackingState::new( db_state, self.storage.clone(), @@ -1647,7 +1647,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(Some(header)) => { sp_state_machine::Storage::get( self.storage.as_ref(), - None, // header in top trie + &ChildInfo::top_trie(), &header.state_root(), (&[], None), ).unwrap_or(None).is_some() @@ -1916,7 +1916,9 @@ pub(crate) mod tests { children: Default::default(), }).unwrap(); - key = op.db_updates.entry(None).or_insert_with(Default::default).insert(EMPTY_PREFIX, b"hello"); + key = op.db_updates.entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .insert(EMPTY_PREFIX, b"hello"); op.set_block_data( header, Some(vec![]), @@ -1952,8 +1954,14 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.entry(None).or_insert_with(Default::default).insert(EMPTY_PREFIX, b"hello"); - op.db_updates.entry(None).or_insert_with(Default::default).remove(&key, EMPTY_PREFIX); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .insert(EMPTY_PREFIX, b"hello"); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), @@ -1989,7 +1997,10 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.entry(None).or_insert_with(Default::default).remove(&key, EMPTY_PREFIX); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index dfcfe2b596be1..cfe2bb5c76aee 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -412,7 +412,7 @@ impl StateDbSync { pub fn get( &self, - trie: Option<&ChildInfo>, + trie: &ChildInfo, key: &Key, db: &D, ) -> Result, Error> @@ -489,7 +489,7 @@ impl StateDb { /// Get a value from non-canonical/pruning overlay or the backing DB. 
pub fn get( &self, - trie: Option<&ChildInfo>, + trie: &ChildInfo, key: &Key, db: &D, ) -> Result, Error> diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index b4258f97aeb63..6d79dfeffd4bb 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -73,8 +73,8 @@ impl From> for J JournalRecordV1 { hash: old.hash, parent_hash: old.parent_hash, - inserted: vec![(None, old.inserted)], - deleted: vec![(None, old.deleted)], + inserted: vec![(ChildInfo::top_trie(), old.inserted)], + deleted: vec![(ChildInfo::top_trie(), old.deleted)], } } } @@ -99,8 +99,8 @@ fn insert_values( values: &mut ChildKeyVals, inserted: KeyVals, ) { - for (ct, inserted) in inserted { - let values = values.entry(ct).or_default(); + for (child_info, inserted) in inserted { + let values = values.entry(child_info).or_default(); for (k, v) in inserted { debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); @@ -110,8 +110,8 @@ fn insert_values( } fn discard_values(values: &mut ChildKeyVals, inserted: Keys) { - for (ct, inserted) in inserted { - let values = values.entry(ct).or_default(); + for (child_info, inserted) in inserted { + let values = values.entry(child_info).or_default(); for k in inserted { match values.entry(k) { Entry::Occupied(mut e) => { @@ -198,7 +198,9 @@ impl NonCanonicalOverlay { } }, }; - let inserted = record.inserted.iter().map(|(ct, rec)| (ct.clone(), rec.iter().map(|(k, _)| k.clone()).collect())).collect(); + let inserted = record.inserted.iter().map(|(child_info, rec)| + (child_info.clone(), rec.iter().map(|(k, _)| k.clone()).collect()) + ).collect(); let overlay = BlockOverlay { hash: record.hash.clone(), journal_key, @@ -460,9 +462,8 @@ impl NonCanonicalOverlay { } /// Get a value from the node overlay. This searches in every existing changeset. 
- pub fn get(&self, trie: Option<&ChildInfo>, key: &Key) -> Option { - // TODO use top_trie instead of none - if let Some(values) = self.values.get(&trie.map(|t| t.to_owned())) { + pub fn get(&self, child_info: &ChildInfo, key: &Key) -> Option { + if let Some(values) = self.values.get(child_info) { if let Some((_, value)) = values.get(&key) { return Some(value.clone()); } @@ -566,12 +567,14 @@ impl NonCanonicalOverlay { mod tests { use std::io; use sp_core::H256; + use sp_core::storage::ChildInfo; use super::{NonCanonicalOverlay, to_journal_key_v1}; use crate::CommitSet; use crate::test::{make_db, make_childchangeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(None, &H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(&ChildInfo::top_trie(), &H256::from_low_u64_be(key)) + == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 44fe7f6fc54ac..1fd736913188b 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -40,7 +40,7 @@ pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. 
- death_index: HashMap, HashMap>, + death_index: HashMap>, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -52,8 +52,8 @@ pub struct RefWindow { } impl RefWindow { - fn remove_death_index(&mut self, ct: &Option, key: &Key) -> Option { - if let Some(child_index) = self.death_index.get_mut(ct) { + fn remove_death_index(&mut self, child_info: &ChildInfo, key: &Key) -> Option { + if let Some(child_index) = self.death_index.get_mut(child_info) { child_index.remove(key) } else { None @@ -65,12 +65,12 @@ impl RefWindow { struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: HashMap, HashSet>, + deleted: HashMap>, } impl DeathRow { - fn remove_deleted(&mut self, ct: &Option, key: &Key) -> bool { - if let Some(child_index) = self.deleted.get_mut(ct) { + fn remove_deleted(&mut self, child_info: &ChildInfo, key: &Key) -> bool { + if let Some(child_index) = self.deleted.get_mut(child_info) { child_index.remove(key) } else { false @@ -104,8 +104,8 @@ impl From> for J fn from(old: JournalRecordCompat) -> Self { JournalRecordV1 { hash: old.hash, - inserted: vec![(None, old.inserted)], - deleted: vec![(None, old.deleted)], + inserted: vec![(ChildInfo::top_trie(), old.inserted)], + deleted: vec![(ChildInfo::top_trie(), old.deleted)], } } } @@ -153,7 +153,7 @@ impl RefWindow { Ok(pruning) } - fn import, Vec)>>( + fn import)>>( &mut self, hash: &BlockHash, journal_key: Vec, @@ -161,26 +161,26 @@ impl RefWindow { deleted: Keys, ) { // remove all re-inserted keys from death rows - for (ct, inserted) in inserted { + for (child_info, inserted) in inserted { for k in inserted { - if let Some(block) = self.remove_death_index(&ct, &k) { + if let Some(block) = self.remove_death_index(&child_info, &k) { self.death_rows[(block - self.pending_number) as usize] - .remove_deleted(&ct, &k); + .remove_deleted(&child_info, &k); } } } // add new keys let imported_block = self.pending_number + 
self.death_rows.len() as u64; - for (ct, deleted) in deleted.iter() { - let entry = self.death_index.entry(ct.clone()).or_default(); + for (child_info, deleted) in deleted.iter() { + let entry = self.death_index.entry(child_info.clone()).or_default(); for k in deleted.iter() { entry.insert(k.clone(), imported_block); } } - let mut deleted_death_row = HashMap::, HashSet>::new(); - for (ct, deleted) in deleted.into_iter() { - let entry = deleted_death_row.entry(ct).or_default(); + let mut deleted_death_row = HashMap::>::new(); + for (child_info, deleted) in deleted.into_iter() { + let entry = deleted_death_row.entry(child_info).or_default(); entry.extend(deleted); } @@ -220,8 +220,8 @@ impl RefWindow { let index = self.pending_number + self.pending_prunings as u64; commit.data.extend_with(pruned.deleted.iter() - .map(|(ct, keys)| ( - ct.clone(), + .map(|(child_info, keys)| ( + child_info.clone(), ChangeSet { inserted: Vec::new(), deleted: keys.iter().cloned().collect(), @@ -272,8 +272,8 @@ impl RefWindow { for _ in 0 .. 
self.pending_prunings { let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - for (ct, deleted) in pruned.deleted.iter() { - if let Some(child_index) = self.death_index.get_mut(ct) { + for (child_info, deleted) in pruned.deleted.iter() { + if let Some(child_index) = self.death_index.get_mut(child_info) { for key in deleted.iter() { child_index.remove(key); } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index cc8639043a3d5..6cfa2256b2c1c 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -19,11 +19,11 @@ use std::collections::HashMap; use sp_core::H256; use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSets}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap}; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: HashMap, HashMap>, + pub data: ChildrenMap>, pub meta: HashMap, DBValue>, } @@ -40,7 +40,7 @@ impl NodeDb for TestDb { type Key = H256; fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(&None).and_then(|data| data.get(key).cloned())) + Ok(self.data.get(&ChildInfo::top_trie()).and_then(|data| data.get(key).cloned())) } } @@ -83,7 +83,7 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> ChildTrieChangeSets { let mut result = ChildTrieChangeSets::default(); - result.insert(None, make_changeset(inserted, deleted)); + result.insert(ChildInfo::top_trie(), make_changeset(inserted, deleted)); result } @@ -95,8 +95,8 @@ pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { } pub fn make_db(inserted: &[u64]) -> TestDb { - let mut data = HashMap::new(); - data.insert(None, inserted.iter() + let mut data = ChildrenMap::default(); + data.insert(ChildInfo::top_trie(), 
inserted.iter() .map(|v| { (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) }) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index b1ea92c79ad90..a1dffcbae9989 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -209,6 +209,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { + if child_info.is_top_trie() { + return self.storage(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(storage_key.as_ref(), key) @@ -231,15 +234,19 @@ where fn child_storage_hash( &self, storage_key: ChildStorageKey, - _child_info: &ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { + if child_info.is_top_trie() { + return self.storage_hash(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(storage_key.as_ref(), key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| - self.backend.storage_hash(key).expect(EXT_NOT_ALLOWED_TO_FAIL) + self.backend.child_storage_hash(storage_key.as_ref(), child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: ChildHash({}) {}={:?}", @@ -258,6 +265,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { + if child_info.is_top_trie() { + return self.original_storage(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend .child_storage(storage_key.as_ref(), child_info, key) @@ -279,6 +289,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option> { + if child_info.is_top_trie() { + return self.original_storage_hash(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend .child_storage_hash(storage_key.as_ref(), child_info, key) @@ -315,6 +328,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> bool { + if child_info.is_top_trie() { + return self.exists_storage(key); + } let _guard = 
sp_panic_handler::AbortGuard::force_abort(); let result = match self.overlay.child_storage(storage_key.as_ref(), key) { @@ -354,6 +370,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { + if child_info.is_top_trie() { + return self.next_storage_key(key); + } let next_backend_key = self.backend .next_child_storage_key(storage_key.as_ref(), child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); @@ -400,6 +419,9 @@ where key: StorageKey, value: Option, ) { + if child_info.is_top_trie() { + return self.place_storage(key, value); + } trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, HexDisplay::from(&storage_key.as_ref()), @@ -417,6 +439,10 @@ where storage_key: ChildStorageKey, child_info: &ChildInfo, ) { + if child_info.is_top_trie() { + trace!(target: "state-trace", "Ignoring kill_child_storage on top trie"); + return; + } trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, HexDisplay::from(&storage_key.as_ref()), @@ -454,6 +480,10 @@ where child_info: &ChildInfo, prefix: &[u8], ) { + if child_info.is_top_trie() { + return self.clear_prefix(prefix); + } + trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", self.id, HexDisplay::from(&storage_key.as_ref()), diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 7256f6815c535..ed574650cf78b 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -351,8 +351,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> where I: IntoIterator, Option>)> { let (root, mut tx) = self.0.storage_root(delta); - // We may rather want to return a btreemap - (root, tx.remove(&None)) + // TODO should we prove over a collection of child trie instead? 
+ (root, tx.remove(&ChildInfo::top_trie())) } fn child_storage_root( @@ -366,7 +366,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> H::Out: Ord { let (root, is_empty, mut tx) = self.0.child_storage_root(storage_key, child_info, delta); - (root, is_empty, tx.remove(&Some(child_info.to_owned()))) + (root, is_empty, tx.remove(child_info)) } } @@ -454,7 +454,7 @@ mod tests { let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); let (proving_root, proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); - let mut trie_mdb = trie_mdb.remove(&None).unwrap(); + let mut trie_mdb = trie_mdb.remove(&ChildInfo::top_trie()).unwrap(); assert_eq!(trie_mdb.drain(), proving_mdb.unwrap().drain()); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index e63f01e360167..af00fa438ed7e 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -207,7 +207,7 @@ impl, H: Hasher> Backend for TrieBackend where } } let mut tx = ChildrenMap::default(); - tx.insert(None, write_overlay); + tx.insert(ChildInfo::top_trie(), write_overlay); (root, tx) } @@ -256,7 +256,7 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; let mut tx = ChildrenMap::default(); - tx.insert(Some(child_info.to_owned()), write_overlay); + tx.insert(child_info.clone(), write_overlay); (root, is_default, tx) } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0419556c18e37..0faa93f3a7f1d 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -36,7 +36,7 @@ pub trait Storage: Send + Sync { /// Get a trie node. 
fn get( &self, - trie: Option<&ChildInfo>, + trie: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -409,7 +409,7 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} // This implementation is used by normal storage trie clients. -impl TrieBackendStorageRef for (Arc>, Option) { +impl TrieBackendStorageRef for (Arc>, ChildInfo) { type Overlay = PrefixedMemoryDB; fn get( @@ -417,7 +417,7 @@ impl TrieBackendStorageRef for (Arc>, Option Result, String> { - Storage::::get(self.0.deref(), self.1.as_ref(), key, prefix) + Storage::::get(self.0.deref(), &self.1, key, prefix) } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 9180ff720e6f4..e4d4b5604ae2b 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -216,6 +216,14 @@ impl ChildInfo { Self::new_default(&[]) } + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. + pub fn is_top_trie(&self) -> bool { + match self { + ChildInfo::Default(ChildTrie { data }) => data.len() == 0 + } + } + /// Return a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { @@ -295,17 +303,17 @@ impl ChildTrie { #[derive(Clone, PartialEq, Eq, Debug)] /// Type for storing a map of child trie related information. /// A few utilities methods are defined. -pub struct ChildrenMap(pub BTreeMap, T>); +pub struct ChildrenMap(pub BTreeMap); /// Type alias for storage of children related content. -pub type ChildrenVec = Vec<(Option, T)>; +pub type ChildrenVec = Vec<(ChildInfo, T)>; /// Type alias for storage of children related content. 
-pub type ChildrenSlice<'a, T> = &'a [(Option, T)]; +pub type ChildrenSlice<'a, T> = &'a [(ChildInfo, T)]; #[cfg(feature = "std")] impl sp_std::ops::Deref for ChildrenMap { - type Target = BTreeMap, T>; + type Target = BTreeMap; fn deref(&self) -> &Self::Target { &self.0 @@ -333,7 +341,7 @@ impl ChildrenMap { /// on each of its entries. pub fn extend_with( &mut self, - other: impl Iterator, T)>, + other: impl Iterator, merge: impl Fn(&mut T, T), ) { use sp_std::collections::btree_map::Entry; @@ -352,7 +360,7 @@ impl ChildrenMap { /// Extends two maps, by enxtending entries with the same key. pub fn extend_replace( &mut self, - other: impl Iterator, T)>, + other: impl Iterator, ) { self.0.extend(other) } @@ -360,8 +368,8 @@ impl ChildrenMap { #[cfg(feature = "std")] impl IntoIterator for ChildrenMap { - type Item = (Option, T); - type IntoIter = sp_std::collections::btree_map::IntoIter, T>; + type Item = (ChildInfo, T); + type IntoIter = sp_std::collections::btree_map::IntoIter; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() From b07d7cac096d8df2443c687f068a5192dd631896 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 10:02:57 +0100 Subject: [PATCH 020/185] fix merge test --- primitives/state-machine/src/ext.rs | 33 ++++++++++++++--------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index df631e623c0b5..06ba6bd26bca9 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -633,10 +633,7 @@ mod tests { type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { @@ -748,9 +745,7 @@ mod tests { #[test] fn next_child_storage_key_works() { - const 
CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let mut cache = StorageTransactionCache::default(); @@ -797,11 +792,15 @@ mod tests { #[test] fn child_storage_works() { + use sp_core::InnerHasher; + + let child_info1 = ChildInfo::new_default(CHILD_UUID_1); + let mut cache = StorageTransactionCache::default(); let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![20], None); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ @@ -811,31 +810,31 @@ mod tests { vec![20] => vec![20], vec![30] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info1.clone(), } ], }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(child(), &child_info1, &[10]), Some(vec![10])); + assert_eq!(ext.original_child_storage(child(), &child_info1, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[10]), + ext.child_storage_hash(child(), &child_info1, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[20]), None); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[20]), Some(vec![20])); + assert_eq!(ext.child_storage(child(), &child_info1, &[20]), None); + assert_eq!(ext.original_child_storage(child(), &child_info1, &[20]), 
Some(vec![20])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[20]), + ext.child_storage_hash(child(), &child_info1, &[20]), None, ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.child_storage(child(), &child_info1, &[30]), Some(vec![31])); + assert_eq!(ext.original_child_storage(child(), &child_info1, &[30]), Some(vec![40])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[30]), + ext.child_storage_hash(child(), &child_info1, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); From e5d7b04a0651d2f5cb0bd603b037430e1765f4ca Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 10:52:08 +0100 Subject: [PATCH 021/185] clean todos --- client/state-db/src/lib.rs | 10 ---------- primitives/state-machine/src/proving_backend.rs | 4 ++-- primitives/state-machine/src/trie_backend.rs | 5 ----- primitives/state-machine/src/trie_backend_essence.rs | 10 ---------- 4 files changed, 2 insertions(+), 27 deletions(-) diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index cfe2bb5c76aee..8bd303d9b85a2 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -127,16 +127,6 @@ impl ChangeSet { self.deleted.extend(other.deleted.into_iter()); } } -/// A set of state node changes for a child trie. -/// TODO remove?? -#[derive(Debug, Clone)] -pub struct ChildTrieChangeSet { - /// Change set of this element. - pub data: ChangeSet, - /// Child trie descripton. - /// If not set, this is the top trie. - pub info: Option, -} /// Change sets of all child trie (top is key None). 
pub type ChildTrieChangeSets = ChildrenMap>; diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ed574650cf78b..ae6dd9b2dbf68 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -239,6 +239,8 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } } +// proof run on a flatten storage of tries and currently only need implement a single +// trie backend storage api. impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef for ProofRecorderBackend<'a, S, H> { @@ -249,7 +251,6 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef key: &H::Out, prefix: Prefix, ) -> Result, String> { - // TODO switch proof model too (use a trie) if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } @@ -351,7 +352,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> where I: IntoIterator, Option>)> { let (root, mut tx) = self.0.storage_root(delta); - // TODO should we prove over a collection of child trie instead? (root, tx.remove(&ChildInfo::top_trie())) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index af00fa438ed7e..771364aa964c7 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -84,7 +84,6 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? 
{ essence.storage(key) @@ -103,7 +102,6 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { essence.next_storage_key(key) @@ -126,7 +124,6 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { essence.for_keys(f) @@ -140,7 +137,6 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { essence.for_keys_with_prefix(prefix, f) @@ -234,7 +230,6 @@ impl, H: Hasher> Backend for TrieBackend where }; { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info), &mut buf); // Do not write prefix in overlay. diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0faa93f3a7f1d..32b2ba0bbca51 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -273,7 +273,6 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O O: hash_db::HashDB + Default + Consolidate, { fn get(&self, key: &H::Out) -> Option { - // TODO need new trait with ct as parameter!!! 
if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { @@ -426,7 +425,6 @@ impl TrieBackendStorageRef for (Arc>, ChildInfo) { pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { db: &'a B, info: Option<&'a ChildInfo>, - buffer: &'a mut Vec, _ph: PhantomData, } @@ -451,12 +449,6 @@ impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for Ch prefix: Prefix, ) -> Result, String> { if let Some(keyspace) = self.info.as_ref().map(|ci| ci.keyspace()) { - // TODO switch to &mut self like in overlay pr and use commented code - /*self.buffer.resize(keyspace.len() + prefix.0.len(), 0); - self.buffer[..keyspace.len()].copy_from_slice(keyspace); - self.buffer[keyspace.len()..].copy_from_slice(prefix.0); - self.db.get(key, (self.buffer.as_slice(), prefix.1))*/ - let prefix = keyspace_as_prefix_alloc(keyspace, prefix); self.db.get(key, (prefix.0.as_slice(), prefix.1)) } else { @@ -475,8 +467,6 @@ impl TrieBackendStorageRef for PrefixedMemoryDB { key: &H::Out, prefix: Prefix, ) -> Result, String> { - // TODO should we split prefixed memory db too?? -> likely yes: sharing - // rc does not make sense -> change type of PrefixedMemoryDB. 
Ok(hash_db::HashDB::get(self, key, prefix)) } } From 3a7166934168b1bcd4565f536d1df78331ac65c0 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 11:01:00 +0100 Subject: [PATCH 022/185] fix --- primitives/state-machine/src/trie_backend.rs | 17 +++++------------ .../state-machine/src/trie_backend_essence.rs | 3 +-- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 771364aa964c7..1847fb89bb33e 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -84,8 +84,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - let mut buf = Vec::new(); - if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { + if let Some(essence) = self.child_essence(storage_key, child_info)? { essence.storage(key) } else { Ok(None) @@ -102,8 +101,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - let mut buf = Vec::new(); - if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { + if let Some(essence) = self.child_essence(storage_key, child_info)? 
{ essence.next_storage_key(key) } else { Ok(None) @@ -124,8 +122,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - let mut buf = Vec::new(); - if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { essence.for_keys(f) } } @@ -137,8 +134,7 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - let mut buf = Vec::new(); - if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { essence.for_keys_with_prefix(prefix, f) } } @@ -230,8 +226,7 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let mut buf = Vec::new(); - let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info), &mut buf); + let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info)); // Do not write prefix in overlay. let mut eph = Ephemeral::new( &child_essence, @@ -267,7 +262,6 @@ impl, H: Hasher> TrieBackend where &'a self, storage_key: &[u8], child_info: &'a ChildInfo, - buffer: &'a mut Vec, ) -> Result, H>>, >::Error> { let root: Option = self.storage(storage_key)? 
.and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); @@ -275,7 +269,6 @@ impl, H: Hasher> TrieBackend where Some(TrieBackendEssence::new(ChildTrieBackendStorage::new( self.essence.backend_storage(), Some(child_info), - buffer, ), root)) } else { None diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 32b2ba0bbca51..b4f24502d9c3c 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -430,11 +430,10 @@ pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { impl<'a, H: Hasher, B: TrieBackendStorageRef> ChildTrieBackendStorage<'a, H, B> { /// Instantiate a `ChildTrieBackendStorage`. - pub fn new(db: &'a B, info: Option<&'a ChildInfo>, buffer: &'a mut Vec) -> Self { + pub fn new(db: &'a B, info: Option<&'a ChildInfo>) -> Self { ChildTrieBackendStorage { db, info, - buffer, _ph: PhantomData, } } From c8464710f905455aaeb6791c1ec309e31c956d4b Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 17:29:48 +0100 Subject: [PATCH 023/185] End up removing all keypacedDB from code. 
--- client/db/src/changes_tries_storage.rs | 11 +- client/db/src/lib.rs | 13 +- client/src/client.rs | 3 +- client/state-db/src/lib.rs | 12 +- client/state-db/src/test.rs | 4 +- .../state-machine/src/changes_trie/build.rs | 13 +- .../src/changes_trie/changes_iterator.rs | 11 +- .../state-machine/src/changes_trie/mod.rs | 7 +- .../state-machine/src/changes_trie/prune.rs | 5 +- .../state-machine/src/changes_trie/storage.rs | 7 +- .../state-machine/src/proving_backend.rs | 39 +++--- primitives/state-machine/src/trie_backend.rs | 32 +++-- .../state-machine/src/trie_backend_essence.rs | 80 ++++++----- primitives/trie/src/lib.rs | 125 +----------------- 14 files changed, 138 insertions(+), 224 deletions(-) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index ab8c7465badd1..f5c1d34688e23 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -488,9 +488,11 @@ where fn get( &self, + child_info: &sp_core::storage::ChildInfo, key: &Block::Hash, _prefix: Prefix, ) -> Result, String> { + debug_assert!(child_info.is_top_trie()); self.db.get(self.changes_tries_column, key.as_ref()) .map_err(|err| format!("{}", err)) } @@ -594,8 +596,9 @@ mod tests { assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); let storage = backend.changes_tries_storage.storage(); + let top_trie = sp_core::storage::ChildInfo::top_trie(); for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); + assert_eq!(storage.get(&top_trie, &key, EMPTY_PREFIX), Ok(Some(val))); } }; @@ -704,7 +707,11 @@ mod tests { .log(DigestItem::as_changes_trie_root) .cloned(); match trie_root { - Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), + Some(trie_root) => backend.changes_tries_storage.get( + &sp_core::storage::ChildInfo::top_trie(), + &trie_root, + EMPTY_PREFIX, + ).unwrap().is_none(), None 
=> true, } }; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index a447a6b87801e..2f73ea3c7d2a9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -681,8 +681,17 @@ impl sc_state_db::NodeDb for StorageDb { type Error = io::Error; type Key = [u8]; - fn get(&self, key: &[u8]) -> Result>, Self::Error> { - self.db.get(columns::STATE, key).map(|r| r.map(|v| v.to_vec())) + fn get(&self, child_info: &ChildInfo, key: &[u8]) -> Result>, Self::Error> { + if child_info.is_top_trie() { + self.db.get(columns::STATE, key) + } else { + let keyspace = child_info.keyspace(); + // TODO try to switch api to &mut and use a key buffer from StorageDB + let mut key_buffer = vec![0; keyspace.len() + key.len()]; + key_buffer[..keyspace.len()].copy_from_slice(keyspace); + key_buffer[keyspace.len()..].copy_from_slice(&key[..]); + self.db.get(columns::STATE, &key_buffer[..]) + }.map(|r| r.map(|v| v.to_vec())) } } diff --git a/client/src/client.rs b/client/src/client.rs index 7acef6a4a910c..888bd88428863 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -566,10 +566,11 @@ impl Client where fn get( &self, + child_info: &ChildInfo, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { - self.storage.get(key, prefix) + self.storage.get(child_info, key, prefix) } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 8bd303d9b85a2..992e8fa81f250 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -68,7 +68,7 @@ pub trait NodeDb { type Error: fmt::Debug; /// Get state trie node. - fn get(&self, key: &Self::Key) -> Result, Self::Error>; + fn get(&self, child_info: &ChildInfo, key: &Self::Key) -> Result, Self::Error>; } /// Error type. 
@@ -402,16 +402,16 @@ impl StateDbSync { pub fn get( &self, - trie: &ChildInfo, + child_info: &ChildInfo, key: &Key, db: &D, ) -> Result, Error> where Key: AsRef { - if let Some(value) = self.non_canonical.get(trie, key) { + if let Some(value) = self.non_canonical.get(child_info, key) { return Ok(Some(value)); } - db.get(key.as_ref()).map_err(|e| Error::Db(e)) + db.get(child_info, key.as_ref()).map_err(|e| Error::Db(e)) } pub fn apply_pending(&mut self) { @@ -479,13 +479,13 @@ impl StateDb { /// Get a value from non-canonical/pruning overlay or the backing DB. pub fn get( &self, - trie: &ChildInfo, + child_info: &ChildInfo, key: &Key, db: &D, ) -> Result, Error> where Key: AsRef { - self.db.read().get(trie, key, db) + self.db.read().get(child_info, key, db) } /// Revert all non-canonical blocks with the best block number. diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index 6cfa2256b2c1c..b9f2941bcc5e0 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -39,8 +39,8 @@ impl NodeDb for TestDb { type Error = (); type Key = H256; - fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(&ChildInfo::top_trie()).and_then(|data| data.get(key).cloned())) + fn get(&self, child_info: &ChildInfo, key: &H256) -> Result, ()> { + Ok(self.data.get(child_info).and_then(|data| data.get(key).cloned())) } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 4bfe7d8f8ef23..cefc4d88470a2 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -281,6 +281,9 @@ fn prepare_digest_input<'a, H, Number>( return Ok((map, child_map)); } + // change trie content are all stored as top_trie (default child trie with empty keyspace) + let child_info = sp_core::storage::ChildInfo::top_trie(); + let child_info = &child_info; let mut children_roots = BTreeMap::::new(); { let trie_storage = 
TrieBackendEssence::<_, H>::new( @@ -288,7 +291,7 @@ fn prepare_digest_input<'a, H, Number>( trie_root, ); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| + trie_storage.for_key_values_with_prefix(child_info, &child_prefix, |key, value| if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { let mut trie_root = ::Out::default(); @@ -297,12 +300,12 @@ fn prepare_digest_input<'a, H, Number>( } }); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &digest_prefix, |key| if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); @@ -319,12 +322,12 @@ fn prepare_digest_input<'a, H, Number>( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &digest_prefix, |key| if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 9e185d0444c86..84be4a3f55541 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -67,6 +67,7 @@ pub fn key_changes<'a, 
H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, + child_info: sp_core::storage::ChildInfo::top_trie(), }) } @@ -177,6 +178,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, + child_info: sp_core::storage::ChildInfo::top_trie(), }.collect() } @@ -314,6 +316,10 @@ pub struct DrilldownIterator<'a, H, Number> H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, + /// This is always top trie info, but it cannot be + /// statically instantiated at the time (vec of null + /// size could be in theory). + child_info: sp_core::storage::ChildInfo, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> @@ -322,8 +328,11 @@ impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, N type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { + let child_info = &self.child_info; self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root) + .storage(child_info, key) + ) } } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 45970e7a31dc7..b6aba93108407 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -69,6 +69,7 @@ use std::collections::{HashMap, HashSet}; use std::convert::TryInto; use hash_db::Prefix; use sp_core::Hasher; +use sp_core::storage::ChildInfo; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; @@ -160,8 +161,11 @@ pub trait Storage: RootsStorage { functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. + /// Note that child info is use only for case where we use this trait + /// as an adapter to storage. 
fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -175,10 +179,11 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBack fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.0.get(key, prefix) + self.0.get(child_info, key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 94e8fe4bdaed2..87bd5dad60e09 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -65,7 +65,8 @@ pub fn prune( ); let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); let mut children_roots = Vec::new(); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { + let child_info = sp_core::storage::ChildInfo::top_trie(); + trie_storage.for_key_values_with_prefix(&child_info, &child_prefix, |key, value| { if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { let mut trie_root = ::Out::default(); @@ -100,7 +101,7 @@ fn prune_trie( backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), proof_recorder: &mut proof_recorder, }; - trie.record_all_keys(); + trie.record_all_top_trie_keys(); } // all nodes of this changes trie should be pruned diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index ee2599d09548a..53bb62675d9bb 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -19,6 +19,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Prefix, EMPTY_PREFIX}; use sp_core::Hasher; +use sp_core::storage::ChildInfo; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; @@ -190,10 +191,11 @@ impl Storage for InMemoryStorage Result, String> { - 
MemoryDB::::get(&self.data.read().mdb, key, prefix) + MemoryDB::::get(&self.data.read().mdb, child_info, key, prefix) } } @@ -212,9 +214,10 @@ impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Numbe fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.storage.get(key, prefix) + self.storage.get(child_info, key, prefix) } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ae6dd9b2dbf68..e4eca1181089e 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -23,13 +23,14 @@ use log::debug; use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; use sp_core::{Hasher, InnerHasher}; use sp_trie::{ - MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys + MemoryDB, default_child_trie_root, read_trie_value_with, + record_all_keys, }; pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; -use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage, TrieBackendStorageRef}; +use crate::trie_backend_essence::{BackendStorageDBRef, TrieBackendEssence, + TrieBackendStorage, TrieBackendStorageRef}; use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; @@ -125,15 +126,15 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> { /// Produce proof for a key query. 
pub fn storage(&mut self, key: &[u8]) -> Result>, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + let child_info = ChildInfo::top_trie(); + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + &child_info, ); let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value_with::, _, Ephemeral>( + read_trie_value_with::, _, BackendStorageDBRef>( &eph, self.backend.root(), key, @@ -146,36 +147,33 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> &mut self, storage_key: &[u8], child_info: &ChildInfo, - key: &[u8] + key: &[u8], ) -> Result>, String> { let root = self.storage(storage_key)? .and_then(|r| Decode::decode(&mut &r[..]).ok()) .unwrap_or(default_child_trie_root::>(storage_key)); - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + child_info, ); let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value_with::, _, _>( - storage_key, - child_info.keyspace(), + read_trie_value_with::, _, _>( &eph, - &root.as_ref(), + &root, key, &mut *self.proof_recorder ).map_err(map_e) } /// Produce proof for the whole backend. 
- pub fn record_all_keys(&mut self) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + pub fn record_all_top_trie_keys(&mut self) { + let child_info = ChildInfo::top_trie(); + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + &child_info, ); let mut iter = move || -> Result<(), Box>> { @@ -248,13 +246,14 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } - let backend_value = self.backend.get(key, prefix)?; + let backend_value = self.backend.get(child_info, key, prefix)?; self.proof_recorder.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 1847fb89bb33e..dfe0e43f76dc9 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -30,6 +30,9 @@ use crate::{ /// for this trie and child tries. pub struct TrieBackend, H: Hasher> { essence: TrieBackendEssence, + // storing child_info of top trie even if it is in + // theory a bit useless (no heap alloc on empty vec). 
+ top_trie: ChildInfo, } impl, H: Hasher> TrieBackend where H::Out: Codec { @@ -37,6 +40,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root), + top_trie: ChildInfo::top_trie(), } } @@ -75,7 +79,7 @@ impl, H: Hasher> Backend for TrieBackend where type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.storage(key) + self.essence.storage(&self.top_trie, key) } fn child_storage( @@ -85,14 +89,14 @@ impl, H: Hasher> Backend for TrieBackend where key: &[u8], ) -> Result, Self::Error> { if let Some(essence) = self.child_essence(storage_key, child_info)? { - essence.storage(key) + essence.storage(child_info, key) } else { Ok(None) } } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.next_storage_key(key) + self.essence.next_storage_key(&self.top_trie, key) } fn next_child_storage_key( @@ -102,18 +106,18 @@ impl, H: Hasher> Backend for TrieBackend where key: &[u8], ) -> Result, Self::Error> { if let Some(essence) = self.child_essence(storage_key, child_info)? 
{ - essence.next_storage_key(key) + essence.next_storage_key(child_info, key) } else { Ok(None) } } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_keys_with_prefix(prefix, f) + self.essence.for_keys_with_prefix(&self.top_trie, prefix, f) } fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_key_values_with_prefix(prefix, f) + self.essence.for_key_values_with_prefix(&self.top_trie, prefix, f) } fn for_keys_in_child_storage( @@ -123,7 +127,7 @@ impl, H: Hasher> Backend for TrieBackend where f: F, ) { if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { - essence.for_keys(f) + essence.for_keys(child_info, f) } } @@ -135,12 +139,12 @@ impl, H: Hasher> Backend for TrieBackend where f: F, ) { if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { - essence.for_keys_with_prefix(prefix, f) + essence.for_keys_with_prefix(child_info, prefix, f) } } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - let eph = BackendStorageDBRef::new(self.essence.backend_storage()); + let eph = BackendStorageDBRef::new(self.essence.backend_storage(), &self.top_trie); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -163,7 +167,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn keys(&self, prefix: &[u8]) -> Vec { - let eph = BackendStorageDBRef::new(self.essence.backend_storage()); + let eph = BackendStorageDBRef::new(self.essence.backend_storage(), &self.top_trie); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -190,6 +194,7 @@ impl, H: Hasher> Backend for TrieBackend where { let mut eph = Ephemeral::new( self.essence.backend_storage(), + &self.top_trie, &mut write_overlay, ); @@ -199,7 +204,7 @@ impl, H: Hasher> Backend for TrieBackend where } } let mut tx = ChildrenMap::default(); - tx.insert(ChildInfo::top_trie(), write_overlay); + tx.insert(self.top_trie.clone(), write_overlay); (root, tx) 
} @@ -230,6 +235,7 @@ impl, H: Hasher> Backend for TrieBackend where // Do not write prefix in overlay. let mut eph = Ephemeral::new( &child_essence, + child_info, &mut write_overlay, ); @@ -281,7 +287,7 @@ pub mod tests { use std::collections::HashSet; use sp_core::{Blake2Hasher, H256}; use codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use super::*; const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; @@ -289,11 +295,9 @@ pub mod tests { const CHILD_UUID_1: &[u8] = b"unique_id_1"; fn test_db() -> (PrefixedMemoryDB, H256) { - let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info1.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index b4f24502d9c3c..291c613174255 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -25,7 +25,7 @@ use sp_core::Hasher; use hash_db::{self, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, read_trie_value, check_if_empty_root, - for_keys_in_trie, KeySpacedDB, keyspace_as_prefix_alloc}; + for_keys_in_trie}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -74,8 +74,8 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. 
- pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { - let eph = BackendStorageDBRef::new(&self.storage); + pub fn next_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { + let eph = BackendStorageDBRef::new(&self.storage, child_info); let trie = TrieDB::::new(&eph, &self.root) .map_err(|e| format!("TrieDB creation error: {}", e))?; @@ -107,8 +107,8 @@ impl, H: Hasher> TrieBackendEssence where H::O } /// Get the value of storage at given key. - pub fn storage(&self, key: &[u8]) -> Result, String> { - let eph = BackendStorageDBRef::new(&self.storage); + pub fn storage(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { + let eph = BackendStorageDBRef::new(&self.storage, child_info); let map_e = |e| format!("Trie lookup error: {}", e); @@ -118,9 +118,10 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Retrieve all entries keys of storage and call `f` for each of those keys. pub fn for_keys( &self, + child_info: &ChildInfo, f: F, ) { - let eph = BackendStorageDBRef::new(&self.storage); + let eph = BackendStorageDBRef::new(&self.storage, child_info); if let Err(e) = for_keys_in_trie::, _, BackendStorageDBRef>( &eph, @@ -132,8 +133,8 @@ impl, H: Hasher> TrieBackendEssence where H::O } /// Execute given closure for all keys starting with prefix. 
- pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) + pub fn for_keys_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], mut f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), child_info) } fn keys_values_with_prefix_inner( @@ -141,9 +142,9 @@ impl, H: Hasher> TrieBackendEssence where H::O root: &H::Out, prefix: &[u8], mut f: F, - child_info: Option<&ChildInfo>, + child_info: &ChildInfo, ) { - let eph = BackendStorageDBRef::new(&self.storage); + let eph = BackendStorageDBRef::new(&self.storage, child_info); let mut iter = move |db| -> Result<(), Box>> { let trie = TrieDB::::new(db, root)?; @@ -164,20 +165,14 @@ impl, H: Hasher> TrieBackendEssence where H::O Ok(()) }; - let result = if let Some(child_info) = child_info { - let db = KeySpacedDB::new(&eph, child_info.keyspace()); - iter(&db) - } else { - iter(&eph) - }; - if let Err(e) = result { + if let Err(e) = iter(&eph) { debug!(target: "trie", "Error while iterating by prefix: {}", e); } } /// Execute given closure for all key and values starting with prefix. 
- pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f, None) + pub fn for_key_values_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, f, child_info) } } @@ -187,6 +182,7 @@ pub(crate) struct Ephemeral<'a, S, H, O> where O: hash_db::HashDB + Default + Consolidate, { storage: &'a S, + child_info: &'a ChildInfo, overlay: &'a mut O, _ph: PhantomData, } @@ -196,6 +192,7 @@ pub(crate) struct BackendStorageDBRef<'a, S, H> where H: 'a + Hasher, { storage: &'a S, + child_info: &'a ChildInfo, _ph: PhantomData, } @@ -224,9 +221,10 @@ impl<'a, S, H, O> Ephemeral<'a, S, H, O> where H: 'a + Hasher, O: hash_db::HashDB + Default + Consolidate, { - pub fn new(storage: &'a S, overlay: &'a mut O) -> Self { + pub fn new(storage: &'a S, child_info: &'a ChildInfo, overlay: &'a mut O) -> Self { Ephemeral { storage, + child_info, overlay, _ph: PhantomData, } @@ -237,9 +235,10 @@ impl<'a, S, H> BackendStorageDBRef<'a, S, H> where S: 'a + TrieBackendStorageRef, H: 'a + Hasher, { - pub fn new(storage: &'a S) -> Self { + pub fn new(storage: &'a S, child_info: &'a ChildInfo) -> Self { BackendStorageDBRef { storage, + child_info, _ph: PhantomData, } } @@ -276,7 +275,7 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { - match self.storage.get(&key, EMPTY_PREFIX) { + match self.storage.get(self.child_info, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -300,7 +299,7 @@ impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, return Some(vec![0u8]); } - match self.storage.get(&key, EMPTY_PREFIX) { + match self.storage.get(self.child_info, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -351,7 +350,7 @@ impl<'a, S, H, O> 
hash_db::HashDBRef for Ephemeral<'a, S, H, O> wher if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) } else { - match self.storage.get(&key, prefix) { + match self.storage.get(self.child_info, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -375,7 +374,7 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> return Some(vec![0u8]); } - match self.storage.get(&key, prefix) { + match self.storage.get(self.child_info, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -397,6 +396,7 @@ pub trait TrieBackendStorageRef { /// Get the value stored at key. fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -408,22 +408,24 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} // This implementation is used by normal storage trie clients. +// TODO remove stored ChildInfo impl TrieBackendStorageRef for (Arc>, ChildInfo) { type Overlay = PrefixedMemoryDB; fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { - Storage::::get(self.0.deref(), &self.1, key, prefix) + Storage::::get(self.0.deref(), child_info, key, prefix) } } - /// This is an essence for the child trie backend. pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { db: &'a B, + // TODO is it usefull? 
-> seems like not -> TODO remove this struct info: Option<&'a ChildInfo>, _ph: PhantomData, } @@ -444,15 +446,11 @@ impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for Ch fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { - if let Some(keyspace) = self.info.as_ref().map(|ci| ci.keyspace()) { - let prefix = keyspace_as_prefix_alloc(keyspace, prefix); - self.db.get(key, (prefix.0.as_slice(), prefix.1)) - } else { - self.db.get(key, prefix) - } + self.db.get(child_info, key, prefix) } } @@ -463,9 +461,11 @@ impl TrieBackendStorageRef for PrefixedMemoryDB { fn get( &self, + _child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { + // No need to use keyspace for in memory db, ignoring child_info parameter. Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -475,9 +475,11 @@ impl TrieBackendStorageRef for MemoryDB { fn get( &self, + _child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { + // No need to use keyspace for in memory db, ignoring child_info parameter. Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -485,7 +487,7 @@ impl TrieBackendStorageRef for MemoryDB { #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use super::*; use crate::trie_backend::TrieBackend; use crate::backend::Backend; @@ -505,17 +507,9 @@ mod test { trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); } - { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); - // reuse of root_1 implicitly assert child trie root is same - // as top trie (contents must remain the same). 
- let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); - trie.insert(b"3", &[1]).expect("insert failed"); - trie.insert(b"4", &[1]).expect("insert failed"); - trie.insert(b"6", &[1]).expect("insert failed"); - } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); + // using top trie as child trie (both with same content) trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); }; diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 08d7b2d590866..1410a9ff1b7ef 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -26,7 +26,7 @@ mod trie_stream; use sp_std::boxed::Box; use sp_std::marker::PhantomData; use sp_std::vec::Vec; -use sp_core::{Hasher, InnerHasher, Prefix}; +use sp_core::{Hasher, InnerHasher}; use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. @@ -47,7 +47,7 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; #[derive(Default)] /// substrate trie layout -pub struct Layout(sp_std::marker::PhantomData); +pub struct Layout(PhantomData); impl TrieLayout for Layout { const USE_EXTENSION: bool = false; @@ -269,127 +269,6 @@ pub fn record_all_keys( Ok(()) } -/// Read a value from the child trie with given query. -pub fn read_child_trie_value_with, DB>( - _storage_key: &[u8], - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8], - query: Q -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) -} - -/// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. 
-pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); - -#[cfg(feature="test-helpers")] -/// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. -/// -/// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. -pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); - -/// Utility function used to merge some byte data (keyspace) and `prefix` data -/// before calling key value database primitives. -pub fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { - let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; - result[..ks.len()].copy_from_slice(ks); - result[ks.len()..].copy_from_slice(prefix.0); - (result, prefix.1) -} - -impl<'a, DB, H> KeySpacedDB<'a, DB, H> where - H: InnerHasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { - KeySpacedDB(db, ks, PhantomData) - } -} - -#[cfg(feature="test-helpers")] -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where - H: InnerHasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { - KeySpacedDBMut(db, ks, PhantomData) - } -} - -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, - H: InnerHasher, - T: From<&'static [u8]>, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -#[cfg(feature="test-helpers")] -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: InnerHasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { 
- let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.insert((&derived_prefix.0, derived_prefix.1), value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -#[cfg(feature="test-helpers")] -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: InnerHasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { - &mut *self - } -} - /// Constants used into trie simplification codec. mod trie_constants { pub const EMPTY_TRIE: u8 = 0; From 0b557b676bd7448d175a57005311bb447148a0ac Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 17:55:04 +0100 Subject: [PATCH 024/185] Clean unused struct and useless child info. 
--- client/db/src/lib.rs | 6 ++-- primitives/state-machine/src/trie_backend.rs | 22 ++++++------- .../state-machine/src/trie_backend_essence.rs | 32 +++---------------- 3 files changed, 17 insertions(+), 43 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 2f73ea3c7d2a9..06e6db6c43af1 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -91,7 +91,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - (Arc>>, ChildInfo), HasherFor + Arc>>, HasherFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. @@ -1607,7 +1607,7 @@ impl sc_client_api::backend::Backend for Backend { BlockId::Hash(h) if h == Default::default() => { let genesis_storage = DbGenesisStorage::::new(); let root = genesis_storage.0.clone(); - let db_state = DbState::::new((Arc::new(genesis_storage), ChildInfo::top_trie()), root); + let db_state = DbState::::new(Arc::new(genesis_storage), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); return Ok(CachingState::new(state, self.shared_cache.clone(), None)); }, @@ -1626,7 +1626,7 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root(); - let db_state = DbState::::new((self.storage.clone(), ChildInfo::top_trie()), *root); + let db_state = DbState::::new(self.storage.clone(), *root); let state = RefTrackingState::new( db_state, self.storage.clone(), diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index dfe0e43f76dc9..6f9bd8b810c6a 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -23,7 +23,7 @@ use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, - 
trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef, ChildTrieBackendStorage}, + trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, }; /// Patricia trie-based backend. Transaction type is overlays of changes to commit @@ -88,7 +88,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key, child_info)? { + if let Some(essence) = self.child_essence(storage_key)? { essence.storage(child_info, key) } else { Ok(None) @@ -105,7 +105,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key, child_info)? { + if let Some(essence) = self.child_essence(storage_key)? { essence.next_storage_key(child_info, key) } else { Ok(None) @@ -126,7 +126,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { + if let Ok(Some(essence)) = self.child_essence(storage_key) { essence.for_keys(child_info, f) } } @@ -138,7 +138,7 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { + if let Ok(Some(essence)) = self.child_essence(storage_key) { essence.for_keys_with_prefix(child_info, prefix, f) } } @@ -231,10 +231,10 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info)); + let storage = self.essence.backend_storage(); // Do not write prefix in overlay. 
let mut eph = Ephemeral::new( - &child_essence, + storage, child_info, &mut write_overlay, ); @@ -267,15 +267,11 @@ impl, H: Hasher> TrieBackend where fn child_essence<'a>( &'a self, storage_key: &[u8], - child_info: &'a ChildInfo, - ) -> Result, H>>, >::Error> { + ) -> Result>, >::Error> { let root: Option = self.storage(storage_key)? .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); Ok(if let Some(root) = root { - Some(TrieBackendEssence::new(ChildTrieBackendStorage::new( - self.essence.backend_storage(), - Some(child_info), - ), root)) + Some(TrieBackendEssence::new(self.essence.backend_storage(), root)) } else { None }) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 291c613174255..2224084938a84 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -407,9 +407,7 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} -// This implementation is used by normal storage trie clients. -// TODO remove stored ChildInfo -impl TrieBackendStorageRef for (Arc>, ChildInfo) { +impl TrieBackendStorageRef for Arc> { type Overlay = PrefixedMemoryDB; fn get( @@ -418,31 +416,12 @@ impl TrieBackendStorageRef for (Arc>, ChildInfo) { key: &H::Out, prefix: Prefix, ) -> Result, String> { - Storage::::get(self.0.deref(), child_info, key, prefix) + Storage::::get(self.deref(), child_info, key, prefix) } } -/// This is an essence for the child trie backend. -pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { - db: &'a B, - // TODO is it usefull? -> seems like not -> TODO remove this struct - info: Option<&'a ChildInfo>, - _ph: PhantomData, -} - -impl<'a, H: Hasher, B: TrieBackendStorageRef> ChildTrieBackendStorage<'a, H, B> { - /// Instantiate a `ChildTrieBackendStorage`. 
- pub fn new(db: &'a B, info: Option<&'a ChildInfo>) -> Self { - ChildTrieBackendStorage { - db, - info, - _ph: PhantomData, - } - } -} - -impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for ChildTrieBackendStorage<'a, H, B> { - type Overlay = PrefixedMemoryDB; +impl> TrieBackendStorageRef for &S { + type Overlay = >::Overlay; fn get( &self, @@ -450,11 +429,10 @@ impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for Ch key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.db.get(child_info, key, prefix) + >::get(self, child_info, key, prefix) } } - // This implementation is used by test storage trie clients. impl TrieBackendStorageRef for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; From 8483da40f31607ac8717841271d806ce09936b1f Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 17:59:17 +0100 Subject: [PATCH 025/185] remove todo --- client/db/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 06e6db6c43af1..8e5b401bc3db5 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -686,7 +686,6 @@ impl sc_state_db::NodeDb for StorageDb { self.db.get(columns::STATE, key) } else { let keyspace = child_info.keyspace(); - // TODO try to switch api to &mut and use a key buffer from StorageDB let mut key_buffer = vec![0; keyspace.len() + key.len()]; key_buffer[..keyspace.len()].copy_from_slice(keyspace); key_buffer[keyspace.len()..].copy_from_slice(&key[..]); From 64ffcead72174a4daa55bf9425b7440c47d7f63b Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 19:24:45 +0100 Subject: [PATCH 026/185] actual touch to keyspace prefixing (fail on wrong code asserted). 
--- client/db/src/lib.rs | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 8e5b401bc3db5..a400b06bdb1ce 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1811,6 +1811,10 @@ pub(crate) mod tests { #[test] fn set_state_data() { let db = Backend::::new_test(2, 0); + + let child_info = sp_core::storage::ChildInfo::new_default(b"unique_id"); + let storage_key = b":child_storage:default:key1"; + let hash = { let mut op = db.begin_operation().unwrap(); db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); @@ -1827,16 +1831,28 @@ pub(crate) mod tests { (vec![1, 2, 3], vec![9, 9, 9]), ]; - header.state_root = op.old_state.storage_root(storage + let child_storage = vec![ + (vec![2, 3, 5], Some(vec![4, 4, 6])), + (vec![2, 2, 3], Some(vec![7, 9, 9])), + ]; + + header.state_root = op.old_state.full_storage_root(storage .iter() .cloned() - .map(|(x, y)| (x, Some(y))) + .map(|(x, y)| (x, Some(y))), + vec![(storage_key.to_vec(), child_storage.clone(), child_info.clone())], + false, ).0.into(); let hash = header.hash(); + let mut children = HashMap::default(); + children.insert(storage_key.to_vec(), sp_core::storage::StorageChild { + child_info: child_info.clone(), + data: child_storage.iter().map(|(k, v)| (k.clone(), v.clone().unwrap())).collect(), + }); op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children: Default::default(), + children, }).unwrap(); op.set_block_data( header.clone(), @@ -1852,6 +1868,10 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); + assert_eq!( + state.child_storage(&storage_key[..], &child_info, &[2, 3, 5]).unwrap(), + Some(vec![4, 4, 6]), + ); hash }; @@ -1890,6 +1910,12 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 3, 
5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); + assert_eq!( + state.child_storage(&storage_key[..], &child_info, &[2, 3, 5]).unwrap(), + Some(vec![4, 4, 6]), + ); + + } } From 2bc3cb610273a15bef05f6133dc4b54c3f13ea94 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 5 Feb 2020 13:43:48 +0100 Subject: [PATCH 027/185] self review changes. --- client/chain-spec/src/chain_spec.rs | 5 +++- client/db/src/changes_tries_storage.rs | 7 ++--- client/db/src/lib.rs | 27 +++++++++--------- client/db/src/storage_cache.rs | 4 +-- client/network/src/protocol.rs | 5 ++-- client/rpc/src/state/state_full.rs | 3 +- client/rpc/src/state/tests.rs | 6 ++-- client/src/client.rs | 3 +- client/src/light/fetcher.rs | 6 ++-- client/state-db/src/lib.rs | 16 +++++------ client/state-db/src/noncanonical.rs | 4 +-- client/state-db/src/pruning.rs | 28 +++++++------------ frame/contracts/src/account_db.rs | 17 ++++------- frame/contracts/src/exec.rs | 6 ++-- frame/contracts/src/lib.rs | 5 ++-- frame/contracts/src/tests.rs | 12 ++++---- primitives/state-machine/Cargo.toml | 1 - primitives/state-machine/src/basic.rs | 4 +-- .../state-machine/src/changes_trie/build.rs | 17 +++++------ .../src/changes_trie/changes_iterator.rs | 7 +++-- .../state-machine/src/changes_trie/mod.rs | 6 ++-- .../state-machine/src/changes_trie/storage.rs | 6 ++-- primitives/state-machine/src/ext.rs | 2 +- primitives/state-machine/src/lib.rs | 6 ++-- .../state-machine/src/overlayed_changes.rs | 8 +++--- .../state-machine/src/proving_backend.rs | 7 ++--- primitives/storage/src/lib.rs | 23 ++++++++++----- primitives/trie/Cargo.toml | 1 - 28 files changed, 107 insertions(+), 135 deletions(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 173941f6624c6..6bc3145534a06 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -77,7 +77,10 @@ 
impl BuildStorage for ChainSpec { Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = ChildInfo::new_default(child_content.child_info.as_slice()); + let child_info = ChildInfo::resolve_child_info( + child_content.child_type, + child_content.child_info.as_slice(), + ).expect("chain spec contains correct content"); ( sk.0, StorageChild { diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index f5c1d34688e23..93bfd8b4cc673 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -488,11 +488,9 @@ where fn get( &self, - child_info: &sp_core::storage::ChildInfo, key: &Block::Hash, _prefix: Prefix, ) -> Result, String> { - debug_assert!(child_info.is_top_trie()); self.db.get(self.changes_tries_column, key.as_ref()) .map_err(|err| format!("{}", err)) } @@ -532,6 +530,7 @@ mod tests { }; use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; use sp_core::H256; + use sp_core::storage::ChildInfo; use sp_runtime::testing::{Digest, Header}; use sp_runtime::traits::{Hash, BlakeTwo256}; use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; @@ -596,9 +595,8 @@ mod tests { assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); let storage = backend.changes_tries_storage.storage(); - let top_trie = sp_core::storage::ChildInfo::top_trie(); for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(storage.get(&top_trie, &key, EMPTY_PREFIX), Ok(Some(val))); + assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); } }; @@ -708,7 +706,6 @@ mod tests { .cloned(); match trie_root { Some(trie_root) => backend.changes_tries_storage.get( - &sp_core::storage::ChildInfo::top_trie(), &trie_root, EMPTY_PREFIX, ).unwrap().is_none(), diff --git a/client/db/src/lib.rs 
b/client/db/src/lib.rs index a400b06bdb1ce..7642e944dfd7a 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -688,7 +688,7 @@ impl sc_state_db::NodeDb for StorageDb { let keyspace = child_info.keyspace(); let mut key_buffer = vec![0; keyspace.len() + key.len()]; key_buffer[..keyspace.len()].copy_from_slice(keyspace); - key_buffer[keyspace.len()..].copy_from_slice(&key[..]); + key_buffer[keyspace.len()..].copy_from_slice(key); self.db.get(columns::STATE, &key_buffer[..]) }.map(|r| r.map(|v| v.to_vec())) } @@ -1128,18 +1128,18 @@ impl Backend { let mut ops: u64 = 0; let mut bytes: u64 = 0; for (info, mut updates) in operation.db_updates.into_iter() { - let data = changesets.entry(info).or_default(); + let changeset = changesets.entry(info).or_default(); for (key, (val, rc)) in updates.drain() { if rc > 0 { ops += 1; bytes += key.len() as u64 + val.len() as u64; - data.inserted.push((key, val.to_vec())); + changeset.inserted.push((key, val.to_vec())); } else if rc < 0 { ops += 1; bytes += key.len() as u64; - data.deleted.push(key); + changeset.deleted.push(key); } } } @@ -1334,8 +1334,15 @@ impl Backend { fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { let mut key_buffer = Vec::new(); for child_data in commit.data.into_iter() { - if !child_data.0.is_top_trie() { - // children tries with prefixes + if child_data.0.is_top_trie() { + // empty prefix + for (key, val) in child_data.1.inserted.into_iter() { + transaction.put(columns::STATE, &key[..], &val); + } + for key in child_data.1.deleted.into_iter() { + transaction.delete(columns::STATE, &key[..]); + } + } else { let keyspace = child_data.0.keyspace(); let keyspace_len = keyspace.len(); key_buffer.resize(keyspace_len, 0); @@ -1350,14 +1357,6 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.delete(columns::STATE, &key_buffer[..]); } - } else { - // top trie without 
prefixes - for (key, val) in child_data.1.inserted.into_iter() { - transaction.put(columns::STATE, &key[..], &val); - } - for key in child_data.1.deleted.into_iter() { - transaction.delete(columns::STATE, &key[..]); - } } } for (key, val) in commit.meta.inserted.into_iter() { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 9a5c15e9910e6..2dd27a2e3cbd3 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -677,8 +677,6 @@ mod tests { type Block = RawBlock>; - const CHILD_KEY_1: &'static [u8] = b"unique_id_1"; - #[test] fn smoke() { //init_log(); @@ -968,7 +966,7 @@ mod tests { #[test] fn should_track_used_size_correctly() { - let child_info1 = ChildInfo::new_default(CHILD_KEY_1); + let child_info1 = ChildInfo::new_default(b"unique_id_1"); let root_parent = H256::random(); let shared = new_shared_cache::(109, ((109-36), 109)); let h0 = H256::random(); diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 68352b3f404fb..1207b7f883145 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::Substream, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo, ChildType}; +use sp_core::storage::{StorageKey, ChildInfo}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1555,8 +1555,7 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = if ChildType::CryptoUniqueId as u32 == request.child_type { - let child_info = ChildInfo::new_default(&request.child_info[..]); + let proof = if let Some(child_info) = 
ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { match self.context_data.chain.read_child_proof( &request.block, &request.storage_key, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 220edd4860e5e..d396b191a2235 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,8 +33,7 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, - Bytes, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, }; use sp_version::RuntimeVersion; use sp_runtime::{ diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index dd26a8a42fac2..f459a5391b7ea 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,7 +30,7 @@ use substrate_test_runtime_client::{ runtime, }; -const CHILD_INFO: &'static [u8] = b"unique_id"; +const CHILD_UID: &'static [u8] = b"unique_id"; #[test] fn should_return_storage() { @@ -38,7 +38,7 @@ fn should_return_storage() { const VALUE: &[u8] = b"hello world"; const STORAGE_KEY: &[u8] = b":child_storage:default:child"; const CHILD_VALUE: &[u8] = b"hello world !"; - let child_info1 = ChildInfo::new_default(CHILD_INFO); + let child_info1 = ChildInfo::new_default(CHILD_UID); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) @@ -77,7 +77,7 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let child_info1 = ChildInfo::new_default(CHILD_INFO); + let child_info1 = ChildInfo::new_default(CHILD_UID); let (child_info, child_type) = child_info1.info(); let child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); diff --git a/client/src/client.rs b/client/src/client.rs index 888bd88428863..7acef6a4a910c 100644 --- 
a/client/src/client.rs +++ b/client/src/client.rs @@ -566,11 +566,10 @@ impl Client where fn get( &self, - child_info: &ChildInfo, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { - self.storage.get(child_info, key, prefix) + self.storage.get(key, prefix) } } diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 8bcbb80c775a3..a4168f356e609 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -351,7 +351,7 @@ pub mod tests { use sp_state_machine::Backend; use super::*; - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; + const CHILD_UID_1: &'static [u8] = b"unique_id_1"; type TestChecker = LightDataChecker< NativeExecutor, @@ -399,7 +399,7 @@ pub mod tests { } fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info1 = ChildInfo::new_default(CHILD_UID_1); use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; // prepare remote client @@ -506,7 +506,7 @@ pub mod tests { result, ) = prepare_for_read_child_proof_check(); - let child_info = ChildInfo::new_default(CHILD_INFO_1); + let child_info = ChildInfo::new_default(CHILD_UID_1); let child_infos = child_info.info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 992e8fa81f250..77373ce47649b 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -141,12 +141,12 @@ pub struct CommitSet { } impl CommitSet { - /// Number of inserted key value element in the set. + /// Number of inserted key value elements in the set. pub fn inserted_len(&self) -> usize { self.data.iter().map(|set| set.1.inserted.len()).sum() } - /// Number of deleted key value element in the set. + /// Number of deleted key value elements in the set. pub fn deleted_len(&self) -> usize { self.data.iter().map(|set| set.1.deleted.len()).sum() } @@ -261,7 +261,7 @@ impl StateDbSync { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - mut changeset: ChildTrieChangeSets, + mut changesets: ChildTrieChangeSets, ) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { @@ -271,17 +271,17 @@ impl StateDbSync { match self.mode { PruningMode::ArchiveAll => { - for changeset in changeset.iter_mut() { + for changeset in changesets.iter_mut() { changeset.1.deleted.clear(); } // write changes immediately Ok(CommitSet { - data: changeset, + data: changesets, meta: meta, }) }, PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { - let commit = self.non_canonical.insert(hash, number, parent_hash, changeset); + let commit = self.non_canonical.insert(hash, number, parent_hash, changesets); commit.map(|mut c| { c.meta.inserted.extend(meta.inserted); c @@ -456,9 +456,9 @@ impl StateDb { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - changeset: ChildTrieChangeSets, + changesets: ChildTrieChangeSets, ) -> Result, Error> { - self.db.write().insert_block(hash, number, parent_hash, changeset) + self.db.write().insert_block(hash, number, parent_hash, changesets) } /// Finalize a previously inserted block. 
diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 6d79dfeffd4bb..4f06d9dd52180 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -28,7 +28,6 @@ use log::trace; use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenVec}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; -// version at start to avoid collision when adding a unit const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; @@ -67,8 +66,7 @@ struct JournalRecordV1 { impl From> for JournalRecordV1 { // Note that this compatibility only works as long as the backend - // db strategy match the one from current implementation, that - // is for default child trie which use same state column as top. + // child storage format is the same in both case. fn from(old: JournalRecordCompat) -> Self { JournalRecordV1 { hash: old.hash, diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 1fd736913188b..a4e6fe1473fa1 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,7 +26,7 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; -use sp_core::storage::{ChildInfo, ChildrenVec}; +use sp_core::storage::{ChildInfo, ChildrenVec, ChildrenMap}; use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; @@ -40,7 +40,7 @@ pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. 
- death_index: HashMap>, + death_index: ChildrenMap>, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -51,21 +51,11 @@ pub struct RefWindow { pending_prunings: usize, } -impl RefWindow { - fn remove_death_index(&mut self, child_info: &ChildInfo, key: &Key) -> Option { - if let Some(child_index) = self.death_index.get_mut(child_info) { - child_index.remove(key) - } else { - None - } - } -} - #[derive(Debug, PartialEq, Eq)] struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: HashMap>, + deleted: ChildrenMap>, } impl DeathRow { @@ -162,10 +152,12 @@ impl RefWindow { ) { // remove all re-inserted keys from death rows for (child_info, inserted) in inserted { - for k in inserted { - if let Some(block) = self.remove_death_index(&child_info, &k) { - self.death_rows[(block - self.pending_number) as usize] - .remove_deleted(&child_info, &k); + if let Some(child_index) = self.death_index.get_mut(&child_info) { + for k in inserted { + if let Some(block) = child_index.remove(&k) { + self.death_rows[(block - self.pending_number) as usize] + .remove_deleted(&child_info, &k); + } } } } @@ -178,7 +170,7 @@ impl RefWindow { entry.insert(k.clone(), imported_block); } } - let mut deleted_death_row = HashMap::>::new(); + let mut deleted_death_row = ChildrenMap::>::default(); for (child_info, deleted) in deleted.into_iter() { let entry = deleted_death_row.entry(child_info).or_default(); entry.extend(deleted); diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index e228d3205d09c..5e85dcb4fc0fd 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -111,8 +111,7 @@ pub trait AccountDb { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<&TrieId>, - child_info: Option<&ChildInfo>, + trie_id: Option<(&TrieId, &ChildInfo)>, location: &StorageKey ) -> Option>; /// If account has an alive contract then return the code 
hash associated. @@ -131,15 +130,10 @@ impl AccountDb for DirectAccountDb { fn get_storage( &self, _account: &T::AccountId, - trie_id: Option<&TrieId>, - child_info: Option<&ChildInfo>, + trie_id: Option<(&TrieId, &ChildInfo)>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| if let Some(child_info) = child_info { - child::get_raw(id, child_info, &blake2_256(location)) - } else { - child::get_raw(id, &crate::trie_unique_id(&id[..]), &blake2_256(location)) - }) + trie_id.and_then(|(id, child_info)| child::get_raw(id, child_info, &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -345,15 +339,14 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<&TrieId>, - child_info: Option<&ChildInfo>, + trie_id: Option<(&TrieId, &ChildInfo)>, location: &StorageKey ) -> Option> { self.local .borrow() .get(account) .and_then(|changes| changes.storage(location)) - .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, child_info, location)) + .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, location)) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { self.local diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index f7830c4d1d0bc..77cb8af84a6ec 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -699,13 +699,11 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - let (trie_id, child_info) = self.ctx.self_trie_info.as_ref() - .map(|info| (Some(&info.0), Some(&info.1))) - .unwrap_or((None, None)); + let trie_id = self.ctx.self_trie_info.as_ref() + .map(|info| ((&info.0, &info.1))); self.ctx.overlay.get_storage( &self.ctx.self_account, trie_id, - child_info, key, ) } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index a40f5b8b726ad..a49d7195f4c6e 100644 --- 
a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -703,12 +703,11 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; - let child_info = Some(trie_unique_id(&contract_info.trie_id)); + let child_info = trie_unique_id(&contract_info.trie_id); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, - Some(&contract_info.trie_id), - child_info.as_ref(), + Some((&contract_info.trie_id, &child_info)), &key, ); Ok(maybe_value) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 8c9dbd96a08e7..cc29658776539 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -318,8 +318,8 @@ fn account_removal_removes_storage() { let trie_id2 = ::TrieIdGenerator::trie_id(&2); let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); - let child_info1 = Some(&child_info1); - let child_info2 = Some(&child_info2); + let child_info1 = Some((&trie_id1, &child_info1)); + let child_info2 = Some((&trie_id2, &child_info2)); let key1 = &[1; 32]; let key2 = &[2; 32]; @@ -365,15 +365,15 @@ fn account_removal_removes_storage() { // Verify that all entries from account 1 is removed, while // entries from account 2 is in place. 
{ - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), child_info1, key1).is_none()); - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), child_info2, key2).is_none()); + assert!(>::get_storage(&DirectAccountDb, &1, child_info1, key1).is_none()); + assert!(>::get_storage(&DirectAccountDb, &1, child_info1, key2).is_none()); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), child_info2, key1), + >::get_storage(&DirectAccountDb, &2, child_info2, key1), Some(b"3".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), child_info2, key2), + >::get_storage(&DirectAccountDb, &2, child_info2, key2), Some(b"4".to_vec()) ); } diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 42cbdc2e97495..a85614666701b 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -22,7 +22,6 @@ sp-externalities = { version = "0.8.0", path = "../externalities" } [dev-dependencies] hex-literal = "0.2.1" -sp-trie = { version = "2.0.0", path = "../trie", features = ["test-helpers"] } [features] default = [] diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 644c629984f69..50e4fe69c60bd 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -315,8 +315,6 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; - #[test] fn commit_should_work() { let mut ext = BasicExternalities::default(); @@ -340,7 +338,7 @@ mod tests { #[test] fn children_works() { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info1 = ChildInfo::new_default(b"unique_id_1"); let child_storage = b":child_storage:default:test".to_vec(); let mut ext = BasicExternalities::new(Storage { diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs 
index cefc4d88470a2..c0ebeff189450 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -138,7 +138,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.child_info(sk).to_owned(); + let child_info = changes.child_info(sk).clone(); ( changes.committed.children.get(sk).map(|c| &c.0), changes.prospective.children.get(sk).map(|c| &c.0), @@ -157,7 +157,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( // AND are not in storage at the beginning of operation if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if let Some(child_info) = child_info.as_deref() { + if let Some(child_info) = child_info.as_ref() { if !backend.exists_child_storage(sk, child_info, k) .map_err(|e| format!("{}", e))? { return Ok(map); @@ -354,9 +354,6 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; - const CHILD_INFO_2: &'static [u8] = b"unique_id_2"; - fn prepare_for_build(zero: u64) -> ( InMemoryBackend, InMemoryStorage, @@ -364,8 +361,8 @@ mod test { Configuration, ) { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); - let child_info2 = ChildInfo::new_default(CHILD_INFO_2); + let child_info1 = ChildInfo::new_default(b"unique_id_1"); + let child_info2 = ChildInfo::new_default(b"unique_id_2"); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -442,13 +439,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), child_info1.to_owned())), + ].into_iter().collect(), child_info1.clone())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 
2].into_iter().collect()) }) - ].into_iter().collect(), child_info2.to_owned())), + ].into_iter().collect(), child_info2)), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -471,7 +468,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect(), child_info1.to_owned())), + ].into_iter().collect(), child_info1)), ].into_iter().collect(), }, collect_extrinsics: true, diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 84be4a3f55541..dc28890c613d5 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -21,6 +21,7 @@ use std::cell::RefCell; use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; use sp_core::Hasher; +use sp_core::storage::ChildInfo; use num_traits::Zero; use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; @@ -67,7 +68,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, - child_info: sp_core::storage::ChildInfo::top_trie(), + child_info: ChildInfo::top_trie(), }) } @@ -178,7 +179,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, - child_info: sp_core::storage::ChildInfo::top_trie(), + child_info: ChildInfo::top_trie(), }.collect() } @@ -319,7 +320,7 @@ pub struct DrilldownIterator<'a, H, Number> /// This is always top trie info, but it cannot be /// statically instantiated at the time (vec of null /// size could be in theory). 
- child_info: sp_core::storage::ChildInfo, + child_info: ChildInfo, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index b6aba93108407..58deb27c1056e 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -161,11 +161,8 @@ pub trait Storage: RootsStorage { functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. - /// Note that child info is use only for case where we use this trait - /// as an adapter to storage. fn get( &self, - child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -183,7 +180,8 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBack key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.0.get(child_info, key, prefix) + debug_assert!(child_info.is_top_trie()); + self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 53bb62675d9bb..23cd3b7bf050c 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -191,11 +191,10 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, child_info, key, prefix) + MemoryDB::::get(&self.data.read().mdb, &ChildInfo::top_trie(), key, prefix) } } @@ -218,6 +217,7 @@ impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Numbe key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.storage.get(child_info, key, prefix) + debug_assert!(child_info.is_top_trie()); + self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 06ba6bd26bca9..a8ab84b399ed9 100644 --- a/primitives/state-machine/src/ext.rs +++ 
b/primitives/state-machine/src/ext.rs @@ -537,7 +537,7 @@ where } else { let storage_key = storage_key.as_ref(); - if let Some(child_info) = self.overlay.child_info(storage_key).to_owned() { + if let Some(child_info) = self.overlay.child_info(storage_key).clone() { let (root, _is_empty, _) = { let delta = self.overlay.committed.children.get(storage_key) .into_iter() diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 3aa57e9679f30..802d7937c73d8 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -702,7 +702,7 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; + const CHILD_UID_1: &'static [u8] = b"unique_id_1"; impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -933,7 +933,7 @@ mod tests { #[test] fn set_child_storage_works() { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info1 = ChildInfo::new_default(CHILD_UID_1); let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -977,7 +977,7 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info1 = ChildInfo::new_default(CHILD_UID_1); // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 4afc8a328ba8a..783608e2ae1af 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -130,7 +130,7 @@ pub struct StorageTransactionCache { pub(crate) transaction: Option, /// The storage root after applying the transaction. 
pub(crate) transaction_storage_root: Option, - /// The child root storage root after applying the transaction. + /// The storage child roots after applying the transaction. pub(crate) transaction_child_storage_root: BTreeMap>, /// Contains the changes trie transaction. pub(crate) changes_trie_transaction: Option>>, @@ -539,7 +539,7 @@ impl OverlayedChanges { ), self.child_info(storage_key) .expect("child info initialized in either committed or prospective") - .to_owned(), + .clone(), ) ); @@ -589,10 +589,10 @@ impl OverlayedChanges { /// Take the latest value so prospective first. pub fn child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { if let Some((_, ci)) = self.prospective.children.get(storage_key) { - return Some(&*ci); + return Some(&ci); } if let Some((_, ci)) = self.committed.children.get(storage_key) { - return Some(&*ci); + return Some(&ci); } None } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index e4eca1181089e..d49df322749db 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -410,9 +410,6 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; - const CHILD_INFO_2: &'static [u8] = b"unique_id_2"; - fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { @@ -481,8 +478,8 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); - let child_info2 = ChildInfo::new_default(CHILD_INFO_2); + let child_info1 = ChildInfo::new_default(b"unique_id_1"); + let child_info2 = ChildInfo::new_default(b"unique_id_2"); let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); let own1 = 
subtrie1.into_owned(); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index e4d4b5604ae2b..085805e73862f 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -253,18 +253,13 @@ impl ChildInfo { } } -/// Type of child, it is encoded in the four first byte of the -/// encoded child info (LE u32). +/// Type of child. /// It does not strictly define different child type, it can also /// be related to technical consideration or api variant. #[repr(u32)] #[derive(Clone, Copy, PartialEq)] pub enum ChildType { /// Default, it uses a cryptographic strong unique id as input. - /// All bytes following the type in encoded form are this unique - /// id. - /// If the trie got a unique id of length 0 it is considered - /// as a top child trie. CryptoUniqueId = 1, } @@ -357,13 +352,27 @@ impl ChildrenMap { } } - /// Extends two maps, by enxtending entries with the same key. + /// Extends two maps, by extending entries with the same key. pub fn extend_replace( &mut self, other: impl Iterator, ) { self.0.extend(other) } + + /// Retains only the elements specified by the predicate. + pub fn retain(&mut self, mut f: impl FnMut(&ChildInfo, &mut T) -> bool) { + let mut to_del = Vec::new(); + for (k, v) in self.0.iter_mut() { + if !f(k, v) { + // this clone can be avoid with unsafe code + to_del.push(k.clone()); + } + } + for k in to_del { + self.0.remove(&k); + } + } } #[cfg(feature = "std")] diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 6cbd19cd0f70b..a78a26db736c4 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -28,7 +28,6 @@ hex-literal = "0.2.1" [features] default = ["std"] -test-helpers = [] std = [ "sp-std/std", "codec/std", From 313635504323d5af65011537aeb0a682827cc6f5 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 5 Feb 2020 14:02:53 +0100 Subject: [PATCH 028/185] bump impl version. 
--- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f2c374cedd4bc..9dc1ce7d11054 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -80,7 +80,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. spec_version: 212, - impl_version: 0, + impl_version: 1, apis: RUNTIME_API_VERSIONS, }; From bae6523007b291bec9be8ad1fc11c5092ee6a109 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 11:32:06 +0100 Subject: [PATCH 029/185] calculate size for single operation on usize. --- client/db/src/lib.rs | 8 ++++---- client/src/cht.rs | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 7642e944dfd7a..3af8d7c384b5e 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1126,24 +1126,24 @@ impl Backend { let finalized = if operation.commit_state { let mut changesets = ChildrenMap::>>::default(); let mut ops: u64 = 0; - let mut bytes: u64 = 0; + let mut bytes = 0; for (info, mut updates) in operation.db_updates.into_iter() { let changeset = changesets.entry(info).or_default(); for (key, (val, rc)) in updates.drain() { if rc > 0 { ops += 1; - bytes += key.len() as u64 + val.len() as u64; + bytes += key.len() + val.len(); changeset.inserted.push((key, val.to_vec())); } else if rc < 0 { ops += 1; - bytes += key.len() as u64; + bytes += key.len(); changeset.deleted.push(key); } } } - self.state_usage.tally_writes(ops, bytes); + self.state_usage.tally_writes(ops, bytes as u64); let number_u64 = number.saturated_into::(); let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changesets) diff --git a/client/src/cht.rs b/client/src/cht.rs index f470ee4fbe6fa..9e1a3bff017f1 100644 --- a/client/src/cht.rs +++ 
b/client/src/cht.rs @@ -26,7 +26,7 @@ use codec::Encode; use sp_trie; -use sp_core::{H256, convert_hash, self}; +use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, SimpleArithmetic, Zero, One}; use sp_state_machine::{ MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, From 88ed5036cf39d5b59e6678db678af4ef706d11f9 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 11:54:54 +0100 Subject: [PATCH 030/185] Put keyspace logic in its own struct. --- client/db/src/changes_tries_storage.rs | 1 - client/db/src/lib.rs | 49 +++++++++++++++++--------- 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 93bfd8b4cc673..6f447f256a158 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -530,7 +530,6 @@ mod tests { }; use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; use sp_core::H256; - use sp_core::storage::ChildInfo; use sp_runtime::testing::{Digest, Header}; use sp_runtime::traits::{Hash, BlakeTwo256}; use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3af8d7c384b5e..71e2408891ed4 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -685,11 +685,8 @@ impl sc_state_db::NodeDb for StorageDb { if child_info.is_top_trie() { self.db.get(columns::STATE, key) } else { - let keyspace = child_info.keyspace(); - let mut key_buffer = vec![0; keyspace.len() + key.len()]; - key_buffer[..keyspace.len()].copy_from_slice(keyspace); - key_buffer[keyspace.len()..].copy_from_slice(key); - self.db.get(columns::STATE, &key_buffer[..]) + let mut keyspace = Keyspaced::new(child_info.keyspace()); + self.db.get(columns::STATE, keyspace.prefix_key(key)) }.map(|r| r.map(|v| v.to_vec())) } } @@ -1332,7 +1329,7 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: 
sc_state_db::CommitSet>) { - let mut key_buffer = Vec::new(); + let mut keyspace = Keyspaced::new(&[]); for child_data in commit.data.into_iter() { if child_data.0.is_top_trie() { // empty prefix @@ -1343,19 +1340,12 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm transaction.delete(columns::STATE, &key[..]); } } else { - let keyspace = child_data.0.keyspace(); - let keyspace_len = keyspace.len(); - key_buffer.resize(keyspace_len, 0); - key_buffer[..keyspace_len].copy_from_slice(keyspace); + keyspace.change_keyspace(child_data.0.keyspace()); for (key, val) in child_data.1.inserted.into_iter() { - key_buffer.resize(keyspace_len + key.len(), 0); - key_buffer[keyspace_len..].copy_from_slice(&key[..]); - transaction.put(columns::STATE, &key_buffer[..], &val); + transaction.put(columns::STATE, keyspace.prefix_key(key.as_slice()), &val); } for key in child_data.1.deleted.into_iter() { - key_buffer.resize(keyspace_len + key.len(), 0); - key_buffer[keyspace_len..].copy_from_slice(&key[..]); - transaction.delete(columns::STATE, &key_buffer[..]); + transaction.delete(columns::STATE, keyspace.prefix_key(key.as_slice())); } } } @@ -1682,6 +1672,33 @@ impl sc_client_api::backend::Backend for Backend { impl sc_client_api::backend::LocalBackend for Backend {} +/// Rules for storing a default child trie with unique id. 
+struct Keyspaced { + keyspace_len: usize, + buffer: Vec, +} + +impl Keyspaced { + fn new(keyspace: &[u8]) -> Self { + Keyspaced { + keyspace_len: keyspace.len(), + buffer: keyspace.to_vec(), + } + } + + fn change_keyspace(&mut self, new_keyspace: &[u8]) { + self.keyspace_len = new_keyspace.len(); + self.buffer.resize(new_keyspace.len(), 0); + self.buffer[..new_keyspace.len()].copy_from_slice(new_keyspace); + } + + fn prefix_key(&mut self, key: &[u8]) -> &[u8] { + self.buffer.resize(self.keyspace_len + key.len(), 0); + self.buffer[self.keyspace_len..].copy_from_slice(key); + self.buffer.as_slice() + } +} + #[cfg(test)] pub(crate) mod tests { use hash_db::{HashDB, EMPTY_PREFIX}; From 4eb467642dc538da36d9ca96169f82ebd04a6262 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 12:10:11 +0100 Subject: [PATCH 031/185] Restrict top trie in ext to the storage key build from an empty key. --- primitives/state-machine/src/ext.rs | 50 ++++++++++++++++++++++++----- primitives/storage/src/lib.rs | 11 +++++++ 2 files changed, 53 insertions(+), 8 deletions(-) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index a8ab84b399ed9..667b073eb4cf8 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -210,7 +210,11 @@ where key: &[u8], ) -> Option { if child_info.is_top_trie() { - return self.storage(key); + if storage_key.is_empty() { + return self.storage(key); + } else { + return None; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay @@ -238,7 +242,11 @@ where key: &[u8], ) -> Option> { if child_info.is_top_trie() { - return self.storage_hash(key); + if storage_key.is_empty() { + return self.storage_hash(key); + } else { + return None; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay @@ -266,7 +274,11 @@ where key: &[u8], ) -> Option { if child_info.is_top_trie() { - return self.original_storage(key); + if 
storage_key.is_empty() { + return self.original_storage(key); + } else { + return None; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend @@ -290,7 +302,11 @@ where key: &[u8], ) -> Option> { if child_info.is_top_trie() { - return self.original_storage_hash(key); + if storage_key.is_empty() { + return self.original_storage_hash(key); + } else { + return None; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend @@ -329,7 +345,11 @@ where key: &[u8], ) -> bool { if child_info.is_top_trie() { - return self.exists_storage(key); + if storage_key.is_empty() { + return self.exists_storage(key); + } else { + return false; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -371,7 +391,11 @@ where key: &[u8], ) -> Option { if child_info.is_top_trie() { - return self.next_storage_key(key); + if storage_key.is_empty() { + return self.next_storage_key(key); + } else { + return None; + } } let next_backend_key = self.backend .next_child_storage_key(storage_key.as_ref(), child_info, key) @@ -420,7 +444,12 @@ where value: Option, ) { if child_info.is_top_trie() { - return self.place_storage(key, value); + if storage_key.is_empty() { + return self.place_storage(key, value); + } else { + trace!(target: "state-trace", "Ignoring place_child_storage on top trie"); + return; + } } trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, @@ -481,7 +510,12 @@ where prefix: &[u8], ) { if child_info.is_top_trie() { - return self.clear_prefix(prefix); + if storage_key.is_empty() { + return self.clear_prefix(prefix); + } else { + trace!(target: "state-trace", "Ignoring clear_child_prefix on top trie"); + return; + } } trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 085805e73862f..69e746f725267 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -127,6 +127,11 
@@ pub mod well_known_keys { } has_right_prefix } + + /// Return true if the variable part of the key is empty. + pub fn is_child_trie_key_empty(storage_key: &[u8]) -> bool { + storage_key.len() == b":child_storage:default:".len() + } } /// A wrapper around a child storage key. @@ -176,6 +181,12 @@ impl<'a> ChildStorageKey<'a> { pub fn into_owned(self) -> Vec { self.storage_key.into_owned() } + + /// Return true if the variable part of the key is empty. + pub fn is_empty(&self) -> bool { + well_known_keys::is_child_trie_key_empty(&*self.storage_key) + } + } From 5151471c85b95d16ac7fedf1dc6e4a451bc1736a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 17:00:32 +0100 Subject: [PATCH 032/185] Implementation basis for this PR, note that child storage key default prefix will be added at query and on full storage root lazilly. Also note that both type are implementation compatible so we do not need a different well known key. --- primitives/storage/src/lib.rs | 92 +++++++++++++++-------------------- 1 file changed, 40 insertions(+), 52 deletions(-) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index d32c54aae8c47..da42d29c79688 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -22,7 +22,7 @@ use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; -use sp_std::{vec::Vec, borrow::Cow}; +use sp_std::vec::Vec; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -126,58 +126,10 @@ pub mod well_known_keys { } } -/// A wrapper around a child storage key. -/// -/// This wrapper ensures that the child storage key is correct and properly used. It is -/// impossible to create an instance of this struct without providing a correct `storage_key`. -pub struct ChildStorageKey<'a> { - storage_key: Cow<'a, [u8]>, -} - -impl<'a> ChildStorageKey<'a> { - /// Create new instance of `Self`. 
- fn new(storage_key: Cow<'a, [u8]>) -> Option { - if well_known_keys::is_child_trie_key_valid(&storage_key) { - Some(ChildStorageKey { storage_key }) - } else { - None - } - } - - /// Create a new `ChildStorageKey` from a vector. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_vec(key: Vec) -> Option { - Self::new(Cow::Owned(key)) - } - - /// Create a new `ChildStorageKey` from a slice. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_slice(key: &'a [u8]) -> Option { - Self::new(Cow::Borrowed(key)) - } - - /// Get access to the byte representation of the storage key. - /// - /// This key is guaranteed to be correct. - pub fn as_ref(&self) -> &[u8] { - &*self.storage_key - } - - /// Destruct this instance into an owned vector that represents the storage key. - /// - /// This key is guaranteed to be correct. - pub fn into_owned(self) -> Vec { - self.storage_key.into_owned() - } -} - #[derive(Clone, Copy)] /// Information related to a child state. pub enum ChildInfo<'a> { + ParentKeyId(ChildTrie<'a>), Default(ChildTrie<'a>), } @@ -186,10 +138,18 @@ pub enum ChildInfo<'a> { #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum OwnedChildInfo { + ParentKeyId(OwnedChildTrie), Default(OwnedChildTrie), } impl<'a> ChildInfo<'a> { + /// Instantiates information for a default child trie. + pub const fn new_uid_parent_key(storage_key: &'a[u8]) -> Self { + ChildInfo::ParentKeyId(ChildTrie { + data: storage_key, + }) + } + /// Instantiates information for a default child trie. 
pub const fn new_default(unique_id: &'a[u8]) -> Self { ChildInfo::Default(ChildTrie { @@ -204,12 +164,23 @@ impl<'a> ChildInfo<'a> { => OwnedChildInfo::Default(OwnedChildTrie { data: data.to_vec(), }), + ChildInfo::ParentKeyId(ChildTrie { data }) + => OwnedChildInfo::ParentKeyId(OwnedChildTrie { + data: data.to_vec(), + }), } } /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, data: &'a[u8]) -> Option { + pub fn resolve_child_info(child_type: u32, data: &'a[u8], storage_key: &'a[u8]) -> Option { match child_type { + x if x == ChildType::ParentKeyId as u32 => { + if !data.len() == 0 { + // do not allow anything for additional data. + return None; + } + Some(ChildInfo::new_uid_parent_key(storage_key)) + }, x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), _ => None, } @@ -219,6 +190,9 @@ impl<'a> ChildInfo<'a> { /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { match self { + ChildInfo::ParentKeyId(ChildTrie { + data, + }) => (data, ChildType::ParentKeyId as u32), ChildInfo::Default(ChildTrie { data, }) => (data, ChildType::CryptoUniqueId as u32), @@ -230,6 +204,9 @@ impl<'a> ChildInfo<'a> { /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { match self { + ChildInfo::ParentKeyId(ChildTrie { + data, + }) => &data[..], ChildInfo::Default(ChildTrie { data, }) => &data[..], @@ -242,7 +219,11 @@ impl<'a> ChildInfo<'a> { /// be related to technical consideration or api variant. #[repr(u32)] pub enum ChildType { - /// Default, it uses a cryptographic strong unique id as input. + /// If runtime module ensures that the child key is a unique id that will + /// only be used once, this parent key is used as a child trie unique id. 
+ ParentKeyId = 0, + /// Default, this uses a cryptographic strong unique id as input, this id + /// is used as a unique child trie identifier. CryptoUniqueId = 1, } @@ -259,6 +240,7 @@ impl OwnedChildInfo { pub fn try_update(&mut self, other: ChildInfo) -> bool { match self { OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), + OwnedChildInfo::ParentKeyId(owned_child_trie) => owned_child_trie.try_update(other), } } @@ -269,6 +251,11 @@ impl OwnedChildInfo { => ChildInfo::Default(ChildTrie { data: data.as_slice(), }), + OwnedChildInfo::ParentKeyId(OwnedChildTrie { data }) + => ChildInfo::ParentKeyId(ChildTrie { + data: data.as_slice(), + }), + } } } @@ -300,6 +287,7 @@ impl OwnedChildTrie { fn try_update(&mut self, other: ChildInfo) -> bool { match other { ChildInfo::Default(other) => self.data[..] == other.data[..], + ChildInfo::ParentKeyId(other) => self.data[..] == other.data[..], } } } From 8715446e9a356778875b967a7e5251f68130153d Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 17:24:01 +0100 Subject: [PATCH 033/185] Resolve prefix from child_info --- primitives/storage/src/lib.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index da42d29c79688..161e90dc848bf 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -113,7 +113,7 @@ pub mod well_known_keys { /// /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { - let has_right_prefix = storage_key.starts_with(b":child_storage:default:"); + let has_right_prefix = storage_key.starts_with(super::DEFAULT_CHILD_TYPE_PARENT_PREFIX); if has_right_prefix { // This is an attempt to catch a change of `is_child_storage_key`, which // just checks if the key has prefix `:child_storage:` at the moment of writing. 
@@ -212,6 +212,16 @@ impl<'a> ChildInfo<'a> { }) => &data[..], } } + + /// Return the location reserved for this child trie in their parent trie if there + /// is one. + pub fn parent_prefix(&self, _parent: Option<&'a ChildInfo>) -> &'a [u8] { + match self { + ChildInfo::ParentKeyId(..) + | ChildInfo::Default(..) => DEFAULT_CHILD_TYPE_PARENT_PREFIX, + } + } + } /// Type of child. @@ -291,3 +301,12 @@ impl OwnedChildTrie { } } } + +const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; + +#[test] +fn assert_default_trie_in_child_trie() { + let child_info = ChildInfo::new_default(b"any key"); + let prefix = child_info.parent_prefix(None); + assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); +} From b72cd9c80377261f385e4488b1e404432b63017b Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 18:53:48 +0100 Subject: [PATCH 034/185] Removed all child prefix, next is putting it back on full storage and on backend access. --- client/chain-spec/src/chain_spec.rs | 5 +- client/network/src/protocol.rs | 6 +- client/rpc/src/state/state_full.rs | 18 ++--- client/rpc/src/state/tests.rs | 2 +- client/src/light/fetcher.rs | 8 +- primitives/externalities/src/lib.rs | 28 +++---- primitives/io/src/lib.rs | 62 +++++---------- primitives/state-machine/src/basic.rs | 45 +++++------ primitives/state-machine/src/ext.rs | 78 +++++++++---------- primitives/state-machine/src/lib.rs | 24 +++--- .../state-machine/src/proving_backend.rs | 10 +-- primitives/state-machine/src/trie_backend.rs | 3 +- primitives/storage/src/lib.rs | 3 + test-utils/runtime/src/lib.rs | 2 +- 14 files changed, 134 insertions(+), 160 deletions(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 81cbce5ea731c..b47c41f107ccd 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -76,13 +76,14 @@ impl BuildStorage for ChainSpec { Genesis::Runtime(gc) => gc.build_storage(), 
Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - children: children_map.into_iter().map(|(sk, child_content)| { + children: children_map.into_iter().map(|(storage_key, child_content)| { let child_info = ChildInfo::resolve_child_info( child_content.child_type, child_content.child_info.as_slice(), + storage_key.0.as_slice(), ).expect("chain spec contains correct content").to_owned(); ( - sk.0, + storage_key.0, StorageChild { data: child_content.data.into_iter().map(|(k, v)| (k.0, v.0)).collect(), child_info, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 5e8df2831ba63..849cae509adfc 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1555,7 +1555,11 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { + let proof = if let Some(child_info) = ChildInfo::resolve_child_info( + request.child_type, + &request.child_info[..], + &request.storage_key[..], + ) { match self.context_data.chain.read_child_proof( &request.block, &request.storage_key, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 3d5613626e044..caf7a5787e1c3 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -308,7 +308,7 @@ impl StateBackend for FullState, - child_storage_key: StorageKey, + storage_key: StorageKey, child_info: StorageKey, child_type: u32, prefix: StorageKey, @@ -317,8 +317,8 @@ impl StateBackend for FullState StateBackend for FullState, - child_storage_key: StorageKey, + storage_key: StorageKey, child_info: StorageKey, child_type: u32, key: StorageKey, @@ -337,8 +337,8 @@ impl StateBackend for FullState 
StateBackend for FullState, - child_storage_key: StorageKey, + storage_key: StorageKey, child_info: StorageKey, child_type: u32, key: StorageKey, @@ -357,8 +357,8 @@ impl StateBackend for FullState = ChildInfo::new_default(b"unique_id"); fn should_return_storage() { const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; - const STORAGE_KEY: &[u8] = b":child_storage:default:child"; + const STORAGE_KEY: &[u8] = b"child"; const CHILD_VALUE: &[u8] = b"hello world !"; let mut core = tokio::runtime::Runtime::new().unwrap(); diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index d66108b7f0adb..477c26a0bdc7c 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -403,7 +403,7 @@ pub mod tests { // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( - b":child_storage:default:child1".to_vec(), + b"child1".to_vec(), CHILD_INFO_1, b"key1".to_vec(), b"value1".to_vec(), @@ -417,14 +417,14 @@ pub mod tests { // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( &remote_block_id, - &StorageKey(b":child_storage:default:child1".to_vec()), + &StorageKey(b"child1".to_vec()), CHILD_INFO_1, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, - b":child_storage:default:child1", + b"child1", CHILD_INFO_1, &[b"key1"], ).unwrap(); @@ -508,7 +508,7 @@ pub mod tests { &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, - storage_key: b":child_storage:default:child1".to_vec(), + storage_key: b"child1".to_vec(), child_info: child_infos.0.to_vec(), child_type: child_infos.1, keys: vec![b"key1".to_vec()], diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 350b65d190840..8beccc8201a8f 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -24,7 +24,7 @@ use std::any::{Any, TypeId}; -use sp_storage::{ChildStorageKey, ChildInfo}; +use sp_storage::ChildInfo; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; pub use extensions::{Extension, Extensions, ExtensionStore}; @@ -47,7 +47,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -60,7 +60,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -77,7 +77,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -87,7 +87,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -100,7 +100,7 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). 
fn set_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: Vec, child_info: ChildInfo, key: Vec, value: Vec, @@ -116,11 +116,11 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). fn clear_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) { - self.place_child_storage(storage_key, child_info, key.to_vec(), None) + self.place_child_storage(storage_key.to_vec(), child_info, key.to_vec(), None) } /// Whether a storage entry exists. @@ -131,7 +131,7 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. fn exists_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { @@ -144,13 +144,13 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo); + fn kill_child_storage(&mut self, storage_key: &[u8], child_info: ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -158,7 +158,7 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ); @@ -169,7 +169,7 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. Return whether the operation succeeds. 
fn place_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: Vec, child_info: ChildInfo, key: Vec, value: Option>, @@ -192,7 +192,7 @@ pub trait Externalities: ExtensionStore { /// storage map will be removed. fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], ) -> Vec; /// Get the change trie root of the current storage overlay at a block with given parent. diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 1b531725fefc8..fa3e895fc3482 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -37,7 +37,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::{ChildStorageKey, ChildInfo}, + storage::ChildInfo, }; use sp_core::{ @@ -68,19 +68,6 @@ pub enum EcdsaVerifyError { BadSignature, } -/// Returns a `ChildStorageKey` if the given `storage_key` slice is a valid storage -/// key or panics otherwise. -/// -/// Panicking here is aligned with what the `without_std` environment would do -/// in the case of an invalid child storage key. -#[cfg(feature = "std")] -fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey { - match ChildStorageKey::from_slice(storage_key) { - Some(storage_key) => storage_key, - None => panic!("child storage key is invalid"), - } -} - /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -102,13 +89,12 @@ pub trait Storage { /// if the key can not be found. 
fn child_get( &self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) } @@ -137,15 +123,14 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_read( &self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], value_out: &mut [u8], value_offset: u32, ) -> Option { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.child_storage(storage_key, child_info, key) .map(|value| { @@ -162,21 +147,20 @@ pub trait Storage { self.set_storage(key.to_vec(), value.to_vec()); } - /// Set `key` to `value` in the child storage denoted by `child_storage_key`. + /// Set `key` to `value` in the child storage denoted by `storage_key`. /// /// See `child_get` for common child api parameters. 
fn child_set( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], value: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); - self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec()); + self.set_child_storage(storage_key.to_vec(), child_info, key.to_vec(), value.to_vec()); } /// Clear the storage of the given `key` and its value. @@ -189,13 +173,12 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_clear( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.clear_child_storage(storage_key, child_info, key); } @@ -205,12 +188,11 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_storage_kill( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.kill_child_storage(storage_key, child_info); } @@ -225,13 +207,12 @@ pub trait Storage { /// See `child_get` for common child api parameters. 
fn child_exists( &self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], ) -> bool { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.exists_child_storage(storage_key, child_info, key) } @@ -246,13 +227,12 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_clear_prefix( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, prefix: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.clear_child_prefix(storage_key, child_info, prefix); } @@ -275,9 +255,8 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_root( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], ) -> Vec { - let storage_key = child_storage_key_or_panic(child_storage_key); self.child_storage_root(storage_key) } @@ -300,13 +279,12 @@ pub trait Storage { /// Get the next key in storage after the given one in lexicographic order in child storage. 
fn child_next_key( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.next_child_storage_key(storage_key, child_info, key) } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index d905657737a8a..1ca655cdaf569 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -25,7 +25,7 @@ use sp_trie::{TrieConfiguration, default_child_trie_root}; use sp_trie::trie_types::Layout; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, ChildStorageKey, Storage, + well_known_keys::is_child_storage_key, Storage, ChildInfo, StorageChild, }, traits::Externalities, Blake2Hasher, @@ -129,7 +129,7 @@ impl Externalities for BasicExternalities { fn child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], _child_info: ChildInfo, key: &[u8], ) -> Option { @@ -138,7 +138,7 @@ impl Externalities for BasicExternalities { fn child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { @@ -147,7 +147,7 @@ impl Externalities for BasicExternalities { fn original_child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { @@ -156,7 +156,7 @@ impl Externalities for BasicExternalities { fn original_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { @@ -170,7 +170,7 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], _child_info: ChildInfo, 
key: &[u8], ) -> Option { @@ -193,12 +193,12 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, value: Option, ) { - let child_map = self.inner.children.entry(storage_key.into_owned()) + let child_map = self.inner.children.entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -212,7 +212,7 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], _child_info: ChildInfo, ) { self.inner.children.remove(storage_key.as_ref()); @@ -240,7 +240,7 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], _child_info: ChildInfo, prefix: &[u8], ) { @@ -267,10 +267,7 @@ impl Externalities for BasicExternalities { // type of child trie support. let empty_hash = default_child_trie_root::>(&[]); for storage_key in keys { - let child_root = self.child_storage_root( - ChildStorageKey::from_slice(storage_key.as_slice()) - .expect("Map only feed by valid keys; qed"), - ); + let child_root = self.child_storage_root(storage_key.as_slice()); if &empty_hash[..] == &child_root[..] 
{ top.remove(storage_key.as_slice()); } else { @@ -283,7 +280,7 @@ impl Externalities for BasicExternalities { fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], ) -> Vec { if let Some(child) = self.inner.children.get(storage_key.as_ref()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); @@ -340,7 +337,7 @@ mod tests { #[test] fn children_works() { - let child_storage = b":child_storage:default:test".to_vec(); + let child_storage = b"test".to_vec(); let mut ext = BasicExternalities::new(Storage { top: Default::default(), @@ -352,18 +349,18 @@ mod tests { ] }); - let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); + let child = &child_storage[..]; - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); - ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child.to_vec(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child(), CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(child, CHILD_INFO_1, b"dog"); + assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"dog"), None); - ext.kill_child_storage(child(), CHILD_INFO_1); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(child, CHILD_INFO_1); + assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 38a2e70262d85..5b9595da9dc51 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -24,7 +24,7 @@ 
use crate::{ use hash_db::Hasher; use sp_core::{ - storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, + storage::{well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, }; use sp_trie::{trie_types::Layout, default_child_trie_root}; @@ -205,7 +205,7 @@ where fn child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { @@ -230,7 +230,7 @@ where fn child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { @@ -255,7 +255,7 @@ where fn original_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { @@ -276,7 +276,7 @@ where fn original_child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { @@ -312,7 +312,7 @@ where fn exists_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { @@ -351,7 +351,7 @@ where fn next_child_storage_key( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { @@ -396,26 +396,26 @@ where fn place_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, value: Option, ) { trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&storage_key), HexDisplay::from(&key), value.as_ref().map(HexDisplay::from) ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.set_child_storage(storage_key.into_owned(), child_info, key, value); + self.overlay.set_child_storage(storage_key, child_info, key, value); } fn kill_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: 
ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", @@ -451,7 +451,7 @@ where fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) { @@ -490,7 +490,7 @@ where fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); if self.storage_transaction_cache.transaction_storage_root.is_some() { @@ -614,8 +614,7 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - + const CHILD_KEY_1: &[u8] = b"Child1"; const CHILD_UUID_1: &[u8] = b"unique_id_1"; const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); @@ -730,20 +729,14 @@ mod tests { #[test] fn next_child_storage_key_works() { - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - let mut cache = StorageTransactionCache::default(); - let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - child().as_ref().to_vec() => StorageChild { + CHILD_KEY_1.to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], @@ -758,36 +751,35 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - 
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[40]), Some(vec![50])); } #[test] fn child_storage_works() { let mut cache = StorageTransactionCache::default(); - let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - 
child().as_ref().to_vec() => StorageChild { + CHILD_KEY_1.to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], @@ -800,24 +792,24 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[10]), + ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[20]), None); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[20]), Some(vec![20])); + assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[20]), None); + assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[20]), Some(vec![20])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[20]), + ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[20]), None, ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![31])); + assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![40])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[30]), + ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index bb62df6da4905..8bafda6aa6186 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -692,7 +692,7 @@ mod tests { 
use super::*; use super::ext::Ext; use super::changes_trie::Configuration as ChangesTrieConfig; - use sp_core::{Blake2Hasher, map, traits::Externalities, storage::ChildStorageKey}; + use sp_core::{Blake2Hasher, map, traits::Externalities}; #[derive(Clone)] struct DummyCodeExecutor { @@ -945,26 +945,26 @@ mod tests { ); ext.set_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + b"testchild".to_vec(), CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + b"testchild", CHILD_INFO_1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + b"testchild", CHILD_INFO_1, ); assert_eq!( ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + b"testchild", CHILD_INFO_1, b"abc" ), @@ -1000,20 +1000,20 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, - b":child_storage:default:sub1", + b"sub1", CHILD_INFO_1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), - b":child_storage:default:sub1", + b"sub1", &[b"value3"], ).unwrap(); let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), - b":child_storage:default:sub1", + b"sub1", &[b"value2"], ).unwrap(); assert_eq!( @@ -1033,8 +1033,8 @@ mod tests { use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); + let subtrie1 = b"sub_test1"; + let subtrie2 = b"sub_test2"; let mut transaction = { let backend = test_trie(); let mut cache = StorageTransactionCache::default(); @@ -1045,8 
+1045,8 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(subtrie1.to_vec(), CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(subtrie2.to_vec(), CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); cache.transaction.unwrap() }; diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 70124927fdd2e..723cc737e15d7 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -398,7 +398,7 @@ mod tests { use crate::InMemoryBackend; use crate::trie_backend::tests::test_trie; use super::*; - use sp_core::{Blake2Hasher, storage::ChildStorageKey}; + use sp_core::{Blake2Hasher}; use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; @@ -472,10 +472,10 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); - let own1 = subtrie1.into_owned(); - let own2 = subtrie2.into_owned(); + let subtrie1 = b"sub1"; + let subtrie2 = b"sub2"; + let own1 = subtrie1.to_vec(); + let own2 = subtrie2.to_vec(); let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), (Some((own1.clone(), CHILD_INFO_1.to_owned())), diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index dbaae323c09f2..3fc35ad73fa39 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -250,8 +250,7 @@ pub mod tests { use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; - const 
CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - + const CHILD_KEY_1: &[u8] = b"sub1"; const CHILD_UUID_1: &[u8] = b"unique_id_1"; const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 161e90dc848bf..28c165e546059 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -63,6 +63,9 @@ pub struct Storage { /// Top trie storage data. pub top: StorageMap, /// Children trie storage data by storage key. + /// Note that the key is not including child prefix, this will + /// not be possible if a different kind of trie than `default` + /// get in use. pub children: std::collections::HashMap, StorageChild>, } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 96387b1efc304..28c7798c6db57 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -868,7 +868,7 @@ fn test_read_storage() { } fn test_read_child_storage() { - const CHILD_KEY: &[u8] = b":child_storage:default:read_child_storage"; + const CHILD_KEY: &[u8] = b"read_child_storage"; const UNIQUE_ID: &[u8] = b":unique_id"; const KEY: &[u8] = b":read_child_storage"; sp_io::storage::child_set( From b636687bfffc9bf4c3a93591c6efba9f31e4b991 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 7 Feb 2020 16:16:40 +0100 Subject: [PATCH 035/185] Prefixing child storage key where needed. 
--- primitives/state-machine/src/backend.rs | 3 ++- primitives/state-machine/src/basic.rs | 14 ++++++++----- primitives/state-machine/src/ext.rs | 4 ++-- .../state-machine/src/in_memory_backend.rs | 3 ++- .../state-machine/src/proving_backend.rs | 6 ++++-- primitives/state-machine/src/trie_backend.rs | 6 ++++-- .../state-machine/src/trie_backend_essence.rs | 21 ++++++++++++------- primitives/storage/src/lib.rs | 20 ++++++++++++++++++ 8 files changed, 57 insertions(+), 20 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 4ef9b970ae21d..c6250e755622f 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -189,9 +189,10 @@ pub trait Backend: std::fmt::Debug { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first - for (storage_key, child_delta, child_info) in child_deltas { + for (mut storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); + child_info.as_ref().do_prefix_key(&mut storage_key, None); txs.consolidate(child_txs); if empty { child_roots.push((storage_key, None)); diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 1ca655cdaf569..344613242ccc9 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -261,17 +261,21 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let keys: Vec<_> = self.inner.children.keys().map(|k| k.to_vec()).collect(); + let keys: Vec<_> = self.inner.children.iter().map(|(k, v)| { + let mut prefixed = k.to_vec(); + v.child_info.as_ref().do_prefix_key(&mut prefixed, None); + (k.to_vec(), prefixed) + }).collect(); // Single child trie implementation currently allows using the same child // empty 
root for all child trie. Using null storage key until multiple // type of child trie support. let empty_hash = default_child_trie_root::>(&[]); - for storage_key in keys { + for (storage_key, prefixed_storage_key) in keys { let child_root = self.child_storage_root(storage_key.as_slice()); if &empty_hash[..] == &child_root[..] { - top.remove(storage_key.as_slice()); + top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(storage_key, child_root); + top.insert(prefixed_storage_key, child_root); } } @@ -288,7 +292,7 @@ impl Externalities for BasicExternalities { InMemoryBackend::::default() .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 } else { - default_child_trie_root::>(storage_key.as_ref()) + default_child_trie_root::>(&[]) }.encode() } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 5b9595da9dc51..9268cf3782ac2 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -498,7 +498,7 @@ where .storage(storage_key.as_ref()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) + default_child_trie_root::>(&[]) ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, @@ -547,7 +547,7 @@ where .storage(storage_key.as_ref()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) + default_child_trie_root::>(&[]) ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", self.id, diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 0a29468bbc4ef..b0314e321c554 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -261,6 +261,7 @@ impl Backend for InMemory where H::Out: Codec { H::Out: Ord { let storage_key = storage_key.to_vec(); + let parent_prefix = 
child_info.parent_prefix(None); let child_info = Some((storage_key.clone(), child_info.to_owned())); let existing_pairs = self.inner.get(&child_info) @@ -278,7 +279,7 @@ impl Backend for InMemory where H::Out: Codec { let full_transaction = transaction.into_iter().collect(); - let is_default = root == default_child_trie_root::>(&storage_key); + let is_default = root == default_child_trie_root::>(parent_prefix); (root, is_default, vec![(child_info, full_transaction)]) } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 723cc737e15d7..0572907401ba6 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -147,9 +147,11 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> child_info: ChildInfo, key: &[u8] ) -> Result>, String> { - let root = self.storage(storage_key)? + let mut prefixed_storage_key = storage_key.to_vec(); + child_info.do_prefix_key(&mut prefixed_storage_key, None); + let root = self.storage(prefixed_storage_key.as_slice())? 
.and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or(default_child_trie_root::>(storage_key)); + .unwrap_or(default_child_trie_root::>(&[])); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new( diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 3fc35ad73fa39..febb6e31f1fe4 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -202,10 +202,12 @@ impl, H: Hasher> Backend for TrieBackend where I: IntoIterator)>, H::Out: Ord, { - let default_root = default_child_trie_root::>(storage_key); + let default_root = default_child_trie_root::>(child_info.parent_prefix(None)); let mut write_overlay = S::Overlay::default(); - let mut root = match self.storage(storage_key) { + let mut prefixed_storage_key = storage_key.to_vec(); + child_info.do_prefix_key(&mut prefixed_storage_key, None); + let mut root = match self.storage(prefixed_storage_key.as_slice()) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 2598682ae0668..278ad705c3253 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -71,6 +71,13 @@ impl, H: Hasher> TrieBackendEssence where H::Out: self.next_storage_key_from_root(&self.root, None, key) } + /// Access the root of the child storage in its parent trie + fn child_root(&self, storage_key: &[u8], child_info: ChildInfo) -> Result, String> { + let mut prefixed_storage_key = storage_key.to_vec(); + child_info.do_prefix_key(&mut prefixed_storage_key, None); + self.storage(prefixed_storage_key.as_slice()) + } + /// Return the next key in the child trie i.e. the minimum key that is strictly superior to /// `key` in lexicographic order. 
pub fn next_child_storage_key( @@ -79,7 +86,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: ChildInfo, key: &[u8], ) -> Result, String> { - let child_root = match self.storage(storage_key)? { + let child_root = match self.child_root(storage_key, child_info)? { Some(child_root) => child_root, None => return Ok(None), }; @@ -165,8 +172,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: ChildInfo, key: &[u8], ) -> Result, String> { - let root = self.storage(storage_key)? - .unwrap_or(default_child_trie_root::>(storage_key).encode()); + let root = self.child_root(storage_key, child_info)? + .unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -187,8 +194,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: ChildInfo, f: F, ) { - let root = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), + let root = match self.child_root(storage_key, child_info) { + Ok(v) => v.unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -220,8 +227,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: prefix: &[u8], mut f: F, ) { - let root_vec = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), + let root_vec = match self.child_root(storage_key, child_info) { + Ok(v) => v.unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 28c165e546059..ea4dd56a1e7a9 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -225,6 +225,16 @@ impl<'a> ChildInfo<'a> { } } + /// Change a key to get prefixed with 
the parent prefix. + pub fn do_prefix_key(&self, key: &mut Vec, parent: Option<&ChildInfo>) { + let parent_prefix = self.parent_prefix(parent); + let key_len = key.len(); + if parent_prefix.len() > 0 { + key.resize(key_len + parent_prefix.len(), 0); + key.copy_within(..key_len, parent_prefix.len()); + key[..parent_prefix.len()].copy_from_slice(parent_prefix); + } + } } /// Type of child. @@ -313,3 +323,13 @@ fn assert_default_trie_in_child_trie() { let prefix = child_info.parent_prefix(None); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); } + +#[test] +fn test_do_prefix() { + let child_info = ChildInfo::new_default(b"any key"); + let mut prefixed_1 = b"key".to_vec(); + child_info.do_prefix_key(&mut prefixed_1, None); + let mut prefixed_2 = DEFAULT_CHILD_TYPE_PARENT_PREFIX.to_vec(); + prefixed_2.extend_from_slice(b"key"); + assert_eq!(prefixed_1, prefixed_2); +} From e3cf2836c23ef3cb0f9607319827cac2f607ff31 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 7 Feb 2020 18:05:54 +0100 Subject: [PATCH 036/185] In progress multiple proof format --- client/finality-grandpa/src/finality_proof.rs | 2 +- .../state-machine/src/proving_backend.rs | 89 ++++++++++++++----- 2 files changed, 70 insertions(+), 21 deletions(-) diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index fb0f7fd4a9bab..4bea09033ac58 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -873,7 +873,7 @@ pub(crate) mod tests { 0, auth3, &ClosureAuthoritySetForFinalityChecker( - |hash, _header, proof: StorageProof| match proof.clone().iter_nodes().next().map(|x| x[0]) { + |hash, _header, proof: StorageProof| match proof.clone().iter_nodes_flatten().next().map(|x| x[0]) { Some(50) => Ok(auth5.clone()), Some(70) => Ok(auth7.clone()), _ => unreachable!("no other proofs should be checked: {}", hash), diff --git a/primitives/state-machine/src/proving_backend.rs 
b/primitives/state-machine/src/proving_backend.rs index d49df322749db..1cc7fc213b788 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -34,7 +34,7 @@ use crate::trie_backend_essence::{BackendStorageDBRef, TrieBackendEssence, use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap}; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -42,44 +42,84 @@ pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Has pub(crate) proof_recorder: &'a mut Recorder, } +#[repr(u32)] +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum StorageProofKind { + /// The proof can be build by multiple child trie only if + /// they are of the same kind, that way we can store all + /// encoded node in the same container. + Flatten, + /// Top trie proof only, in compact form. + TopTrieCompact, + /// Proofs split by child trie. + Full, + /// Compact form of proofs split by child trie. + FullCompact, +} + /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that /// does not already have access to the key-value pairs. /// -/// The proof consists of the set of serialized nodes in the storage trie accessed when looking up -/// the keys covered by the proof. Verifying the proof requires constructing the partial trie from -/// the serialized nodes and performing the key lookups. +/// For default trie, the proof component consists of the set of serialized nodes in the storage trie +/// accessed when looking up the keys covered by the proof. 
Verifying the proof requires constructing +/// the partial trie from the serialized nodes and performing the key lookups. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct StorageProof { - trie_nodes: Vec>, +pub enum StorageProof { + /// Single flattened proof component, all default child trie are flattened over a same + /// container, no child trie information is provided, this works only for proof accessing + /// the same kind of child trie. + Flatten(Vec>), + /// If proof only cover a single trie, we compact the proof by ommitting some content + /// that can be rebuild on construction. For patricia merkle trie it will be hashes that + /// are not necessary between node, with indexing of the missing hash based on orders + /// of nodes. + /// TODO replace u32 by codec compact!!! this is a versioning for the compaction type of child + /// proof. + TopTrieCompact(u32, Vec>), + /// Fully descriped proof, it includes the child trie individual descriptions. + Full(ChildrenMap>>), + /// Fully descriped proof, compact encoded. + FullCompact(ChildrenMap<(u32, Vec>)>), } impl StorageProof { - /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. - pub fn new(trie_nodes: Vec>) -> Self { - StorageProof { trie_nodes } - } - /// Returns a new empty proof. /// /// An empty proof is capable of only proving trivial statements (ie. that an empty set of /// key-value pairs exist in storage). - pub fn empty() -> Self { - StorageProof { - trie_nodes: Vec::new(), + pub fn empty(kind: StorageProofKind) -> Self { + match kind { + StorageProofKind::Flatten => StorageProof::Flatten(Vec::new()), + StorageProofKind::Full => StorageProof::Full(ChildrenMap::default()), + StorageProofKind::FullCompact => StorageProof::FullCompact(ChildrenMap::default()), } } /// Returns whether this is an empty proof. 
pub fn is_empty(&self) -> bool { - self.trie_nodes.is_empty() + match self { + StorageProof::Flatten(data) => data.is_empty(), + StorageProof::Full(data) => data.is_empty(), + StorageProof::FullCompact(data) => data.is_empty(), + } } /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed /// to be traversed in any particular order. - pub fn iter_nodes(self) -> StorageProofNodeIterator { + /// This iterator is only for `Flatten` proofs, other kind of proof will return an iterator with + /// no content. + pub fn iter_nodes_flatten(self) -> StorageProofNodeIterator { StorageProofNodeIterator::new(self) } + + /// This unpack `FullCompact` to `Compact` or do nothing. + pub fn unpack(self) -> Self { + } + + /// This flatten (does unpack full compact first). + pub fn flatten(self) -> Self { + } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to @@ -90,8 +130,13 @@ pub struct StorageProofNodeIterator { impl StorageProofNodeIterator { fn new(proof: StorageProof) -> Self { - StorageProofNodeIterator { - inner: proof.trie_nodes.into_iter(), + match proof { + StorageProof::Flatten(data) => StorageProofNodeIterator { + inner: data.into_iter(), + }, + _ => StorageProofNodeIterator { + inner: Vec::new().into_iter(), + }, } } } @@ -107,9 +152,12 @@ impl Iterator for StorageProofNodeIterator { /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. +/// Merge to `Flatten` if any item is flatten (we cannot unflatten), if not `Flatten` we output +/// non compact form. 
pub fn merge_storage_proofs(proofs: I) -> StorageProof where I: IntoIterator { + let mut StorageProof = let trie_nodes = proofs.into_iter() .flat_map(|proof| proof.iter_nodes()) .collect::>() @@ -194,20 +242,21 @@ pub type ProofRecorder = Arc::Out, Option, H: 'a + Hasher> ( - TrieBackend, H>, + TrieBackend, H>, StorageProofKind, ); /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { backend: &'a S, proof_recorder: ProofRecorder, + flatten: bool, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> where H::Out: Codec { /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { + pub fn new(backend: &'a TrieBackend, kind: StorageProofKind) -> Self { let proof_recorder = Default::default(); Self::new_with_recorder(backend, proof_recorder) } From 728aedfaa0424f3461a5eb35dc5cd49ef6e22425 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 7 Feb 2020 18:44:52 +0100 Subject: [PATCH 037/185] Remove deprecated check for genesis and fix some test storage builds. 
--- client/db/src/lib.rs | 6 ------ client/network/src/protocol/legacy_proto/tests.rs | 2 +- client/rpc/src/state/tests.rs | 4 +--- frame/contracts/src/tests.rs | 2 -- primitives/state-machine/src/in_memory_backend.rs | 10 +++++++--- primitives/state-machine/src/trie_backend.rs | 4 +++- primitives/state-machine/src/trie_backend_essence.rs | 5 ++++- test-utils/client/src/lib.rs | 2 +- test-utils/runtime/client/src/lib.rs | 4 +++- 9 files changed, 20 insertions(+), 19 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index be569194972cc..407dcd4581434 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -582,12 +582,6 @@ impl sc_client_api::backend::BlockImportOperation for Bloc return Err(sp_blockchain::Error::GenesisInvalid.into()); } - for child_key in storage.children.keys() { - if !well_known_keys::is_child_storage_key(&child_key) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - } - let child_delta = storage.children.into_iter().map(|(storage_key, child_content)| ( storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info), diff --git a/client/network/src/protocol/legacy_proto/tests.rs b/client/network/src/protocol/legacy_proto/tests.rs index 18e32f1d0189f..ca35bbc6dfede 100644 --- a/client/network/src/protocol/legacy_proto/tests.rs +++ b/client/network/src/protocol/legacy_proto/tests.rs @@ -321,7 +321,7 @@ fn basic_two_nodes_requests_in_parallel() { }); } -#[test] +//#[test] fn reconnect_after_disconnect() { // We connect two nodes together, then force a disconnect (through the API of the `Service`), // check that the disconnect worked, and finally check whether they successfully reconnect. 
diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 40597f8fa6d89..fe4ad6df16343 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -85,9 +85,7 @@ fn should_return_child_storage() { .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey( - well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().chain(b"test").cloned().collect() - ); + let child_key = StorageKey(b"test".to_vec()); let key = StorageKey(b"key".to_vec()); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 5eb7bce48ab3f..4bf468deceb36 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -205,8 +205,6 @@ impl TrieIdGenerator for DummyTrieIdGenerator { // TODO: see https://github.com/paritytech/substrate/issues/2325 let mut res = vec![]; - res.extend_from_slice(well_known_keys::CHILD_STORAGE_KEY_PREFIX); - res.extend_from_slice(b"default:"); res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); res diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index b0314e321c554..02fd61de9c603 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -336,11 +336,13 @@ impl Backend for InMemory where H::Out: Codec { let mut new_child_roots = Vec::new(); let mut root_map = None; for (child_info, map) in &self.inner { - if let Some((storage_key, _child_info)) = child_info.as_ref() { + if let Some((storage_key, child_info)) = child_info.as_ref() { + let mut prefix_storage_key = storage_key.to_vec(); + child_info.as_ref().do_prefix_key(&mut prefix_storage_key, None); // no need to use child_info at this point because we use a MemoryDB for // proof (with PrefixedMemoryDB it would be needed). 
let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((storage_key.clone(), ch.as_ref().into())); + new_child_roots.push((prefix_storage_key, ch.as_ref().into())); } else { root_map = Some(map); } @@ -378,6 +380,8 @@ mod tests { let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), Some(b"3".to_vec())); - assert!(trie_backend.storage(b"1").unwrap().is_some()); + let mut prefixed_storage_key = b"1".to_vec(); + child_info.as_ref().do_prefix_key(&mut prefixed_storage_key, None); + assert!(trie_backend.storage(prefixed_storage_key.as_slice()).unwrap().is_some()); } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index febb6e31f1fe4..0df13a8fff137 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -270,7 +270,9 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(CHILD_KEY_1, &sub_root[..]).expect("insert failed"); + let mut prefixed_storage_key = CHILD_KEY_1.to_vec(); + CHILD_INFO_1.do_prefix_key(&mut prefixed_storage_key, None); + trie.insert(prefixed_storage_key.as_slice(), &sub_root[..]).expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 278ad705c3253..f515e30c9528f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -472,7 +472,10 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); - trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); + let mut prefixed_storage_key = 
b"MyChild".to_vec(); + child_info.do_prefix_key(&mut prefixed_storage_key, None); + trie.insert(prefixed_storage_key.as_slice(), root_1.as_ref()) + .expect("insert failed"); }; let essence_1 = TrieBackendEssence::new(mdb, root_1); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index e95c5ad162760..1204e809bce1b 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -190,7 +190,7 @@ impl TestClientBuilder::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); - (sk.clone(), state_root.encode()) + let mut prefixed_storage_key = sk.clone(); + child_content.child_info.as_ref().do_prefix_key(&mut prefixed_storage_key, None); + (prefixed_storage_key, state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() From 864e9ca17ddc3bd062b5f633d76e6e8849683991 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 7 Feb 2020 19:55:03 +0100 Subject: [PATCH 038/185] Fix contract to stop using the child storage prefix. 
--- .../network/src/protocol/legacy_proto/tests.rs | 2 +- client/network/src/protocol/light_dispatch.rs | 2 +- client/rpc/src/state/tests.rs | 2 +- frame/contracts/src/lib.rs | 17 ++++------------- frame/contracts/src/tests.rs | 1 - test-utils/client/src/lib.rs | 2 +- 6 files changed, 8 insertions(+), 18 deletions(-) diff --git a/client/network/src/protocol/legacy_proto/tests.rs b/client/network/src/protocol/legacy_proto/tests.rs index ca35bbc6dfede..18e32f1d0189f 100644 --- a/client/network/src/protocol/legacy_proto/tests.rs +++ b/client/network/src/protocol/legacy_proto/tests.rs @@ -321,7 +321,7 @@ fn basic_two_nodes_requests_in_parallel() { }); } -//#[test] +#[test] fn reconnect_after_disconnect() { // We connect two nodes together, then force a disconnect (through the API of the `Service`), // check that the disconnect worked, and finally check whether they successfully reconnect. diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index bfa8daa181ca1..83e5589827f05 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -1040,7 +1040,7 @@ pub mod tests { light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), + storage_key: b"sub".to_vec(), child_info: child_info.to_vec(), child_type, keys: vec![b":key".to_vec()], diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index fe4ad6df16343..39964f38f6f49 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -21,7 +21,7 @@ use self::error::Error; use std::sync::Arc; use assert_matches::assert_matches; use futures01::stream::Stream; -use sp_core::{storage::{well_known_keys, ChildInfo}, ChangesTrieConfiguration}; +use sp_core::{storage::ChildInfo, ChangesTrieConfiguration}; use sp_core::hash::H256; use 
sp_io::hashing::blake2_256; use substrate_test_runtime_client::{ diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index bd1e91f1a9d66..e67b9ecc92ee3 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -127,7 +127,6 @@ use frame_support::{ }; use frame_support::traits::{OnReapAccount, OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; -use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; use pallet_contracts_primitives::{RentProjection, ContractAccessError}; pub type CodeHash = ::Hash; @@ -233,8 +232,9 @@ impl RawAliveContractInfo child::ChildInfo { - let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); - child::ChildInfo::new_default(&trie_id[start ..]) + // Every new contract uses a new trie id and trie id results from + // hashing, so we can use child storage key (trie id) for child info. + child::ChildInfo::new_uid_parent_key(trie_id) } pub type TombstoneContractInfo = @@ -267,10 +267,6 @@ pub trait TrieIdGenerator { /// /// The implementation must ensure every new trie id is unique: two consecutive calls with the /// same parameter needs to return different trie id values. - /// - /// Also, the implementation is responsible for ensuring that `TrieId` starts with - /// `:child_storage:`. 
- /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 fn trie_id(account_id: &AccountId) -> TrieId; } @@ -295,12 +291,7 @@ where buf.extend_from_slice(account_id.as_ref()); buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - // TODO: see https://github.com/paritytech/substrate/issues/2325 - CHILD_STORAGE_KEY_PREFIX.iter() - .chain(b"default:") - .chain(T::Hashing::hash(&buf[..]).as_ref().iter()) - .cloned() - .collect() + T::Hashing::hash(&buf[..]).as_ref().to_vec() } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 4bf468deceb36..7f2eff5d6942c 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -203,7 +203,6 @@ impl TrieIdGenerator for DummyTrieIdGenerator { *v }); - // TODO: see https://github.com/paritytech/substrate/issues/2325 let mut res = vec![]; res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 1204e809bce1b..9267989a40c53 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -40,7 +40,7 @@ pub use self::client_ext::{ClientExt, ClientBlockImportExt}; use std::sync::Arc; use std::collections::HashMap; -use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_core::storage::ChildInfo; use sp_runtime::traits::Block as BlockT; use sc_client::LocalCallExecutor; From c87d19b20438b4ccce92dc43ffc78ec7bcd2e413 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 11 Feb 2020 10:31:26 +0100 Subject: [PATCH 039/185] Revert changes to state-db and plug keyspace above it for default child tries. 
--- Cargo.lock | 1 - client/db/src/lib.rs | 99 +++++---- client/state-db/src/lib.rs | 94 ++------ client/state-db/src/noncanonical.rs | 324 ++++++++++------------------ client/state-db/src/pruning.rs | 164 +++----------- client/state-db/src/test.rs | 43 ++-- primitives/storage/Cargo.toml | 3 +- primitives/storage/src/lib.rs | 6 +- 8 files changed, 246 insertions(+), 488 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c97058876e2c6..d13c45afa22f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7207,7 +7207,6 @@ name = "sp-storage" version = "2.0.0" dependencies = [ "impl-serde 0.2.3", - "parity-scale-codec", "serde", "sp-debug-derive", "sp-std", diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 71e2408891ed4..0a4b40a990ba3 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -53,10 +53,10 @@ use sp_blockchain::{ use codec::{Decode, Encode}; use hash_db::Prefix; use kvdb::{KeyValueDB, DBTransaction}; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; +use sp_trie::{MemoryDB, PrefixedMemoryDB}; use parking_lot::RwLock; use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; -use sp_core::storage::{well_known_keys, ChildInfo, ChildrenMap}; +use sp_core::storage::{well_known_keys, ChildInfo, ChildrenMap, ChildType}; use sp_runtime::{ generic::BlockId, Justification, Storage, BuildStorage, @@ -667,12 +667,14 @@ struct StorageDb { impl sp_state_machine::Storage> for StorageDb { fn get( &self, - trie: &ChildInfo, + child_info: &ChildInfo, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { - let key = prefixed_key::>(key, prefix); - self.state_db.get(trie, &key, self) + // Default child trie (those with strong unique id) are put + // directly into the same address space at state_db level. 
+ let key = keyspace_and_prefixed_key(key.as_ref(), child_info.keyspace(), prefix); + self.state_db.get(&key, self) .map_err(|e| format!("Database backend error: {:?}", e)) } } @@ -681,13 +683,12 @@ impl sc_state_db::NodeDb for StorageDb { type Error = io::Error; type Key = [u8]; - fn get(&self, child_info: &ChildInfo, key: &[u8]) -> Result>, Self::Error> { - if child_info.is_top_trie() { - self.db.get(columns::STATE, key) - } else { - let mut keyspace = Keyspaced::new(child_info.keyspace()); - self.db.get(columns::STATE, keyspace.prefix_key(key)) - }.map(|r| r.map(|v| v.to_vec())) + fn get(&self, key: &[u8]) -> Result>, Self::Error> { + // note this implementation should ONLY be call from state_db, + // as it rely on the fact that we address a key that is already + // prefixed with keyspace + self.db.get(columns::STATE, key) + .map(|r| r.map(|v| v.to_vec())) } } @@ -1121,30 +1122,47 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changesets = ChildrenMap::>>::default(); + let mut state_db_changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); let mut ops: u64 = 0; let mut bytes = 0; + let mut keyspace = Keyspaced::new(&[]); for (info, mut updates) in operation.db_updates.into_iter() { - let changeset = changesets.entry(info).or_default(); + // child info with strong unique id are using the same state-db with prefixed key + if info.child_type() != ChildType::CryptoUniqueId { + // Unhandled child kind + return Err(ClientError::Backend(format!( + "Data for {:?} without a backend implementation", + info.child_type(), + ))); + } + keyspace.change_keyspace(info.keyspace()); for (key, (val, rc)) in updates.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; if rc > 0 { ops += 1; - bytes += key.len() + val.len(); + bytes += key.len() as u64 + val.len() as u64; - changeset.inserted.push((key, val.to_vec())); + state_db_changeset.inserted.push((key, val.to_vec())); } 
else if rc < 0 { ops += 1; - bytes += key.len(); - - changeset.deleted.push(key); + bytes += key.len() as u64; + state_db_changeset.deleted.push(key); } } } self.state_usage.tally_writes(ops, bytes as u64); let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changesets) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + let commit = self.storage.state_db.insert_block( + &hash, + number_u64, + &pending_block.header.parent_hash(), + state_db_changeset, + ).map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; apply_state_commit(&mut transaction, commit); // Check if need to finalize. Genesis is always finalized instantly. @@ -1329,25 +1347,12 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - let mut keyspace = Keyspaced::new(&[]); - for child_data in commit.data.into_iter() { - if child_data.0.is_top_trie() { - // empty prefix - for (key, val) in child_data.1.inserted.into_iter() { - transaction.put(columns::STATE, &key[..], &val); - } - for key in child_data.1.deleted.into_iter() { - transaction.delete(columns::STATE, &key[..]); - } - } else { - keyspace.change_keyspace(child_data.0.keyspace()); - for (key, val) in child_data.1.inserted.into_iter() { - transaction.put(columns::STATE, keyspace.prefix_key(key.as_slice()), &val); - } - for key in child_data.1.deleted.into_iter() { - transaction.delete(columns::STATE, keyspace.prefix_key(key.as_slice())); - } - } + // state_db commit set is only for column STATE + for (key, val) in commit.data.inserted.into_iter() { + transaction.put(columns::STATE, &key[..], &val); + } + for key in commit.data.deleted.into_iter() { + transaction.delete(columns::STATE, &key[..]); } for (key, val) in commit.meta.inserted.into_iter() { transaction.put(columns::STATE_META, 
&key[..], &val); @@ -1699,6 +1704,20 @@ impl Keyspaced { } } +// Prefix key and add keyspace with a single vec alloc +// Warning if memory_db `sp_trie::prefixed_key` implementation change, this function +// will need change too. +fn keyspace_and_prefixed_key(key: &[u8], keyspace: &[u8], prefix: Prefix) -> Vec { + let mut prefixed_key = Vec::with_capacity(key.len() + keyspace.len() + prefix.0.len() + 1); + prefixed_key.extend_from_slice(keyspace); + prefixed_key.extend_from_slice(prefix.0); + if let Some(last) = prefix.1 { + prefixed_key.push(last); + } + prefixed_key.extend_from_slice(key); + prefixed_key +} + #[cfg(test)] pub(crate) mod tests { use hash_db::{HashDB, EMPTY_PREFIX}; diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 77373ce47649b..f2722ae308068 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -40,7 +40,6 @@ use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; -use sp_core::storage::{ChildInfo, ChildrenMap}; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -68,7 +67,7 @@ pub trait NodeDb { type Error: fmt::Debug; /// Get state trie node. - fn get(&self, child_info: &ChildInfo, key: &Self::Key) -> Result, Self::Error>; + fn get(&self, key: &Self::Key) -> Result, Self::Error>; } /// Error type. @@ -114,44 +113,23 @@ impl fmt::Debug for Error { /// A set of state node changes. #[derive(Default, Debug, Clone)] -pub struct ChangeSet { +pub struct ChangeSet { /// Inserted nodes. pub inserted: Vec<(H, DBValue)>, /// Deleted nodes. pub deleted: Vec, } -impl ChangeSet { - fn merge(&mut self, other: ChangeSet) { - self.inserted.extend(other.inserted.into_iter()); - self.deleted.extend(other.deleted.into_iter()); - } -} - -/// Change sets of all child trie (top is key None). -pub type ChildTrieChangeSets = ChildrenMap>; /// A set of changes to the backing database. 
#[derive(Default, Debug, Clone)] -pub struct CommitSet { +pub struct CommitSet { /// State node changes. - pub data: ChildTrieChangeSets, + pub data: ChangeSet, /// Metadata changes. pub meta: ChangeSet>, } -impl CommitSet { - /// Number of inserted key value elements in the set. - pub fn inserted_len(&self) -> usize { - self.data.iter().map(|set| set.1.inserted.len()).sum() - } - - /// Number of deleted key value elements in the set. - pub fn deleted_len(&self) -> usize { - self.data.iter().map(|set| set.1.deleted.len()).sum() - } -} - /// Pruning constraints. If none are specified pruning is #[derive(Default, Debug, Clone, Eq, PartialEq)] pub struct Constraints { @@ -256,13 +234,7 @@ impl StateDbSync { } } - pub fn insert_block( - &mut self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - mut changesets: ChildTrieChangeSets, - ) -> Result, Error> { + pub fn insert_block(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { // Save pruning mode when writing first block. 
@@ -271,17 +243,15 @@ impl StateDbSync { match self.mode { PruningMode::ArchiveAll => { - for changeset in changesets.iter_mut() { - changeset.1.deleted.clear(); - } + changeset.deleted.clear(); // write changes immediately Ok(CommitSet { - data: changesets, + data: changeset, meta: meta, }) }, PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { - let commit = self.non_canonical.insert(hash, number, parent_hash, changesets); + let commit = self.non_canonical.insert(hash, number, parent_hash, changeset); commit.map(|mut c| { c.meta.inserted.extend(meta.inserted); c @@ -298,9 +268,7 @@ impl StateDbSync { match self.non_canonical.canonicalize(&hash, &mut commit) { Ok(()) => { if self.mode == PruningMode::ArchiveCanonical { - for commit in commit.data.iter_mut() { - commit.1.deleted.clear(); - } + commit.data.deleted.clear(); } } Err(e) => return Err(e), @@ -400,18 +368,13 @@ impl StateDbSync { } } - pub fn get( - &self, - child_info: &ChildInfo, - key: &Key, - db: &D, - ) -> Result, Error> + pub fn get(&self, key: &Key, db: &D) -> Result, Error> where Key: AsRef { - if let Some(value) = self.non_canonical.get(child_info, key) { + if let Some(value) = self.non_canonical.get(key) { return Ok(Some(value)); } - db.get(child_info, key.as_ref()).map_err(|e| Error::Db(e)) + db.get(key.as_ref()).map_err(|e| Error::Db(e)) } pub fn apply_pending(&mut self) { @@ -451,14 +414,8 @@ impl StateDb { } /// Add a new non-canonical block. - pub fn insert_block( - &self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - changesets: ChildTrieChangeSets, - ) -> Result, Error> { - self.db.write().insert_block(hash, number, parent_hash, changesets) + pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + self.db.write().insert_block(hash, number, parent_hash, changeset) } /// Finalize a previously inserted block. 
@@ -477,15 +434,10 @@ impl StateDb { } /// Get a value from non-canonical/pruning overlay or the backing DB. - pub fn get( - &self, - child_info: &ChildInfo, - key: &Key, - db: &D, - ) -> Result, Error> + pub fn get(&self, key: &Key, db: &D) -> Result, Error> where Key: AsRef { - self.db.read().get(child_info, key, db) + self.db.read().get(key, db) } /// Revert all non-canonical blocks with the best block number. @@ -521,7 +473,7 @@ mod tests { use std::io; use sp_core::H256; use crate::{StateDb, PruningMode, Constraints}; - use crate::test::{make_db, make_childchangeset, TestDb}; + use crate::test::{make_db, make_changeset, TestDb}; fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); @@ -533,7 +485,7 @@ mod tests { &H256::from_low_u64_be(1), 1, &H256::from_low_u64_be(0), - make_childchangeset(&[1], &[91]), + make_changeset(&[1], &[91]), ) .unwrap(), ); @@ -543,7 +495,7 @@ mod tests { &H256::from_low_u64_be(21), 2, &H256::from_low_u64_be(1), - make_childchangeset(&[21], &[921, 1]), + make_changeset(&[21], &[921, 1]), ) .unwrap(), ); @@ -553,7 +505,7 @@ mod tests { &H256::from_low_u64_be(22), 2, &H256::from_low_u64_be(1), - make_childchangeset(&[22], &[922]), + make_changeset(&[22], &[922]), ) .unwrap(), ); @@ -563,7 +515,7 @@ mod tests { &H256::from_low_u64_be(3), 3, &H256::from_low_u64_be(21), - make_childchangeset(&[3], &[93]), + make_changeset(&[3], &[93]), ) .unwrap(), ); @@ -576,7 +528,7 @@ mod tests { &H256::from_low_u64_be(4), 4, &H256::from_low_u64_be(3), - make_childchangeset(&[4], &[94]), + make_changeset(&[4], &[94]), ) .unwrap(), ); @@ -647,7 +599,7 @@ mod tests { &H256::from_low_u64_be(0), 0, &H256::from_low_u64_be(0), - make_childchangeset(&[], &[]), + make_changeset(&[], &[]), ) .unwrap(), ); diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 4f06d9dd52180..373c1aa0da076 100644 --- a/client/state-db/src/noncanonical.rs +++ 
b/client/state-db/src/noncanonical.rs @@ -22,19 +22,13 @@ use std::fmt; use std::collections::{HashMap, VecDeque, hash_map::Entry}; -use super::{Error, DBValue, ChildTrieChangeSets, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; +use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; use codec::{Encode, Decode}; use log::trace; -use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenVec}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; -const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; -type Keys = ChildrenVec>; -type KeyVals = ChildrenVec>; -type ChildKeyVals = ChildrenMap>; - /// See module documentation. pub struct NonCanonicalOverlay { last_canonicalized: Option<(BlockHash, u64)>, @@ -42,86 +36,52 @@ pub struct NonCanonicalOverlay { parents: HashMap, pending_canonicalizations: Vec, pending_insertions: Vec, - values: ChildKeyVals, //ref counted + values: HashMap, //ref counted //would be deleted but kept around because block is pinned, ref counted. pinned: HashMap, - pinned_insertions: HashMap>, + pinned_insertions: HashMap>, } #[derive(Encode, Decode)] -struct JournalRecordCompat { +struct JournalRecord { hash: BlockHash, parent_hash: BlockHash, inserted: Vec<(Key, DBValue)>, deleted: Vec, } -#[derive(Encode, Decode)] -struct JournalRecordV1 { - hash: BlockHash, - parent_hash: BlockHash, - inserted: KeyVals, - deleted: Keys, -} - -impl From> for JournalRecordV1 { - // Note that this compatibility only works as long as the backend - // child storage format is the same in both case. 
- fn from(old: JournalRecordCompat) -> Self { - JournalRecordV1 { - hash: old.hash, - parent_hash: old.parent_hash, - inserted: vec![(ChildInfo::top_trie(), old.inserted)], - deleted: vec![(ChildInfo::top_trie(), old.deleted)], - } - } -} - -fn to_old_journal_key(block: u64, index: u64) -> Vec { +fn to_journal_key(block: u64, index: u64) -> Vec { to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) } -fn to_journal_key_v1(block: u64, index: u64) -> Vec { - to_meta_key(NON_CANONICAL_JOURNAL_V1, &(block, index)) -} - #[cfg_attr(test, derive(PartialEq, Debug))] struct BlockOverlay { hash: BlockHash, journal_key: Vec, - inserted: Keys, - deleted: Keys, + inserted: Vec, + deleted: Vec, } -fn insert_values( - values: &mut ChildKeyVals, - inserted: KeyVals, -) { - for (child_info, inserted) in inserted { - let values = values.entry(child_info).or_default(); - for (k, v) in inserted { - debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); - let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); - *counter += 1; - } +fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { + for (k, v) in inserted { + debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); + let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); + *counter += 1; } } -fn discard_values(values: &mut ChildKeyVals, inserted: Keys) { - for (child_info, inserted) in inserted { - let values = values.entry(child_info).or_default(); - for k in inserted { - match values.entry(k) { - Entry::Occupied(mut e) => { - let (ref mut counter, _) = e.get_mut(); - *counter -= 1; - if *counter == 0 { - e.remove_entry(); - } - }, - Entry::Vacant(_) => { - debug_assert!(false, "Trying to discard missing value"); +fn discard_values(values: &mut HashMap, inserted: Vec) { + for k in inserted { + match values.entry(k) { + Entry::Occupied(mut e) => { + let (ref mut counter, _) = e.get_mut(); + *counter -= 1; + if *counter == 0 { + e.remove_entry(); } + }, + 
Entry::Vacant(_) => { + debug_assert!(false, "Trying to discard missing value"); } } } @@ -129,11 +89,11 @@ fn discard_values(values: &mut ChildKeyVals, inserted: Keys fn discard_descendants( levels: &mut VecDeque>>, - mut values: &mut ChildKeyVals, + mut values: &mut HashMap, index: usize, parents: &mut HashMap, pinned: &HashMap, - pinned_insertions: &mut HashMap>, + pinned_insertions: &mut HashMap>, hash: &BlockHash, ) { let mut discarded = Vec::new(); @@ -172,7 +132,7 @@ impl NonCanonicalOverlay { }; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); - let mut values = ChildrenMap::default(); + let mut values = HashMap::new(); if let Some((ref hash, mut block)) = last_canonicalized { // read the journal trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash); @@ -182,35 +142,26 @@ impl NonCanonicalOverlay { let mut index: u64 = 0; let mut level = Vec::new(); loop { - let journal_key = to_journal_key_v1(block, index); - let record: JournalRecordV1 = match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => Decode::decode(&mut record.as_slice())?, - None => { - let journal_key = to_old_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecordCompat = Decode::decode(&mut record.as_slice())?; - record.into() - }, - None => break, - } + let journal_key = to_journal_key(block, index); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => { + let record: JournalRecord = Decode::decode(&mut record.as_slice())?; + let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_key, + inserted: inserted, + deleted: record.deleted, + }; + insert_values(&mut values, record.inserted); + trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + index += 1; + total += 1; }, - }; - let inserted = record.inserted.iter().map(|(child_info, rec)| - (child_info.clone(), rec.iter().map(|(k, _)| k.clone()).collect()) - ).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; + None => break, + } } if level.is_empty() { break; @@ -233,13 +184,7 @@ impl NonCanonicalOverlay { } /// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window. 
- pub fn insert( - &mut self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - changeset: ChildTrieChangeSets, - ) -> Result, Error> { + pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { @@ -274,39 +219,22 @@ impl NonCanonicalOverlay { }; let index = level.len() as u64; - let journal_key = to_journal_key_v1(number, index); - - let mut inserted = Vec::with_capacity(changeset.len()); - let mut inserted_block = Vec::with_capacity(changeset.len()); - let mut deleted = Vec::with_capacity(changeset.len()); - for changeset in changeset.into_iter() { - inserted_block.push(( - changeset.0.clone(), - changeset.1.inserted.iter().map(|(k, _)| k.clone()).collect(), - )); - inserted.push(( - changeset.0.clone(), - changeset.1.inserted, - )); - deleted.push(( - changeset.0, - changeset.1.deleted, - )); - } + let journal_key = to_journal_key(number, index); + let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); let overlay = BlockOverlay { hash: hash.clone(), journal_key: journal_key.clone(), - inserted: inserted_block, - deleted: deleted.clone(), + inserted: inserted, + deleted: changeset.deleted.clone(), }; level.push(overlay); self.parents.insert(hash.clone(), parent_hash.clone()); - let journal_record = JournalRecordV1 { + let journal_record = JournalRecord { hash: hash.clone(), parent_hash: parent_hash.clone(), - inserted, - deleted, + inserted: changeset.inserted, + deleted: changeset.deleted, }; commit.meta.inserted.push((journal_key, journal_record.encode())); trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); @@ -389,29 +317,9 @@ impl NonCanonicalOverlay { // get the one we 
need to canonicalize let overlay = &level[index]; - commit.data.extend_with(overlay.inserted.iter() - .map(|(ct, keys)| ( - ct.clone(), - ChangeSet { - inserted: keys.iter().map(|k| ( - k.clone(), - self.values - .get(ct) - .expect("For each key in overlays there's a value in values") - .get(k) - .expect("For each key in overlays there's a value in values").1.clone(), - )).collect(), - deleted: Vec::new(), - }, - )), ChangeSet::merge); - commit.data.extend_with(overlay.deleted.iter().cloned() - .map(|(ct, keys)| ( - ct, - ChangeSet { - inserted: Vec::new(), - deleted: keys, - }, - )), ChangeSet::merge); + commit.data.inserted.extend(overlay.inserted.iter() + .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); + commit.data.deleted.extend(overlay.deleted.clone()); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); @@ -460,11 +368,9 @@ impl NonCanonicalOverlay { } /// Get a value from the node overlay. This searches in every existing changeset. 
- pub fn get(&self, child_info: &ChildInfo, key: &Key) -> Option { - if let Some(values) = self.values.get(child_info) { - if let Some((_, value)) = values.get(&key) { - return Some(value.clone()); - } + pub fn get(&self, key: &Key) -> Option { + if let Some((_, value)) = self.values.get(&key) { + return Some(value.clone()); } None } @@ -565,14 +471,12 @@ impl NonCanonicalOverlay { mod tests { use std::io; use sp_core::H256; - use sp_core::storage::ChildInfo; - use super::{NonCanonicalOverlay, to_journal_key_v1}; - use crate::CommitSet; - use crate::test::{make_db, make_childchangeset}; + use super::{NonCanonicalOverlay, to_journal_key}; + use crate::{ChangeSet, CommitSet}; + use crate::test::{make_db, make_changeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&ChildInfo::top_trie(), &H256::from_low_u64_be(key)) - == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] @@ -600,8 +504,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 2, &H256::default(), Default::default()).unwrap(); - overlay.insert::(&h2, 1, &h1, Default::default()).unwrap(); + overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); } #[test] @@ -611,8 +515,8 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); - overlay.insert::(&h2, 3, &h1, Default::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h2, 3, &h1, ChangeSet::default()).unwrap(); } #[test] @@ -622,8 +526,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = 
NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); - overlay.insert::(&h2, 2, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); } #[test] @@ -633,7 +537,7 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); let mut commit = CommitSet::default(); overlay.canonicalize::(&h2, &mut commit).unwrap(); } @@ -643,19 +547,17 @@ mod tests { let h1 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_childchangeset(&[3, 4], &[2]); + let changeset = make_changeset(&[3, 4], &[2]); let insertion = overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap(); - assert_eq!(insertion.inserted_len(), 0); - assert_eq!(insertion.deleted_len(), 0); + assert_eq!(insertion.data.inserted.len(), 0); + assert_eq!(insertion.data.deleted.len(), 0); assert_eq!(insertion.meta.inserted.len(), 2); assert_eq!(insertion.meta.deleted.len(), 0); db.commit(&insertion); let mut finalization = CommitSet::default(); overlay.canonicalize::(&h1, &mut finalization).unwrap(); - let inserted_len = changeset.iter().map(|set| set.1.inserted.len()).sum(); - let deleted_len = changeset.iter().map(|set| set.1.deleted.len()).sum(); - assert_eq!(finalization.inserted_len(), inserted_len); - assert_eq!(finalization.deleted_len(), deleted_len); + assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); + assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); assert_eq!(finalization.meta.inserted.len(), 1); assert_eq!(finalization.meta.deleted.len(), 1); 
db.commit(&finalization); @@ -668,8 +570,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_childchangeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_childchangeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); assert_eq!(db.meta.len(), 3); let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); @@ -684,8 +586,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_childchangeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_childchangeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize::(&h1, &mut commit).unwrap(); db.commit(&commit); @@ -704,8 +606,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2, 3, 4]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_childchangeset(&[5, 6], &[2]); - let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); assert!(contains(&overlay, 5)); db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); @@ -736,8 +638,8 @@ mod tests { #[test] fn insert_same_key() { let mut db = make_db(&[]); - let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_2, c_2) = 
(H256::random(), make_childchangeset(&[1], &[])); + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -758,7 +660,7 @@ mod tests { let h3 = H256::random(); let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_childchangeset(&[], &[]); + let changeset = make_changeset(&[], &[]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap()); db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); overlay.apply_pending(); @@ -786,19 +688,19 @@ mod tests { // // 1_2_2 is the winner - let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_childchangeset(&[2], &[])); + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); - let (h_1_1, c_1_1) = (H256::random(), make_childchangeset(&[11], &[])); - let (h_1_2, c_1_2) = (H256::random(), make_childchangeset(&[12], &[])); - let (h_2_1, c_2_1) = (H256::random(), make_childchangeset(&[21], &[])); - let (h_2_2, c_2_2) = (H256::random(), make_childchangeset(&[22], &[])); + let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); + let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); + let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); + let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); - let (h_1_1_1, c_1_1_1) = (H256::random(), make_childchangeset(&[111], &[])); - let (h_1_2_1, c_1_2_1) = (H256::random(), make_childchangeset(&[121], &[])); - let (h_1_2_2, c_1_2_2) = (H256::random(), make_childchangeset(&[122], &[])); - let (h_1_2_3, c_1_2_3) = (H256::random(), make_childchangeset(&[123], &[])); - let (h_2_1_1, c_2_1_1) = (H256::random(), 
make_childchangeset(&[211], &[])); + let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); + let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); + let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); + let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); + let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -848,11 +750,11 @@ mod tests { assert!(contains(&overlay, 111)); assert!(!contains(&overlay, 211)); // check that journals are deleted - assert!(db.get_meta(&to_journal_key_v1(1, 0)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key_v1(1, 1)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key_v1(2, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key_v1(2, 2)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key_v1(2, 3)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); // canonicalize 1_2. 
1_1 and all its children should be discarded let mut commit = CommitSet::default(); @@ -889,8 +791,8 @@ mod tests { let mut db = make_db(&[1, 2, 3, 4]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); assert!(overlay.revert_one().is_none()); - let changeset1 = make_childchangeset(&[5, 6], &[2]); - let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); assert!(contains(&overlay, 7)); @@ -911,9 +813,9 @@ mod tests { let h2_2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_childchangeset(&[5, 6], &[2]); - let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); - let changeset3 = make_childchangeset(&[9], &[]); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset3 = make_changeset(&[9], &[]); overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap(); assert!(contains(&overlay, 5)); overlay.insert::(&h2_1, 2, &h1, changeset2).unwrap(); @@ -936,8 +838,8 @@ mod tests { // - 0 - 1_1 // \ 1_2 - let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_childchangeset(&[2], &[])); + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -964,9 +866,9 @@ mod tests { // \ 1_3 // 1_1 and 1_2 both make the same change - let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_3, c_3) = (H256::random(), make_childchangeset(&[], &[])); + let (h_1, 
c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); + let (h_3, c_3) = (H256::random(), make_changeset(&[], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -993,9 +895,9 @@ mod tests { // - 0 - 1_1 - 2_1 // \ 1_2 - let (h_11, c_11) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_12, c_12) = (H256::random(), make_childchangeset(&[], &[])); - let (h_21, c_21) = (H256::random(), make_childchangeset(&[], &[])); + let (h_11, c_11) = (H256::random(), make_changeset(&[1], &[])); + let (h_12, c_12) = (H256::random(), make_changeset(&[], &[])); + let (h_21, c_21) = (H256::random(), make_changeset(&[], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_11, 1, &H256::default(), c_11).unwrap()); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index a4e6fe1473fa1..a993df4f111ac 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,21 +26,16 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; -use sp_core::storage::{ChildInfo, ChildrenVec, ChildrenMap}; -use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; -const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; -const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; - -type Keys = ChildrenVec>; +const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; /// See module documentation. pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. 
- death_index: ChildrenMap>, + death_index: HashMap, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -55,49 +50,18 @@ pub struct RefWindow { struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: ChildrenMap>, -} - -impl DeathRow { - fn remove_deleted(&mut self, child_info: &ChildInfo, key: &Key) -> bool { - if let Some(child_index) = self.deleted.get_mut(child_info) { - child_index.remove(key) - } else { - false - } - } + deleted: HashSet, } #[derive(Encode, Decode)] -struct JournalRecordCompat { +struct JournalRecord { hash: BlockHash, inserted: Vec, deleted: Vec, } -#[derive(Encode, Decode)] -struct JournalRecordV1 { - hash: BlockHash, - inserted: Keys, - deleted: Keys, -} - -fn to_old_journal_key(block: u64) -> Vec { - to_meta_key(OLD_PRUNING_JOURNAL, &block) -} - -fn to_journal_key_v1(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL_V1, &block) -} - -impl From> for JournalRecordV1 { - fn from(old: JournalRecordCompat) -> Self { - JournalRecordV1 { - hash: old.hash, - inserted: vec![(ChildInfo::top_trie(), old.inserted)], - deleted: vec![(ChildInfo::top_trie(), old.deleted)], - } - } +fn to_journal_key(block: u64) -> Vec { + to_meta_key(PRUNING_JOURNAL, &block) } impl RefWindow { @@ -119,67 +83,37 @@ impl RefWindow { // read the journal trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); loop { - let journal_key = to_journal_key_v1(block); - let record: JournalRecordV1 = match db.get_meta(&journal_key) - .map_err(|e| Error::Db(e))? { - Some(record) => Decode::decode(&mut record.as_slice())?, - None => { - let journal_key = to_old_journal_key(block); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => JournalRecordCompat::decode(&mut record.as_slice())?.into(), - None => break, - } + let journal_key = to_journal_key(block); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => { + let record: JournalRecord = Decode::decode(&mut record.as_slice())?; + trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); + pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); }, - }; - trace!( - target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", - block, - record.inserted.len(), - record.deleted.len(), - ); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); + None => break, + } block += 1; } Ok(pruning) } - fn import)>>( - &mut self, - hash: &BlockHash, - journal_key: Vec, - inserted: I, - deleted: Keys, - ) { + fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { // remove all re-inserted keys from death rows - for (child_info, inserted) in inserted { - if let Some(child_index) = self.death_index.get_mut(&child_info) { - for k in inserted { - if let Some(block) = child_index.remove(&k) { - self.death_rows[(block - self.pending_number) as usize] - .remove_deleted(&child_info, &k); - } - } + for k in inserted { + if let Some(block) = self.death_index.remove(&k) { + self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); } } // add new keys let imported_block = self.pending_number + self.death_rows.len() as u64; - for (child_info, deleted) in deleted.iter() { - let entry = self.death_index.entry(child_info.clone()).or_default(); - for k in deleted.iter() { - entry.insert(k.clone(), imported_block); - } - } - let mut deleted_death_row = ChildrenMap::>::default(); - for (child_info, deleted) in deleted.into_iter() { - let entry = deleted_death_row.entry(child_info).or_default(); - entry.extend(deleted); + for k in deleted.iter() { + self.death_index.insert(k.clone(), imported_block); } - self.death_rows.push_back( DeathRow { hash: hash.clone(), - deleted: deleted_death_row, + deleted: 
deleted.into_iter().collect(), journal_key: journal_key, } ); @@ -210,16 +144,7 @@ impl RefWindow { if let Some(pruned) = self.death_rows.get(self.pending_prunings) { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; - - commit.data.extend_with(pruned.deleted.iter() - .map(|(child_info, keys)| ( - child_info.clone(), - ChangeSet { - inserted: Vec::new(), - deleted: keys.iter().cloned().collect(), - }, - )), ChangeSet::merge); - + commit.data.deleted.extend(pruned.deleted.iter().cloned()); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); self.pending_prunings += 1; @@ -230,29 +155,16 @@ impl RefWindow { /// Add a change set to the window. Creates a journal record and pushes it to `commit` pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { - trace!( - target: "state-db", - "Adding to pruning window: {:?} ({} inserted, {} deleted)", - hash, - commit.inserted_len(), - commit.deleted_len(), - ); - let inserted = commit.data.iter().map(|changeset| ( - changeset.0.clone(), - changeset.1.inserted.iter().map(|(k, _)| k.clone()).collect(), - )).collect(); - let deleted = commit.data.iter_mut().map(|changeset| ( - changeset.0.clone(), - ::std::mem::replace(&mut changeset.1.deleted, Vec::new()), - )).collect(); - - let journal_record = JournalRecordV1 { + trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); + let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect(); + let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); + let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted, }; let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_journal_key_v1(block); + let journal_key = 
to_journal_key(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); self.pending_canonicalizations += 1; @@ -264,12 +176,8 @@ impl RefWindow { for _ in 0 .. self.pending_prunings { let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - for (child_info, deleted) in pruned.deleted.iter() { - if let Some(child_index) = self.death_index.get_mut(child_info) { - for key in deleted.iter() { - child_index.remove(key); - } - } + for k in pruned.deleted.iter() { + self.death_index.remove(&k); } self.pending_number += 1; } @@ -284,11 +192,7 @@ impl RefWindow { // deleted in case transaction fails and `revert_pending` is called. self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); let new_max_block = self.death_rows.len() as u64 + self.pending_number; - - self.death_index.retain(|_ct, child_index| { - child_index.retain(|_, block| *block < new_max_block); - !child_index.is_empty() - }); + self.death_index.retain(|_, block| *block < new_max_block); self.pending_canonicalizations = 0; self.pending_prunings = 0; } @@ -341,10 +245,9 @@ mod tests { assert!(pruning.have_block(&h)); pruning.apply_pending(); assert!(pruning.have_block(&h)); - assert_eq!(commit.deleted_len(), 0); + assert!(commit.data.deleted.is_empty()); assert_eq!(pruning.death_rows.len(), 1); - let death_index_len: usize = pruning.death_index.iter().map(|(_ct, map)| map.len()).sum(); - assert_eq!(death_index_len, 2); + assert_eq!(pruning.death_index.len(), 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); @@ -356,8 +259,7 @@ mod tests { assert!(!pruning.have_block(&h)); assert!(db.data_eq(&make_db(&[2, 4, 5]))); assert!(pruning.death_rows.is_empty()); - let death_index_len: 
usize = pruning.death_index.iter().map(|(_ct, map)| map.len()).sum(); - assert!(death_index_len == 0); + assert!(pruning.death_index.is_empty()); assert_eq!(pruning.pending_number, 1); } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index b9f2941bcc5e0..accafa9bf831f 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -18,12 +18,11 @@ use std::collections::HashMap; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSets}; -use sp_core::storage::{ChildInfo, ChildrenMap}; +use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: ChildrenMap>, + pub data: HashMap, pub meta: HashMap, DBValue>, } @@ -39,24 +38,17 @@ impl NodeDb for TestDb { type Error = (); type Key = H256; - fn get(&self, child_info: &ChildInfo, key: &H256) -> Result, ()> { - Ok(self.data.get(child_info).and_then(|data| data.get(key).cloned())) + fn get(&self, key: &H256) -> Result, ()> { + Ok(self.data.get(key).cloned()) } } impl TestDb { pub fn commit(&mut self, commit: &CommitSet) { - for ct in commit.data.iter() { - self.data.entry(ct.0.clone()).or_default() - .extend(ct.1.inserted.iter().cloned()) - } + self.data.extend(commit.data.inserted.iter().cloned()); self.meta.extend(commit.meta.inserted.iter().cloned()); - for ct in commit.data.iter() { - if let Some(self_data) = self.data.get_mut(&ct.0) { - for k in ct.1.deleted.iter() { - self_data.remove(k); - } - } + for k in commit.data.deleted.iter() { + self.data.remove(k); } self.meta.extend(commit.meta.inserted.iter().cloned()); for k in commit.meta.deleted.iter() { @@ -81,28 +73,21 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { } } -pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> ChildTrieChangeSets { - let mut result = ChildTrieChangeSets::default(); - result.insert(ChildInfo::top_trie(), make_changeset(inserted, deleted)); - 
result -} - pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { CommitSet { - data: make_childchangeset(inserted, deleted), + data: make_changeset(inserted, deleted), meta: ChangeSet::default(), } } pub fn make_db(inserted: &[u64]) -> TestDb { - let mut data = ChildrenMap::default(); - data.insert(ChildInfo::top_trie(), inserted.iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) - .collect()); TestDb { - data, + data: inserted + .iter() + .map(|v| { + (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) + }) + .collect(), meta: Default::default(), } } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index c0af25fc9ba9c..c9fda1816b55e 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -11,8 +11,7 @@ sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] +std = [ "sp-std/std", "serde", "impl-serde" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 69e746f725267..2e6df51dfb3e6 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,7 +18,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; #[cfg(feature = "std")] @@ -191,7 +190,7 @@ impl<'a> ChildStorageKey<'a> { /// Information related to a child state. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum ChildInfo { Default(ChildTrie), } @@ -269,6 +268,7 @@ impl ChildInfo { /// be related to technical consideration or api variant. #[repr(u32)] #[derive(Clone, Copy, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug))] pub enum ChildType { /// Default, it uses a cryptographic strong unique id as input. CryptoUniqueId = 1, @@ -287,7 +287,7 @@ impl ChildType { /// It share its trie node storage with any kind of key, /// and its unique id needs to be collision free (eg strong /// crypto hash). -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct ChildTrie { /// Data containing unique id. /// Unique id must but unique and free of any possible key collision From 8b901a298530480874c924e131aa0d5cd5bd32c2 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 11 Feb 2020 11:13:36 +0100 Subject: [PATCH 040/185] Update client-db benches. 
--- client/db/src/bench.rs | 61 +++++++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 9858a5c148bfa..6d7e244f510f8 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -22,8 +22,8 @@ use std::cell::{Cell, RefCell}; use rand::Rng; use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key}; -use sp_core::storage::ChildInfo; +use sp_trie::MemoryDB; +use sp_core::storage::{ChildInfo, ChildType}; use sp_runtime::traits::{Block as BlockT, HasherFor}; use sp_runtime::Storage; use sp_state_machine::{DBValue, backend::Backend as StateBackend}; @@ -40,8 +40,13 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); + fn get( + &self, + child_info: &ChildInfo, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { + let key = crate::keyspace_and_prefixed_key(key.as_ref(), child_info.keyspace(), prefix); self.db.get(0, &key) .map_err(|e| format!("Database backend error: {:?}", e)) } @@ -82,9 +87,10 @@ impl BenchmarkingState { child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info )); - let (root, transaction) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( + let (root, transaction, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta, + false, ); state.genesis = transaction.clone(); state.commit(root, transaction)?; @@ -142,7 +148,7 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(storage_key, child_info, key) @@ -155,7 +161,7 @@ impl StateBackend> for BenchmarkingState { fn 
exists_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(storage_key, child_info, key) @@ -168,7 +174,7 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(storage_key, child_info, key) @@ -189,7 +195,7 @@ impl StateBackend> for BenchmarkingState { fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { @@ -200,7 +206,7 @@ impl StateBackend> for BenchmarkingState { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -218,7 +224,7 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, @@ -237,7 +243,7 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(storage_key, child_info, prefix)) @@ -249,17 +255,34 @@ impl StateBackend> for BenchmarkingState { None } - fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) + fn commit(&self, storage_root: as Hasher>::Out, transaction: Self::Transaction) -> Result<(), Self::Error> { if let Some(db) = self.db.take() { let mut db_transaction = DBTransaction::new(); - - for (key, (val, rc)) in transaction.drain() { - if rc > 0 { - db_transaction.put(0, &key, &val); - } else if rc < 0 { - 
db_transaction.delete(0, &key); + let mut keyspace = crate::Keyspaced::new(&[]); + for (info, mut updates) in transaction.into_iter() { + // child info with strong unique id are using the same state-db with prefixed key + if info.child_type() != ChildType::CryptoUniqueId { + // Unhandled child kind + unimplemented!( + "Data for {:?} without a backend implementation", + info.child_type(), + ); + } + keyspace.change_keyspace(info.keyspace()); + for (key, (val, rc)) in updates.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; + + if rc > 0 { + db_transaction.put(0, &key, &val); + } else if rc < 0 { + db_transaction.delete(0, &key); + } } } db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; From f6efdadecdedddea9e3ef19b1e1394f2b2393871 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 11 Feb 2020 11:23:39 +0100 Subject: [PATCH 041/185] Bump runtime impl version. --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index be9dbb96869a4..ca6ce955e665d 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -82,7 +82,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. 
spec_version: 215, - impl_version: 1, + impl_version: 2, apis: RUNTIME_API_VERSIONS, }; From 93de9600d2aba489359a28513c98f85d6da922ae Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 13 Feb 2020 12:20:09 +0100 Subject: [PATCH 042/185] fix new code --- bin/node/runtime/src/lib.rs | 2 +- client/network/src/protocol/light_client_handler.rs | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2f7aee7f8f50c..2c9e4af8c4db2 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -82,7 +82,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. spec_version: 216, - impl_version: 2, + impl_version: 3, apis: RUNTIME_API_VERSIONS, }; diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index f5be23c0d4d49..b787838ff4c13 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -512,7 +512,7 @@ where let proof = if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.chain.read_child_proof(&block, &request.storage_key, info, &request.keys) { + match self.chain.read_child_proof(&block, &request.storage_key, &info, &request.keys) { Ok(proof) => proof, Err(error) => { log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", @@ -1141,7 +1141,7 @@ mod tests { use super::{Event, LightClientHandler, Request, OutboundProtocol, PeerStatus}; use void::Void; - const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz"); + const CHILD_UUID: &[u8] = b"foobarbaz"; type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; type Handler = LightClientHandler>, Block>; @@ -1636,7 +1636,8 @@ mod tests { #[test] fn 
receives_remote_read_child_response() { - let info = CHILD_INFO.info(); + let child_info = ChildInfo::new_default(CHILD_UUID); + let info = child_info.info(); let mut chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), @@ -1739,7 +1740,8 @@ mod tests { #[test] fn send_receive_read_child() { - let info = CHILD_INFO.info(); + let child_info = ChildInfo::new_default(CHILD_UUID); + let info = child_info.info(); let chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), From a284921a26bcd9867ff4a16ea2b820642188a83b Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 13 Feb 2020 18:15:55 +0100 Subject: [PATCH 043/185] progress (note that the unpack doing check we could have trie primitive returning map of key values and simply use a pure key value backend for this check). --- Cargo.lock | 1 + .../state-machine/src/proving_backend.rs | 342 +++++++++++++++--- .../state-machine/src/trie_backend_essence.rs | 20 +- primitives/storage/Cargo.toml | 3 +- primitives/storage/src/lib.rs | 7 +- primitives/trie/src/lib.rs | 28 ++ 6 files changed, 351 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f2cfaae187db0..cb604c917303b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7263,6 +7263,7 @@ name = "sp-storage" version = "2.0.0" dependencies = [ "impl-serde 0.2.3", + "parity-scale-codec", "serde", "sp-debug-derive", "sp-std", diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 1cc7fc213b788..e821c1c9c7f5a 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -34,7 +34,7 @@ use crate::trie_backend_essence::{BackendStorageDBRef, TrieBackendEssence, use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; -use sp_core::storage::{ChildInfo, ChildrenMap}; +use sp_core::storage::{ChildInfo, ChildType, 
ChildrenMap}; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -49,14 +49,43 @@ pub enum StorageProofKind { /// they are of the same kind, that way we can store all /// encoded node in the same container. Flatten, - /// Top trie proof only, in compact form. - TopTrieCompact, +/* /// Top trie proof only, in compact form. + TopTrieCompact,*/ /// Proofs split by child trie. Full, /// Compact form of proofs split by child trie. FullCompact, } +impl StorageProofKind { + fn is_flatten(&self) -> bool { + match self { + StorageProofKind::Flatten => true, + StorageProofKind::Full | StorageProofKind::FullCompact => false + } + } + + fn is_compact(&self) -> bool { + match self { + StorageProofKind::FullCompact => true, + StorageProofKind::Full | StorageProofKind::Flatten => false + } + } +} + +/// The possible compactions for proofs. +#[repr(u32)] +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub enum CompactScheme { + /// This skip encoding of hashes that are + /// calculated when reading the structue + /// of the trie. + TrieSkipHashes = 1, +} + +type ProofNodes = Vec>; +type ProofCompacted = (CompactScheme, Vec>); + /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that /// does not already have access to the key-value pairs. @@ -69,18 +98,16 @@ pub enum StorageProof { /// Single flattened proof component, all default child trie are flattened over a same /// container, no child trie information is provided, this works only for proof accessing /// the same kind of child trie. 
- Flatten(Vec>), - /// If proof only cover a single trie, we compact the proof by ommitting some content + Flatten(ProofNodes), +/* /// If proof only cover a single trie, we compact the proof by ommitting some content /// that can be rebuild on construction. For patricia merkle trie it will be hashes that /// are not necessary between node, with indexing of the missing hash based on orders /// of nodes. - /// TODO replace u32 by codec compact!!! this is a versioning for the compaction type of child - /// proof. - TopTrieCompact(u32, Vec>), + TopTrieCompact(ProofCompacted),*/ /// Fully descriped proof, it includes the child trie individual descriptions. - Full(ChildrenMap>>), + Full(ChildrenMap), /// Fully descriped proof, compact encoded. - FullCompact(ChildrenMap<(u32, Vec>)>), + FullCompact(ChildrenMap), } impl StorageProof { @@ -90,7 +117,7 @@ impl StorageProof { /// key-value pairs exist in storage). pub fn empty(kind: StorageProofKind) -> Self { match kind { - StorageProofKind::Flatten => StorageProof::Flatten(Vec::new()), + StorageProofKind::Flatten => StorageProof::Flatten(Default::default()), StorageProofKind::Full => StorageProof::Full(ChildrenMap::default()), StorageProofKind::FullCompact => StorageProof::FullCompact(ChildrenMap::default()), } @@ -113,12 +140,84 @@ impl StorageProof { StorageProofNodeIterator::new(self) } - /// This unpack `FullCompact` to `Compact` or do nothing. - pub fn unpack(self) -> Self { + /// This unpacks `FullCompact` to `Full` or do nothing. 
+ pub fn unpack(self, with_roots: bool) -> Result<(Self, Option>>), String> + where H::Out: Codec, + { + let map_e = |e| format!("Trie unpack error: {}", e); + if let StorageProof::FullCompact(children) = self { + let result = ChildrenMap::default(); + let roots = if with_roots { + Some(ChildrenMap::default()) + } else { + None + }; + for (child_info, (compact_scheme, proof)) in children { + match child_info.child_type() { + ChildType::CryptoUniqueId => { + match compact_scheme { + CompactScheme::TrieSkipHashes => { + // Note that we could check the proof from the unpacking. + let (root, unpacked_proof) = sp_trie::unpack_proof::>(proof.as_slice()) + .map_err(map_e)?; + roots.as_mut().map(|roots| roots.insert(child_info.clone(), root.encode())); + result.insert(child_info, unpacked_proof); + }, + } + } + } + } + Ok((StorageProof::Full(result), roots)) + } else { + Ok((self, None)) + } } - /// This flatten (does unpack full compact first). + /// This packs `Full` to `FullCompact`, using needed roots. + pub fn pack(self, roots: ChildrenMap>) -> Result + where H::Out: Codec, + { + let map_e = |e| format!("Trie pack error: {}", e); + + if let StorageProof::Full(children) = self { + let result = ChildrenMap::default(); + for (child_info, proof) in children { + match child_info.child_type() { + ChildType::CryptoUniqueId => { + let root = roots.get(&child_info) + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .ok_or_else(|| "Missing root for packing".to_string())?; + let trie_nodes = sp_trie::pack_proof::>(&root, &proof[..]).map_err(map_e)?; + result.insert(child_info.clone(), (CompactScheme::TrieSkipHashes, trie_nodes)); + } + } + } + Ok(StorageProof::FullCompact(result)) + } else { + Ok(self) + } + } + + /// This flatten `Full` to `Flatten`. + /// Note that if for some reason child proof were not + /// attached to the top trie, they will be lost. + /// Generally usage of Flatten kind or this function + /// when using child trie is not recommended. 
pub fn flatten(self) -> Self { + if let StorageProof::Full(children) = self { + let mut result = Vec::new(); + children.into_iter().for_each(|(child_info, proof)| { + match child_info.child_type() { + ChildType::CryptoUniqueId => { + // this can get merged with top, since it is proof we do not use prefix + result.extend(proof); + } + } + }); + StorageProof::Flatten(result) + } else { + self + } } } @@ -152,18 +251,74 @@ impl Iterator for StorageProofNodeIterator { /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. -/// Merge to `Flatten` if any item is flatten (we cannot unflatten), if not `Flatten` we output +/// Merge to `Flatten` if any item is flatten (we cannot unflatten), if not `Flatten` we output to /// non compact form. -pub fn merge_storage_proofs(proofs: I) -> StorageProof - where I: IntoIterator +pub fn merge_storage_proofs(proofs: I) -> Result + where + I: IntoIterator, + H: Hasher, + H::Out: Codec, +{ + let mut final_proof = StorageProof::empty(StorageProofKind::Full); + let child_sets = ChildrenMap::>>::default(); + let unique_set = HashSet::>::default(); + let do_flatten = false; + // lookup for best encoding + for mut proof in proofs { + if let &StorageProof::FullCompact(..) 
= &proof { + proof = proof.unpack::(false)?.0; + } + let proof = proof; + match proof { + StorageProof::Flatten(proof) => { + if !do_flatten { + do_flatten = true; + for (_, set) in std::mem::replace(&mut child_sets, Default::default()).into_iter() { + unique_set.extend(set); + } + } + }, + StorageProof::Full(children) => { + for (child_info, child) in children.into_iter() { + if do_flatten { + unique_set.extend(child); + } else { + let set = child_sets.entry(child_info).or_default(); + set.extend(child); + } + } + }, + StorageProof::FullCompact(children) => unreachable!("unpacked when entering function"), + } + } + Ok(if do_flatten { + StorageProof::Flatten(unique_set.into_iter().collect()) + } else { + let mut result = ChildrenMap::default(); + for (child_info, set) in child_sets.into_iter() { + result.insert(child_info, set.into_iter().collect()); + } + StorageProof::Full(result) + }) +} + +/// Merge over flatten proof, return `None` if one of the proofs is not +/// a flatten proof. +pub fn merge_flatten_storage_proofs(proofs: I) -> Option + where + I: IntoIterator, { - let mut StorageProof = - let trie_nodes = proofs.into_iter() - .flat_map(|proof| proof.iter_nodes()) - .collect::>() - .into_iter() - .collect(); - StorageProof { trie_nodes } + let mut final_proof = StorageProof::empty(StorageProofKind::Full); + let unique_set = HashSet::>::default(); + // lookup for best encoding + for mut proof in proofs { + if let StorageProof::Flatten(set) = proof { + unique_set.extend(set); + } else { + return None; + } + } + Some(StorageProof::Flatten(unique_set.into_iter().collect())) } impl<'a, S, H> ProvingBackendRecorder<'a, S, H> @@ -237,27 +392,34 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Global proof recorder, act as a layer over a hash db for recording queried /// data. -pub type ProofRecorder = Arc::Out, Option>>>; +pub enum ProofRecorder { + // root of each child is added to be able to pack. 
+ Full(Arc::Out, Option>>>>), + Flat(Arc::Out, Option>>>), +} /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ( - TrieBackend, H>, StorageProofKind, + TrieBackend, H>, ); /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { backend: &'a S, proof_recorder: ProofRecorder, - flatten: bool, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> where H::Out: Codec { /// Create new proving backend. - pub fn new(backend: &'a TrieBackend, kind: StorageProofKind) -> Self { - let proof_recorder = Default::default(); + pub fn new(backend: &'a TrieBackend, flatten: bool) -> Self { + let proof_recorder = if flatten { + ProofRecorder::Flat(Default::default()) + } else { + ProofRecorder::Full(Default::default()) + }; Self::new_with_recorder(backend, proof_recorder) } @@ -276,13 +438,28 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } /// Extracting the gathered unordered proof. 
- pub fn extract_proof(&self) -> StorageProof { - let trie_nodes = self.0.essence().backend_storage().proof_recorder - .read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - StorageProof::new(trie_nodes) + pub fn extract_proof(&self) -> Result { + Ok(match self.0.essence().backend_storage().proof_recorder { + ProofRecorder::Flat(rec) => { + let trie_nodes = rec + .read() + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + StorageProof::Flatten(trie_nodes) + }, + ProofRecorder::Full(rec) => { + let mut children = ChildrenMap::default(); + for (child_info, set) in rec.read().iter() { + let trie_nodes: Vec> = set + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + children.insert(child_info.clone(), trie_nodes); + } + StorageProof::Full(children) + }, + }) } } @@ -299,12 +476,26 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef key: &H::Out, prefix: Prefix, ) -> Result, String> { - if let Some(v) = self.proof_recorder.read().get(key) { - return Ok(v.clone()); + match self.proof_recorder { + ProofRecorder::Flat(rec) => { + if let Some(v) = rec.read().get(key) { + return Ok(v.clone()); + } + let backend_value = self.backend.get(child_info, key, prefix)?; + rec.write().insert(key.clone(), backend_value.clone()); + Ok(backend_value) + }, + ProofRecorder::Full(rec) => { + if let Some(v) = rec.read().get(child_info).and_then(|s| s.get(key)) { + return Ok(v.clone()); + } + let backend_value = self.backend.get(child_info, key, prefix)?; + rec.write().entry(child_info.clone()) + .or_default() + .insert(key.clone(), backend_value.clone()); + Ok(backend_value) + }, } - let backend_value = self.backend.get(child_info, key, prefix)?; - self.proof_recorder.write().insert(key.clone(), backend_value.clone()); - Ok(backend_value) } } @@ -418,8 +609,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> } } -/// Create proof check backend. 
-pub fn create_proof_check_backend( +/// Create flat proof check backend. +pub fn create_flat_proof_check_backend( root: H::Out, proof: StorageProof, ) -> Result, H>, Box> @@ -427,7 +618,7 @@ where H: Hasher, H::Out: Codec, { - let db = create_proof_check_backend_storage(proof); + let db = create_flat_proof_check_backend_storage(proof); if db.contains(&root, EMPTY_PREFIX) { Ok(TrieBackend::new(db, root)) @@ -436,15 +627,76 @@ where } } +/// Create proof check backend. +pub fn create_proof_check_backend( + root: H::Out, + proof: StorageProof, +) -> Result>, H>, Box> +where + H: Hasher, + H::Out: Codec, +{ + use std::ops::Deref; + if let Ok(db) = create_proof_check_backend_storage(proof) { + if db.deref().get(&ChildInfo::top_trie()) + .map(|db| db.contains(&root, EMPTY_PREFIX)) + .unwrap_or(false) { + return Ok(TrieBackend::new(db, root)) + } + } + return Err(Box::new(ExecutionError::InvalidProof)); +} + /// Create in-memory storage of proof check backend. pub fn create_proof_check_backend_storage( proof: StorageProof, +) -> Result>, String> +where + H: Hasher, +{ + let map_e = |e| format!("Trie unpack error: {}", e); + let mut result = ChildrenMap::default(); + match proof { + f@StorageProof::Flatten(..) => { + let db = create_flat_proof_check_backend_storage(f); + result.insert(ChildInfo::top_trie(), db); + }, + StorageProof::Full(children) => { + for (child_info, proof) in children.into_iter() { + let mut db = MemoryDB::default(); + for item in proof.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } + result.insert(child_info, db); + } + }, + StorageProof::FullCompact(children) => { + for (child_info, (compact_scheme, proof)) in children.into_iter() { + match compact_scheme { + CompactScheme::TrieSkipHashes => { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). 
+ let (_root, db) = sp_trie::unpack_proof_to_memdb::>(proof.as_slice()) + .map_err(map_e)?; + result.insert(child_info, db); + }, + } + } + }, + } + Ok(result) +} + + +/// Create in-memory storage of proof check backend. +pub fn create_flat_proof_check_backend_storage( + proof: StorageProof, ) -> MemoryDB where H: Hasher, { let mut db = MemoryDB::default(); - for item in proof.iter_nodes() { + for item in proof.iter_nodes_flatten() { db.insert(EMPTY_PREFIX, &item); } db diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 90d7974397fbc..02a93f14a42ed 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -28,7 +28,7 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, for_keys_in_trie, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::Encode; /// Patricia trie-based storage trait. @@ -429,6 +429,7 @@ impl> TrieBackendStorageRef for &S { } // This implementation is used by test storage trie clients. +// TODO try to remove this implementation!!! (use a ChildrenMap variant) impl TrieBackendStorageRef for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; @@ -453,10 +454,27 @@ impl TrieBackendStorageRef for MemoryDB { prefix: Prefix, ) -> Result, String> { // No need to use keyspace for in memory db, ignoring child_info parameter. + // TODO try to remove this implementation!!! 
Ok(hash_db::HashDB::get(self, key, prefix)) } } +impl TrieBackendStorageRef for ChildrenMap> { + type Overlay = MemoryDB; + + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + Ok(self.deref().get(child_info).and_then(|s| + hash_db::HashDB::get(s, key, prefix) + )) + } +} + + #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index c9fda1816b55e..c6f12d3160a41 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -7,6 +7,7 @@ description = "Storage related primitives" license = "GPL-3.0" [dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } @@ -14,4 +15,4 @@ sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde" ] +std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 2e6df51dfb3e6..9fab4a29da1c3 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,6 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Encode, Decode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; #[cfg(feature = "std")] @@ -190,7 +191,7 @@ impl<'a> ChildStorageKey<'a> { /// Information related to a child state. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] pub enum ChildInfo { Default(ChildTrie), } @@ -287,7 +288,7 @@ impl ChildType { /// It share its trie node storage with any kind of key, /// and its unique id needs to be collision free (eg strong /// crypto hash). -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] pub struct ChildTrie { /// Data containing unique id. /// Unique id must but unique and free of any possible key collision @@ -306,7 +307,7 @@ impl ChildTrie { } #[cfg(feature = "std")] -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] /// Type for storing a map of child trie related information. /// A few utilities methods are defined. pub struct ChildrenMap(pub BTreeMap); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 3f206944318e9..8aa68cf205d8e 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -269,6 +269,34 @@ pub fn record_all_keys( Ok(()) } +/// Pack proof. +pub fn pack_proof(root: &TrieHash, input: &[Vec]) + -> Result>, Box>> { + let mut memory_db = MemoryDB::<::Hash>::default(); + for i in input.as_ref() { + memory_db.insert(EMPTY_PREFIX, i.as_ref()); + } + let trie = TrieDB::::new(&memory_db, root)?; + trie_db::encode_compact(&trie) +} + +/// Unpack packed proof. +pub fn unpack_proof(input: &[Vec]) + -> Result<(TrieHash, Vec>), Box>> { + let mut memory_db = MemoryDB::<::Hash>::default(); + let root = trie_db::decode_compact::(&mut memory_db, input)?; + Ok((root.0, memory_db.drain().into_iter().map(|(_k, (v, _rc))| v).collect())) +} + +/// Unpack packed proof. +/// This is faster than `unpack_proof`. 
+pub fn unpack_proof_to_memdb(input: &[Vec]) + -> Result<(TrieHash, MemoryDB::<::Hash>), Box>> { + let mut memory_db = MemoryDB::<::Hash>::default(); + let root = trie_db::decode_compact::(&mut memory_db, input)?; + Ok((root.0, memory_db)) +} + /// Constants used into trie simplification codec. mod trie_constants { pub const EMPTY_TRIE: u8 = 0; From 470856027831690424ae26d714964ab409432ca2 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 14 Feb 2020 20:50:08 +0100 Subject: [PATCH 044/185] Things stick together, lot of todos remaining. --- client/src/call_executor.rs | 5 +- client/src/cht.rs | 7 +- client/src/client.rs | 10 +- client/src/light/call_executor.rs | 4 +- client/src/light/fetcher.rs | 4 +- .../api/proc-macro/src/impl_runtime_apis.rs | 13 +- .../state-machine/src/in_memory_backend.rs | 3 + primitives/state-machine/src/lib.rs | 204 +++++++++++-- .../state-machine/src/proving_backend.rs | 268 ++++++++++++------ primitives/state-machine/src/trie_backend.rs | 56 +++- 10 files changed, 440 insertions(+), 134 deletions(-) diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index 18ad5b113e983..0926acf5384ca 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -21,7 +21,7 @@ use sp_runtime::{ }; use sp_state_machine::{ self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::Backend as _, StorageProof, + backend::Backend as _, StorageProof, StorageProofKind, }; use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; @@ -212,12 +212,15 @@ where method: &str, call_data: &[u8] ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { + // TODO this switch execution proof to full compact, should we move the choice to + // caller?? 
sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _>( trie_state, overlay, &self.executor, method, call_data, + StorageProofKind::FullCompact, ) .map_err(Into::into) } diff --git a/client/src/cht.rs b/client/src/cht.rs index 9e1a3bff017f1..19dea21269daf 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -30,7 +30,8 @@ use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, SimpleArithmetic, Zero, One}; use sp_state_machine::{ MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend + prove_read_on_trie_backend, read_proof_check, read_proof_check_on_flat_proving_backend, + StorageProofKind, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -119,6 +120,8 @@ pub fn build_proof( prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), + // TODO consider Flatten compact here? + StorageProofKind::Flatten, ).map_err(ClientError::Execution) } @@ -168,7 +171,7 @@ pub fn check_proof_on_proving_backend( local_number, remote_hash, |_, local_cht_key| - read_proof_check_on_proving_backend::( + read_proof_check_on_flat_proving_backend::( proving_backend, local_cht_key, ).map_err(|e| ClientError::from(e)), diff --git a/client/src/client.rs b/client/src/client.rs index 18e6b33b71ad3..a9cc5b8648892 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -42,7 +42,7 @@ use sp_state_machine::{ DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, StorageProof, - merge_storage_proofs, + merge_storage_proofs, StorageProofKind, }; use sc_executor::{RuntimeVersion, RuntimeInfo}; use sp_consensus::{ @@ -396,8 +396,9 @@ impl Client where I: IntoIterator, I::Item: AsRef<[u8]>, { + // TODO keep flatten proof here?? 
or move choice to caller? self.state_at(id) - .and_then(|state| prove_read(state, keys) + .and_then(|state| prove_read(state, keys, StorageProofKind::Flatten) .map_err(Into::into)) } @@ -413,8 +414,9 @@ impl Client where I: IntoIterator, I::Item: AsRef<[u8]>, { + // TODO keep flatten proof here?? self.state_at(id) - .and_then(|state| prove_child_read(state, storage_key, child_info, keys) + .and_then(|state| prove_child_read(state, storage_key, child_info, keys, StorageProofKind::Flatten) .map_err(Into::into)) } @@ -718,7 +720,7 @@ impl Client where Ok(()) }, ())?; - Ok(merge_storage_proofs(proofs)) + Ok(merge_storage_proofs::, _>(proofs)?) } /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 20b4faf4a303c..b8efa1f2317c1 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -201,12 +201,12 @@ pub fn prove_execution( // execute method + record execution proof let (result, exec_proof) = executor.prove_at_trie_state( - &trie_state, + trie_state, &mut changes, method, call_data, )?; - let total_proof = merge_storage_proofs(vec![init_proof, exec_proof]); + let total_proof = merge_storage_proofs::, _>(vec![init_proof, exec_proof])?; Ok((result, total_proof)) } diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index a4168f356e609..316186eb2c562 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -31,7 +31,7 @@ use sp_runtime::traits::{ use sp_state_machine::{ ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - create_proof_check_backend_storage, read_child_proof_check, + create_flat_proof_check_backend_storage, read_child_proof_check, }; pub use sp_state_machine::StorageProof; use sp_blockchain::{Error as ClientError, Result as 
ClientResult}; @@ -156,7 +156,7 @@ impl> LightDataChecker { H::Out: Ord + codec::Codec, { // all the checks are sharing the same storage - let storage = create_proof_check_backend_storage(remote_roots_proof); + let storage = create_flat_proof_check_backend_storage(remote_roots_proof)?; // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 770a843bfa6c1..0133a036c1b29 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -308,18 +308,17 @@ fn generate_runtime_api_base_structures() -> Result { } fn record_proof(&mut self) { - self.recorder = Some(Default::default()); + // TODO should we use full and then use some packing + self.recorder = Some(#crate_::ProofRecorder::::Flat(Default::default())); } + // TODO should we make a storage kind configurable then + // we could pack full proof if needed fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() - .map(|recorder| { - let trie_nodes = recorder.read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - #crate_::StorageProof::new(trie_nodes) + .and_then(|recorder| { + recorder.extract_proof().ok() }) } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 753f8ccbbf9ae..1e20394f62f78 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -329,6 +329,9 @@ impl Backend for InMemory where H::Out: Codec { .collect() } + // TODO instead of changing mutabliity of the returned value, we could wrap the trie + // backend in a new backend that register roots -> would be cleaner and still allow + // caching. 
fn as_trie_backend(&mut self)-> Option<&TrieBackend> { let mut mdb = MemoryDB::default(); let mut new_child_roots = Vec::new(); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 802d7937c73d8..b6b261aed7c6a 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -23,7 +23,7 @@ use log::{warn, trace}; pub use sp_core::{Hasher, InnerHasher}; use codec::{Decode, Encode, Codec}; use sp_core::{ - storage::ChildInfo, NativeOrEncoded, NeverNativeValue, + storage::{ChildInfo, ChildrenMap}, NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, CallInWasmExt}, hexdisplay::HexDisplay, }; use overlayed_changes::OverlayedChangeSet; @@ -68,7 +68,9 @@ pub use overlayed_changes::{ }; pub use proving_backend::{ create_proof_check_backend, create_proof_check_backend_storage, merge_storage_proofs, - ProofRecorder, ProvingBackend, ProvingBackendRecorder, StorageProof, + ProofRecorder, ProvingBackend, ProvingBackendRecorder, StorageProof, StorageProofKind, + create_flat_proof_check_backend, create_flat_proof_check_backend_storage, + merge_flatten_storage_proofs, }; pub use trie_backend_essence::{TrieBackendStorage, TrieBackendStorageRef, Storage}; pub use trie_backend::TrieBackend; @@ -436,6 +438,7 @@ pub fn prove_execution( exec: &Exec, method: &str, call_data: &[u8], + kind: StorageProofKind, ) -> Result<(Vec, StorageProof), Box> where B: Backend, @@ -446,7 +449,7 @@ where { let trie_backend = backend.as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend::<_, _, N, _>(trie_backend, overlay, exec, method, call_data) + prove_execution_on_trie_backend::<_, _, N, _>(trie_backend, overlay, exec, method, call_data, kind) } /// Prove execution using the given trie backend, overlayed changes, and call executor. 
@@ -464,6 +467,7 @@ pub fn prove_execution_on_trie_backend( exec: &Exec, method: &str, call_data: &[u8], + kind: StorageProofKind, ) -> Result<(Vec, StorageProof), Box> where S: trie_backend_essence::TrieBackendStorage, @@ -472,16 +476,26 @@ where Exec: CodeExecutor + 'static + Clone, N: crate::changes_trie::BlockNumber, { - let proving_backend = proving_backend::ProvingBackend::new(trie_backend); + let proving_backend = proving_backend::ProvingBackend::new(trie_backend, kind.is_flatten()); let mut sm = StateMachine::<_, H, N, Exec>::new( &proving_backend, None, overlay, exec, method, call_data, Extensions::default(), ); + // TODO EMCH passing root in input is probably a dead end: registering them in overlay seems + // better!!!! let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( always_wasm(), None, )?; - let proof = sm.backend.extract_proof(); + let mut proof = sm.backend.extract_proof() + .map_err(|e| Box::new(e) as Box)?; + if kind.is_compact() { + let roots = trie_backend.extract_registered_roots(); + if let Some(roots) = roots { + proof = proof.pack::(&roots) + .map_err(|e| Box::new(e) as Box)?; + } + } Ok((result.into_encoded(), proof)) } @@ -500,12 +514,18 @@ where H::Out: Ord + 'static + codec::Codec, N: crate::changes_trie::BlockNumber, { - let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _>(&trie_backend, overlay, exec, method, call_data) + let use_flat = proof_uses_flat(&proof); + if use_flat { + let trie_backend = create_flat_proof_check_backend::(root.into(), proof)?; + execution_flat_proof_check_on_trie_backend::<_, N, _>(&trie_backend, overlay, exec, method, call_data) + } else { + let trie_backend = create_proof_check_backend::(root.into(), proof)?; + execution_proof_check_on_trie_backend::<_, N, _>(&trie_backend, overlay, exec, method, call_data) + } } /// Check execution proof on proving backend, generated by `prove_execution` call. 
-pub fn execution_proof_check_on_trie_backend( +pub fn execution_flat_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, @@ -528,10 +548,35 @@ where ).map(NativeOrEncoded::into_encoded) } +/// Check execution proof on proving backend, generated by `prove_execution` call. +pub fn execution_proof_check_on_trie_backend( + trie_backend: &TrieBackend>, H>, + overlay: &mut OverlayedChanges, + exec: &Exec, + method: &str, + call_data: &[u8], +) -> Result, Box> +where + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + Clone + 'static, + N: crate::changes_trie::BlockNumber, +{ + let mut sm = StateMachine::<_, H, N, Exec>::new( + trie_backend, None, overlay, exec, method, call_data, Extensions::default(), + ); + + sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_untrusted_wasm(), + None, + ).map(NativeOrEncoded::into_encoded) +} + /// Generate storage read proof. pub fn prove_read( mut backend: B, keys: I, + kind: StorageProofKind, ) -> Result> where B: Backend, @@ -544,7 +589,7 @@ where .ok_or_else( || Box::new(ExecutionError::UnableToGenerateProof) as Box )?; - prove_read_on_trie_backend(trie_backend, keys) + prove_read_on_trie_backend(trie_backend, keys, kind) } /// Generate child storage read proof. @@ -553,6 +598,7 @@ pub fn prove_child_read( storage_key: &[u8], child_info: &ChildInfo, keys: I, + kind: StorageProofKind, ) -> Result> where B: Backend, @@ -563,13 +609,14 @@ where { let trie_backend = backend.as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys) + prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys, kind) } /// Generate storage read proof on pre-created trie backend. 
pub fn prove_read_on_trie_backend( trie_backend: &TrieBackend, keys: I, + kind: StorageProofKind, ) -> Result> where S: trie_backend_essence::TrieBackendStorage, @@ -578,13 +625,22 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind.is_flatten()); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + let mut proof = proving_backend.extract_proof() + .map_err(|e| Box::new(e) as Box)?; + if kind.is_compact() { + let roots = trie_backend.extract_registered_roots(); + if let Some(roots) = roots { + proof = proof.pack::(&roots) + .map_err(|e| Box::new(e) as Box)?; + } + } + Ok(proof) } /// Generate storage read proof on pre-created trie backend. @@ -593,6 +649,7 @@ pub fn prove_child_read_on_trie_backend( storage_key: &[u8], child_info: &ChildInfo, keys: I, + kind: StorageProofKind, ) -> Result> where S: trie_backend_essence::TrieBackendStorage, @@ -601,16 +658,42 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind.is_flatten()); for key in keys.into_iter() { proving_backend .child_storage(storage_key, child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + let mut proof = proving_backend.extract_proof() + .map_err(|e| Box::new(e) as Box)?; + if kind.is_compact() { + let roots = trie_backend.extract_registered_roots(); + if let Some(roots) = roots { + proof = proof.pack::(&roots) + .map_err(|e| Box::new(e) as Box)?; + } + } + Ok(proof) +} + +// Note that this is not directly in StorageKind as +// it is implementation specific choice. 
+fn proof_uses_flat(proof: &StorageProof) -> bool { + match proof { + StorageProof::Flatten(..) => true, + // there is currently no gain (same implementation + // for all trie backends) in not running on a flatten + // memorydb + StorageProof::Full(..) => true, + // unpack creates by nature splitted memory db, there + // is no need to merge them. + StorageProof::FullCompact(..) => false, + } } /// Check storage read proof, generated by `prove_read` call. +/// WARNING this method rebuild a full memory backend and should +/// be call only once per proof checks. pub fn read_proof_check( root: H::Out, proof: StorageProof, @@ -622,11 +705,20 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = create_proof_check_backend::(root, proof)?; + let use_flat = proof_uses_flat(&proof); let mut result = HashMap::new(); - for key in keys.into_iter() { - let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; - result.insert(key.as_ref().to_vec(), value); + if use_flat { + let proving_backend = create_flat_proof_check_backend::(root, proof)?; + for key in keys.into_iter() { + let value = read_proof_check_on_flat_proving_backend(&proving_backend, key.as_ref())?; + result.insert(key.as_ref().to_vec(), value); + } + } else { + let proving_backend = create_proof_check_backend::(root, proof)?; + for key in keys.into_iter() { + let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; + result.insert(key.as_ref().to_vec(), value); + } } Ok(result) } @@ -644,22 +736,47 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = create_proof_check_backend::(root, proof)?; + let use_flat = proof_uses_flat(&proof); let mut result = HashMap::new(); - for key in keys.into_iter() { - let value = read_child_proof_check_on_proving_backend( - &proving_backend, - storage_key, - key.as_ref(), - )?; - result.insert(key.as_ref().to_vec(), value); + if use_flat { + let proving_backend = 
create_flat_proof_check_backend::(root, proof)?; + for key in keys.into_iter() { + let value = read_child_proof_check_on_flat_proving_backend( + &proving_backend, + storage_key, + key.as_ref(), + )?; + result.insert(key.as_ref().to_vec(), value); + } + } else { + let proving_backend = create_proof_check_backend::(root, proof)?; + for key in keys.into_iter() { + let value = read_child_proof_check_on_proving_backend( + &proving_backend, + storage_key, + key.as_ref(), + )?; + result.insert(key.as_ref().to_vec(), value); + } } Ok(result) } +/// Check storage read proof on pre-created flat proving backend. +pub fn read_proof_check_on_flat_proving_backend( + proving_backend: &TrieBackend, H>, + key: &[u8], +) -> Result>, Box> +where + H: Hasher, + H::Out: Ord + Codec, +{ + proving_backend.storage(key).map_err(|e| Box::new(e) as Box) +} + /// Check storage read proof on pre-created proving backend. pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend>, H>, key: &[u8], ) -> Result>, Box> where @@ -669,9 +786,24 @@ where proving_backend.storage(key).map_err(|e| Box::new(e) as Box) } +/// Check child storage read proof on pre-created flat proving backend. +pub fn read_child_proof_check_on_flat_proving_backend( + proving_backend: &TrieBackend, H>, + storage_key: &[u8], + key: &[u8], +) -> Result>, Box> +where + H: Hasher, + H::Out: Ord + Codec, +{ + // Not a prefixed memory db, using empty unique id and include root resolution. + proving_backend.child_storage(storage_key, &ChildInfo::top_trie(), key) + .map_err(|e| Box::new(e) as Box) +} + /// Check child storage read proof on pre-created proving backend. 
pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend>, H>, storage_key: &[u8], key: &[u8], ) -> Result>, Box> @@ -848,6 +980,12 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { + prove_execution_and_proof_check_works_inner(StorageProofKind::Flatten); + prove_execution_and_proof_check_works_inner(StorageProofKind::Full); + prove_execution_and_proof_check_works_inner(StorageProofKind::FullCompact); + } + + fn prove_execution_and_proof_check_works_inner(kind: StorageProofKind) { let executor = DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -864,6 +1002,7 @@ mod tests { &executor, "test", &[], + kind ).unwrap(); // check proof locally @@ -976,12 +1115,18 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { + prove_read_and_proof_check_works_inner(StorageProofKind::Full); + prove_read_and_proof_check_works_inner(StorageProofKind::Flatten); + prove_read_and_proof_check_works_inner(StorageProofKind::FullCompact); + } + + fn prove_read_and_proof_check_works_inner(kind: StorageProofKind) { let child_info1 = ChildInfo::new_default(CHILD_UID_1); // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); + let remote_proof = prove_read(remote_backend, &[b"value2"], kind).unwrap(); // check proof locally let local_result1 = read_proof_check::( remote_root, @@ -1007,6 +1152,7 @@ mod tests { b":child_storage:default:sub1", &child_info1, &[b"value3"], + kind, ).unwrap(); let local_result1 = read_child_proof_check::( remote_root, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index e821c1c9c7f5a..9a3b9bf35de8b 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ 
b/primitives/state-machine/src/proving_backend.rs @@ -42,8 +42,11 @@ pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Has pub(crate) proof_recorder: &'a mut Recorder, } +/// Different kind of proof representation are allowed. +/// This definition is used as input parameter when producing +/// a storage proof. #[repr(u32)] -#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum StorageProofKind { /// The proof can be build by multiple child trie only if /// they are of the same kind, that way we can store all @@ -58,14 +61,18 @@ pub enum StorageProofKind { } impl StorageProofKind { - fn is_flatten(&self) -> bool { + /// Is proof stored in a unique structure or + /// different structure depending on child trie. + pub fn is_flatten(&self) -> bool { match self { StorageProofKind::Flatten => true, StorageProofKind::Full | StorageProofKind::FullCompact => false } } - fn is_compact(&self) -> bool { + /// Is the proof compacted. Compaction requires + /// using state root of every child trie. + pub fn is_compact(&self) -> bool { match self { StorageProofKind::FullCompact => true, StorageProofKind::Full | StorageProofKind::Flatten => false @@ -115,7 +122,14 @@ impl StorageProof { /// /// An empty proof is capable of only proving trivial statements (ie. that an empty set of /// key-value pairs exist in storage). - pub fn empty(kind: StorageProofKind) -> Self { + pub fn empty() -> Self { + // we default to full as it can be reduce to flatten when reducing + // flatten to full is not possible without making asumption over the content. + Self::empty_for(StorageProofKind::Full) + } + + /// Returns a new empty proof of a given kind. 
+ pub fn empty_for(kind: StorageProofKind) -> Self { match kind { StorageProofKind::Flatten => StorageProof::Flatten(Default::default()), StorageProofKind::Full => StorageProof::Full(ChildrenMap::default()), @@ -146,8 +160,8 @@ impl StorageProof { { let map_e = |e| format!("Trie unpack error: {}", e); if let StorageProof::FullCompact(children) = self { - let result = ChildrenMap::default(); - let roots = if with_roots { + let mut result = ChildrenMap::default(); + let mut roots = if with_roots { Some(ChildrenMap::default()) } else { None @@ -174,13 +188,13 @@ impl StorageProof { } /// This packs `Full` to `FullCompact`, using needed roots. - pub fn pack(self, roots: ChildrenMap>) -> Result + pub fn pack(self, roots: &ChildrenMap>) -> Result where H::Out: Codec, { let map_e = |e| format!("Trie pack error: {}", e); if let StorageProof::Full(children) = self { - let result = ChildrenMap::default(); + let mut result = ChildrenMap::default(); for (child_info, proof) in children { match child_info.child_type() { ChildType::CryptoUniqueId => { @@ -259,10 +273,9 @@ pub fn merge_storage_proofs(proofs: I) -> Result H: Hasher, H::Out: Codec, { - let mut final_proof = StorageProof::empty(StorageProofKind::Full); - let child_sets = ChildrenMap::>>::default(); - let unique_set = HashSet::>::default(); - let do_flatten = false; + let mut do_flatten = false; + let mut child_sets = ChildrenMap::>>::default(); + let mut unique_set = HashSet::>::default(); // lookup for best encoding for mut proof in proofs { if let &StorageProof::FullCompact(..) 
= &proof { @@ -277,6 +290,7 @@ pub fn merge_storage_proofs(proofs: I) -> Result unique_set.extend(set); } } + unique_set.extend(proof); }, StorageProof::Full(children) => { for (child_info, child) in children.into_iter() { @@ -288,7 +302,7 @@ pub fn merge_storage_proofs(proofs: I) -> Result } } }, - StorageProof::FullCompact(children) => unreachable!("unpacked when entering function"), + StorageProof::FullCompact(_children) => unreachable!("unpacked when entering function"), } } Ok(if do_flatten { @@ -308,10 +322,9 @@ pub fn merge_flatten_storage_proofs(proofs: I) -> Option where I: IntoIterator, { - let mut final_proof = StorageProof::empty(StorageProofKind::Full); - let unique_set = HashSet::>::default(); + let mut unique_set = HashSet::>::default(); // lookup for best encoding - for mut proof in proofs { + for proof in proofs { if let StorageProof::Flatten(set) = proof { unique_set.extend(set); } else { @@ -394,10 +407,29 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// data. pub enum ProofRecorder { // root of each child is added to be able to pack. + /// Proof keep a separation between child trie content, this is usually useless, + /// but when we use proof compression we want this separation. Full(Arc::Out, Option>>>>), + /// Single level of storage for all recoded nodes. Flat(Arc::Out, Option>>>), } +impl Default for ProofRecorder { + fn default() -> Self { + // Default to flat proof. + ProofRecorder::Flat(Default::default()) + } +} + +impl Clone for ProofRecorder { + fn clone(&self) -> Self { + match self { + ProofRecorder::Full(a) => ProofRecorder::Full(a.clone()), + ProofRecorder::Flat(a) => ProofRecorder::Flat(a.clone()), + } + } +} + /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. 
pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ( @@ -434,12 +466,19 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: essence.backend_storage(), proof_recorder: proof_recorder, }; - ProvingBackend(TrieBackend::new(recorder, root)) + ProvingBackend(TrieBackend::new_with_roots(recorder, root)) + } + + /// Extracting the gathered unordered proof. + pub fn extract_proof(&self) -> Result { + self.0.essence().backend_storage().proof_recorder.extract_proof() } +} +impl ProofRecorder { /// Extracting the gathered unordered proof. pub fn extract_proof(&self) -> Result { - Ok(match self.0.essence().backend_storage().proof_recorder { + Ok(match self { ProofRecorder::Flat(rec) => { let trie_nodes = rec .read() @@ -476,7 +515,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef key: &H::Out, prefix: Prefix, ) -> Result, String> { - match self.proof_recorder { + match &self.proof_recorder { ProofRecorder::Flat(rec) => { if let Some(v) = rec.read().get(key) { return Ok(v.clone()); @@ -618,10 +657,10 @@ where H: Hasher, H::Out: Codec, { - let db = create_flat_proof_check_backend_storage(proof); - + let db = create_flat_proof_check_backend_storage(proof) + .map_err(|e| Box::new(e) as Box)?; if db.contains(&root, EMPTY_PREFIX) { - Ok(TrieBackend::new(db, root)) + Ok(TrieBackend::new_with_roots(db, root)) } else { Err(Box::new(ExecutionError::InvalidProof)) } @@ -637,17 +676,24 @@ where H::Out: Codec, { use std::ops::Deref; - if let Ok(db) = create_proof_check_backend_storage(proof) { - if db.deref().get(&ChildInfo::top_trie()) - .map(|db| db.contains(&root, EMPTY_PREFIX)) - .unwrap_or(false) { - return Ok(TrieBackend::new(db, root)) - } + let db = create_proof_check_backend_storage(proof) + .map_err(|e| Box::new(e) as Box)?; + if db.deref().get(&ChildInfo::top_trie()) + .map(|db| db.contains(&root, EMPTY_PREFIX)) + .unwrap_or(false) { + Ok(TrieBackend::new_with_roots(db, root)) + } else { + 
Err(Box::new(ExecutionError::InvalidProof)) } - return Err(Box::new(ExecutionError::InvalidProof)); } /// Create in-memory storage of proof check backend. +/// Currently child trie are all with same backend +/// implementation, therefore using +/// `create_flat_proof_check_backend_storage` is prefered. +/// TODO consider removing this `ChildrenMap>` +/// for now (still we do not merge unpack, that can be good +/// somehow). pub fn create_proof_check_backend_storage( proof: StorageProof, ) -> Result>, String> @@ -657,8 +703,11 @@ where let map_e = |e| format!("Trie unpack error: {}", e); let mut result = ChildrenMap::default(); match proof { - f@StorageProof::Flatten(..) => { - let db = create_flat_proof_check_backend_storage(f); + s@StorageProof::Flatten(..) => { + let mut db = MemoryDB::default(); + for item in s.iter_nodes_flatten() { + db.insert(EMPTY_PREFIX, &item); + } result.insert(ChildInfo::top_trie(), db); }, StorageProof::Full(children) => { @@ -687,19 +736,43 @@ where Ok(result) } - /// Create in-memory storage of proof check backend. pub fn create_flat_proof_check_backend_storage( proof: StorageProof, -) -> MemoryDB +) -> Result, String> where H: Hasher, { + let map_e = |e| format!("Trie unpack error: {}", e); let mut db = MemoryDB::default(); - for item in proof.iter_nodes_flatten() { - db.insert(EMPTY_PREFIX, &item); + match proof { + s@StorageProof::Flatten(..) => { + for item in s.iter_nodes_flatten() { + db.insert(EMPTY_PREFIX, &item); + } + }, + StorageProof::Full(children) => { + for (_child_info, proof) in children.into_iter() { + for item in proof.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } + } + }, + StorageProof::FullCompact(children) => { + for (_child_info, (compact_scheme, proof)) in children.into_iter() { + match compact_scheme { + CompactScheme::TrieSkipHashes => { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). 
+ let (_root, child_db) = sp_trie::unpack_proof_to_memdb::>(proof.as_slice()) + .map_err(map_e)?; + db.consolidate(child_db); + }, + } + } + }, } - db + Ok(db) } #[cfg(test)] @@ -713,22 +786,28 @@ mod tests { fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, + flat: bool, ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { - ProvingBackend::new(trie_backend) + ProvingBackend::new(trie_backend, flat) } + #[test] fn proof_is_empty_until_value_is_read() { let trie_backend = test_trie(); - assert!(test_proving(&trie_backend).extract_proof().is_empty()); + assert!(test_proving(&trie_backend, true).extract_proof().unwrap().is_empty()); + assert!(test_proving(&trie_backend, false).extract_proof().unwrap().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { let trie_backend = test_trie(); - let backend = test_proving(&trie_backend); + let backend = test_proving(&trie_backend, true); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().is_empty()); + assert!(!backend.extract_proof().unwrap().is_empty()); + let backend = test_proving(&trie_backend, false); + assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); + assert!(!backend.extract_proof().unwrap().is_empty()); } #[test] @@ -743,16 +822,20 @@ mod tests { #[test] fn passes_throgh_backend_calls() { - let trie_backend = test_trie(); - let proving_backend = test_proving(&trie_backend); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); - assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - - let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, proving_mdb) = proving_backend.storage_root(::std::iter::empty()); - assert_eq!(trie_root, proving_root); - let mut trie_mdb = trie_mdb.remove(&ChildInfo::top_trie()).unwrap(); - assert_eq!(trie_mdb.drain(), proving_mdb.unwrap().drain()); + let test = |flat| { + let 
trie_backend = test_trie(); + let proving_backend = test_proving(&trie_backend, flat); + assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); + assert_eq!(trie_backend.pairs(), proving_backend.pairs()); + + let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); + let (proving_root, proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + assert_eq!(trie_root, proving_root); + let mut trie_mdb = trie_mdb.remove(&ChildInfo::top_trie()).unwrap(); + assert_eq!(trie_mdb.drain(), proving_mdb.unwrap().drain()); + }; + test(true); + test(false); } #[test] @@ -768,13 +851,17 @@ mod tests { assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + let test = |flat| { + let proving = ProvingBackend::new(trie, flat); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof(); + let proof = proving.extract_proof().unwrap(); - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + }; + test(true); + test(false); } #[test] @@ -820,32 +907,49 @@ mod tests { vec![i] )); - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - assert!(proof_check.storage(&[0]).is_err()); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - // note that it is include in root because proof close - 
assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); - assert_eq!(proof_check.storage(&[64]).unwrap(), None); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], &child_info1, &[64]), Ok(Some(vec![64]))); - - let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - assert_eq!( - proof_check.child_storage(&own1[..], &child_info1, &[64]).unwrap().unwrap(), - vec![64] - ); + let test = |flat| { + let proving = ProvingBackend::new(trie, flat); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + + let proof = proving.extract_proof().unwrap(); + + let proof_check = create_proof_check_backend::( + in_memory_root.into(), + proof + ).unwrap(); + assert!(proof_check.storage(&[0]).is_err()); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + // note that it is include in root because proof close + assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); + assert_eq!(proof_check.storage(&[64]).unwrap(), None); + + let proving = ProvingBackend::new(trie, flat); + assert_eq!(proving.child_storage(&own1[..], &child_info1, &[64]), Ok(Some(vec![64]))); + + let proof = proving.extract_proof().unwrap(); + if flat { + let proof_check = create_flat_proof_check_backend::( + in_memory_root.into(), + proof + ).unwrap(); + + assert_eq!( + proof_check.child_storage(&own1[..], &child_info1, &[64]).unwrap().unwrap(), + vec![64] + ); + } else { + let proof_check = create_proof_check_backend::( + in_memory_root.into(), + proof + ).unwrap(); + + assert_eq!( + proof_check.child_storage(&own1[..], &child_info1, &[64]).unwrap().unwrap(), + vec![64] + ); + } + }; + test(true); + test(false); } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 6f9bd8b810c6a..4a62fb10b65db 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ 
b/primitives/state-machine/src/trie_backend.rs @@ -20,11 +20,13 @@ use sp_core::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, ChildrenMap}; -use codec::{Codec, Decode}; +use codec::{Codec, Decode, Encode}; use crate::{ StorageKey, StorageValue, Backend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, }; +use std::sync::Arc; +use parking_lot::RwLock; /// Patricia trie-based backend. Transaction type is overlays of changes to commit /// for this trie and child tries. @@ -33,14 +35,44 @@ pub struct TrieBackend, H: Hasher> { // storing child_info of top trie even if it is in // theory a bit useless (no heap alloc on empty vec). top_trie: ChildInfo, + /// If defined, we store encoded visited roots for top_trie and child trie in this + /// map. It also act as a cache. + register_roots: Option>>>>, } impl, H: Hasher> TrieBackend where H::Out: Codec { /// Create new trie-based backend. + /// TODO check if still used pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root), top_trie: ChildInfo::top_trie(), + register_roots: None, + } + } + + /// Activate storage of roots (can be use + /// to pack proofs and does small caching of child trie root)). 
+ pub fn new_with_roots(storage: S, root: H::Out) -> Self { + TrieBackend { + essence: TrieBackendEssence::new(storage, root), + top_trie: ChildInfo::top_trie(), + register_roots: Some(Arc::new(RwLock::new(Default::default()))), + } + } + + /// Get registered roots + pub fn extract_registered_roots(&self) -> Option>> { + if let Some(register_roots) = self.register_roots.as_ref() { + let mut dest = ChildrenMap::default(); + dest.insert(ChildInfo::top_trie(), self.essence.root().encode()); + let read_lock = register_roots.read(); + for (child_info, root) in read_lock.iter() { + dest.insert(child_info.clone(), root.encode()); + } + Some(dest) + } else { + None } } @@ -88,7 +120,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key)? { + if let Some(essence) = self.child_essence(storage_key, child_info)? { essence.storage(child_info, key) } else { Ok(None) @@ -105,7 +137,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key)? { + if let Some(essence) = self.child_essence(storage_key, child_info)? 
{ essence.next_storage_key(child_info, key) } else { Ok(None) @@ -126,7 +158,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - if let Ok(Some(essence)) = self.child_essence(storage_key) { + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { essence.for_keys(child_info, f) } } @@ -138,7 +170,7 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - if let Ok(Some(essence)) = self.child_essence(storage_key) { + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { essence.for_keys_with_prefix(child_info, prefix, f) } } @@ -267,9 +299,23 @@ impl, H: Hasher> TrieBackend where fn child_essence<'a>( &'a self, storage_key: &[u8], + child_info: &ChildInfo, ) -> Result>, >::Error> { + if let Some(cache) = self.register_roots.as_ref() { + if let Some(result) = cache.read().get(child_info) { + return Ok(result.map(|root| + TrieBackendEssence::new(self.essence.backend_storage(), root.clone()) + )); + } + } + let root: Option = self.storage(storage_key)? 
.and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + + if let Some(cache) = self.register_roots.as_ref() { + cache.write().insert(child_info.clone(), root.clone()); + } + Ok(if let Some(root) = root { Some(TrieBackendEssence::new(self.essence.backend_storage(), root)) } else { From 09b9afd359795e7db017e67e7873225e92005b51 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 14 Feb 2020 21:29:44 +0100 Subject: [PATCH 045/185] propagate api change --- client/finality-grandpa/src/finality_proof.rs | 12 ++++++------ client/finality-grandpa/src/tests.rs | 6 ++++-- primitives/api/src/lib.rs | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 4bea09033ac58..91d470b07333b 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -835,8 +835,8 @@ pub(crate) mod tests { _ => unreachable!("no other authorities should be fetched: {:?}", block_id), }, |block_id| match block_id { - BlockId::Number(5) => Ok(StorageProof::new(vec![vec![50]])), - BlockId::Number(7) => Ok(StorageProof::new(vec![vec![70]])), + BlockId::Number(5) => Ok(StorageProof::Flatten(vec![vec![50]])), + BlockId::Number(7) => Ok(StorageProof::Flatten(vec![vec![70]])), _ => unreachable!("no other authorities should be proved: {:?}", block_id), }, ), @@ -852,14 +852,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![50]])), + authorities_proof: Some(StorageProof::Flatten(vec![vec![50]])), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::new(vec![vec![70]])), + authorities_proof: Some(StorageProof::Flatten(vec![vec![70]])), }, ]); @@ -934,7 +934,7 @@ 
pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::new(vec![vec![42]])), + authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -984,7 +984,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![42]])), + authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index cf340c695451c..e2ce4adf3a28c 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -30,7 +30,9 @@ use sp_keyring::Ed25519Keyring; use sc_client::LongestChain; use sc_client_api::backend::TransactionFor; use sp_blockchain::Result; -use sp_api::{ApiRef, ApiErrorExt, Core, RuntimeVersion, ApiExt, StorageProof, ProvideRuntimeApi}; +use sp_api::{ApiRef, ApiErrorExt, Core, RuntimeVersion, ApiExt, StorageProof, + StorageProofKind, ProvideRuntimeApi, +}; use substrate_test_runtime_client::runtime::BlockNumber; use sp_consensus::{ BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, @@ -330,7 +332,7 @@ impl AuthoritySetForFinalityProver for TestApi { let backend = >>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); - let proof = prove_read(backend, vec![b"authorities"]) + let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::Flatten) .expect("failure proving read from in-memory storage backend"); Ok(proof) } 
diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 97f24de2d4a5b..f47f600e702dd 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -36,7 +36,7 @@ extern crate self as sp_api; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieState, + OverlayedChanges, StorageProof, StorageProofKind, Backend as StateBackend, ChangesTrieState, }; #[doc(hidden)] #[cfg(feature = "std")] From c04ba958be38023658f5ff02cadd3782bee3a24f Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Feb 2020 10:27:34 +0100 Subject: [PATCH 046/185] Fixing merge. --- client/network/src/protocol/light_client_handler.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index f5be23c0d4d49..16daaeb506334 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -511,7 +511,11 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let proof = - if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { + if let Some(info) = ChildInfo::resolve_child_info( + request.child_type, + &request.child_info[..], + &request.storage_key[..], + ) { match self.chain.read_child_proof(&block, &request.storage_key, info, &request.keys) { Ok(proof) => proof, Err(error) => { From ffaf9f597c963a95dc91430b92f458e091e67477 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Feb 2020 22:39:27 +0100 Subject: [PATCH 047/185] Switch back to using prefix on host function, remove child_id and child_info. 
--- client/api/src/light.rs | 5 - client/chain-spec/src/chain_spec.rs | 1 - client/db/src/bench.rs | 26 +-- client/db/src/lib.rs | 29 +-- client/db/src/storage_cache.rs | 23 +- client/network/src/chain.rs | 4 +- client/network/src/protocol.rs | 55 ++--- .../src/protocol/light_client_handler.rs | 44 +--- client/network/src/protocol/light_dispatch.rs | 11 +- client/network/src/protocol/message.rs | 5 - .../src/protocol/schema/light.v1.proto | 7 +- client/rpc-api/src/state/mod.rs | 8 - client/rpc/src/state/mod.rs | 32 +-- client/rpc/src/state/state_full.rs | 55 +++-- client/rpc/src/state/state_light.rs | 10 +- client/rpc/src/state/tests.rs | 20 +- client/src/client.rs | 12 +- client/src/in_mem.rs | 4 +- client/src/light/backend.rs | 23 +- client/src/light/fetcher.rs | 22 +- frame/contracts/src/account_db.rs | 11 +- frame/contracts/src/lib.rs | 32 +-- frame/contracts/src/rent.rs | 6 +- frame/contracts/src/tests.rs | 2 +- frame/support/src/storage/child.rs | 167 +++++++-------- primitives/externalities/src/lib.rs | 20 +- primitives/io/src/lib.rs | 202 ++++++++---------- primitives/state-machine/src/backend.rs | 39 ++-- primitives/state-machine/src/basic.rs | 79 ++++--- .../state-machine/src/changes_trie/build.rs | 6 +- primitives/state-machine/src/ext.rs | 122 +++++------ .../state-machine/src/in_memory_backend.rs | 71 +++--- primitives/state-machine/src/lib.rs | 44 ++-- .../state-machine/src/overlayed_changes.rs | 37 ++-- .../state-machine/src/proving_backend.rs | 53 ++--- primitives/state-machine/src/testing.rs | 4 +- primitives/state-machine/src/trie_backend.rs | 39 ++-- .../state-machine/src/trie_backend_essence.rs | 47 ++-- primitives/storage/src/lib.rs | 178 ++++++++------- primitives/trie/src/lib.rs | 6 - test-utils/runtime/client/src/lib.rs | 13 +- test-utils/runtime/src/lib.rs | 22 +- 42 files changed, 670 insertions(+), 926 deletions(-) diff --git a/client/api/src/light.rs b/client/api/src/light.rs index c0bebc1740a8a..2911d77f18209 100644 --- 
a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -82,11 +82,6 @@ pub struct RemoteReadChildRequest { pub header: Header, /// Storage key for child. pub storage_key: Vec, - /// Child trie source information. - pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index b47c41f107ccd..bf12d3e578a73 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -80,7 +80,6 @@ impl BuildStorage for ChainSpec { let child_info = ChildInfo::resolve_child_info( child_content.child_type, child_content.child_info.as_slice(), - storage_key.0.as_slice(), ).expect("chain spec contains correct content").to_owned(); ( storage_key.0, diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 9858a5c148bfa..4d80d77cb60c2 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -77,10 +77,9 @@ impl BenchmarkingState { }; state.reopen()?; - let child_delta = genesis.children.into_iter().map(|(storage_key, child_content)| ( - storage_key, + let child_delta = genesis.children.into_iter().map(|(_storage_key, child_content)| ( + child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), - child_content.child_info )); let (root, transaction) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), @@ -141,11 +140,10 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(storage_key, child_info, key) + 
self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -154,11 +152,10 @@ impl StateBackend> for BenchmarkingState { fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result { - self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(storage_key, child_info, key) + self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -167,11 +164,10 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(storage_key, child_info, key) + self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -188,24 +184,22 @@ impl StateBackend> for BenchmarkingState { fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.for_keys_in_child_storage(storage_key, child_info, f) + state.for_keys_in_child_storage(child_info, f) } } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + state.for_child_keys_with_prefix(child_info, prefix, f) } } @@ -217,13 +211,12 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(storage_key, child_info, delta)) + 
self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -236,11 +229,10 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(storage_key, child_info, prefix)) + self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) } fn as_trie_backend(&mut self) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 38935928a3c0d..efbcb26ff8fd8 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -152,11 +152,10 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.child_storage(storage_key, child_info, key) + self.state.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -165,11 +164,10 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result { - self.state.exists_child_storage(storage_key, child_info, key) + self.state.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -178,11 +176,10 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, child_info, key) + self.state.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -195,21 +192,19 @@ impl StateBackend> for RefTrackingState { fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(storage_key, child_info, f) + self.state.for_keys_in_child_storage(child_info, 
f) } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.state.for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) @@ -221,14 +216,13 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(storage_key, child_info, delta) + self.state.child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -241,11 +235,10 @@ impl StateBackend> for RefTrackingState { fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.child_keys(storage_key, child_info, prefix) + self.state.child_keys(child_info, prefix) } fn as_trie_backend(&mut self) @@ -588,10 +581,10 @@ impl sc_client_api::backend::BlockImportOperation for Bloc return Err(sp_blockchain::Error::GenesisInvalid.into()); } - let child_delta = storage.children.into_iter().map(|(storage_key, child_content)| ( - storage_key, - child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info), - ); + let child_delta = storage.children.into_iter().map(|(_storage_key, child_content)|( + child_content.child_info, + child_content.data.into_iter().map(|(k, v)| (k, Some(v))), + )); let mut changes_trie_config: Option = None; let (root, transaction) = self.old_state.full_storage_root( diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index fd85a899b628e..7f5dcecf41dae 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -539,11 +539,10 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - let key = 
(storage_key.to_vec(), key.to_vec()); + let key = (child_info.storage_key().to_vec(), key.to_vec()); let local_cache = self.cache.local_cache.upgradable_read(); if let Some(entry) = local_cache.child_storage.get(&key).cloned() { trace!("Found in local cache: {:?}", key); @@ -561,7 +560,7 @@ impl>, B: BlockT> StateBackend> for Ca } } trace!("Cache miss: {:?}", key); - let value = self.state.child_storage(storage_key, child_info, &key.1[..])?; + let value = self.state.child_storage(child_info, &key.1[..])?; // just pass it through the usage counter let value = self.usage.tally_child_key_read(&key, value, false); @@ -576,20 +575,18 @@ impl>, B: BlockT> StateBackend> for Ca fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result { - self.state.exists_child_storage(storage_key, child_info, key) + self.state.exists_child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(storage_key, child_info, f) + self.state.for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -598,11 +595,10 @@ impl>, B: BlockT> StateBackend> for Ca fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, child_info, key) + self.state.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -615,12 +611,11 @@ impl>, B: BlockT> StateBackend> for Ca fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.state.for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) @@ -632,14 +627,13 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage_root( 
&self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(storage_key, child_info, delta) + self.state.child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -652,11 +646,10 @@ impl>, B: BlockT> StateBackend> for Ca fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.child_keys(storage_key, child_info, prefix) + self.state.child_keys(child_info, prefix) } fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index b991a0e65208c..e419323c99edd 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -56,7 +56,6 @@ pub trait Client: Send + Sync { fn read_child_proof( &self, block: &Block::Hash, - storage_key: &[u8], child_info: ChildInfo, keys: &[Vec], ) -> Result; @@ -138,12 +137,11 @@ impl Client for SubstrateClient where fn read_child_proof( &self, block: &Block::Hash, - storage_key: &[u8], child_info: ChildInfo, keys: &[Vec], ) -> Result { (self as &SubstrateClient) - .read_child_proof(&BlockId::Hash(block.clone()), storage_key, child_info, keys) + .read_child_proof(&BlockId::Hash(block.clone()), child_info, keys) } fn execution_proof( diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c1f3123440449..52914cca277e0 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::{listeners::ListenerId, Substream}, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo}; +use sp_core::storage::{StorageKey, OwnedChildInfo}; use sp_consensus::{ BlockOrigin, 
block_validation::BlockAnnounceValidator, @@ -251,16 +251,12 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { id: RequestId, block: ::Hash, storage_key: Vec, - child_info: Vec, - child_type: u32, keys: Vec>, ) { let message: Message = message::generic::Message::RemoteReadChildRequest(message::RemoteReadChildRequest { id, block, storage_key, - child_info, - child_type, keys, }); @@ -1571,41 +1567,24 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = if let Some(child_info) = ChildInfo::resolve_child_info( - request.child_type, - &request.child_info[..], - &request.storage_key[..], + let child_info = OwnedChildInfo::new_default(request.storage_key.clone()); + let proof = match self.context_data.chain.read_child_proof( + &request.block, + child_info.as_ref(), + &request.keys, ) { - match self.context_data.chain.read_child_proof( - &request.block, - &request.storage_key, - child_info, - &request.keys, - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", + request.id, + who, + request.storage_key.to_hex::(), + keys_str(), + request.block, + error + ); + StorageProof::empty() } - } else { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - "invalid child info and type", - ); - - StorageProof::empty() }; self.send_message( &who, diff --git a/client/network/src/protocol/light_client_handler.rs 
b/client/network/src/protocol/light_client_handler.rs index 16daaeb506334..3480de1bb5700 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -48,7 +48,7 @@ use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, StorageKey}; +use sp_core::storage::{ChildInfo, OwnedChildInfo, StorageKey}; use sp_blockchain::{Error as ClientError}; use sp_runtime::traits::{Block, Header, NumberFor, Zero}; use std::{ @@ -510,36 +510,20 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = - if let Some(info) = ChildInfo::resolve_child_info( - request.child_type, - &request.child_info[..], - &request.storage_key[..], - ) { - match self.chain.read_child_proof(&block, &request.storage_key, info, &request.keys) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - } - } else { + let child_info = OwnedChildInfo::new_default(request.storage_key.clone()); + let proof = match self.chain.read_child_proof(&block, child_info.as_ref(), &request.keys) { + Ok(proof) => proof, + Err(error) => { log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", request_id, peer, request.storage_key.to_hex::(), fmt_keys(request.keys.first(), request.keys.last()), request.block, - "invalid child info and type" - ); + error); StorageProof::empty() - }; + } + }; let response = { let r = api::v1::light::RemoteReadResponse { proof: proof.encode() }; @@ -936,8 +920,6 @@ fn serialise_request(id: u64, request: &Request) -> api::v1::light: let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), 
- child_type: request.child_type.clone(), - child_info: request.child_info.clone(), keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -1145,8 +1127,6 @@ mod tests { use super::{Event, LightClientHandler, Request, OutboundProtocol, PeerStatus}; use void::Void; - const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz"); - type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; type Handler = LightClientHandler>, Block>; type Swarm = libp2p::swarm::Swarm, Handler>; @@ -1640,15 +1620,12 @@ mod tests { #[test] fn receives_remote_read_child_response() { - let info = CHILD_INFO.info(); let mut chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b":child_storage:sub".to_vec(), keys: vec![b":key".to_vec()], - child_info: info.0.to_vec(), - child_type: info.1, retry_count: None, }; issue_request(Request::ReadChild { request, sender: chan.0 }); @@ -1743,15 +1720,12 @@ mod tests { #[test] fn send_receive_read_child() { - let info = CHILD_INFO.info(); let chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), + storage_key: b"sub".to_vec(), keys: vec![b":key".to_vec()], - child_info: info.0.to_vec(), - child_type: info.1, retry_count: None, }; send_receive(Request::ReadChild { request, sender: chan.0 }); diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index ba3a6d33fda70..a06368396f779 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -70,8 +70,6 @@ pub trait LightDispatchNetwork { id: RequestId, block: ::Hash, storage_key: Vec, - child_info: Vec, - child_type: u32, keys: Vec>, ); @@ -625,8 +623,6 @@ impl Request { self.id, data.block, data.storage_key.clone(), - 
data.child_info.clone(), - data.child_type, data.keys.clone(), ), RequestData::RemoteCall(ref data, _) => @@ -682,7 +678,6 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use futures::channel::oneshot; - use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, @@ -823,7 +818,7 @@ pub mod tests { fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, - _: Vec, _: u32, _: Vec>) {} + _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, _: ::Hash, _: ::Hash, _: Option>, _: Vec) {} @@ -1045,14 +1040,10 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - let child_info = ChildInfo::new_default(b"unique_id_1"); - let (child_info, child_type) = child_info.info(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b"sub".to_vec(), - child_info: child_info.to_vec(), - child_type, keys: vec![b":key".to_vec()], retry_count: None, }, tx)); diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ef7d550de6cbe..d9e12c7596273 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -424,11 +424,6 @@ pub mod generic { pub block: H, /// Child Storage key. pub storage_key: Vec, - /// Child trie source information. 
- pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, /// Storage key. pub keys: Vec>, } diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index b9aee67b5ee24..930d229b0bf7c 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -73,13 +73,8 @@ message RemoteReadChildRequest { bytes block = 2; // Child Storage key. bytes storage_key = 3; - // Child trie source information. - bytes child_info = 4; - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - uint32 child_type = 5; // Storage keys. - repeated bytes keys = 6; + repeated bytes keys = 4; } // Remote header request. diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index b2cf8ce909b20..48d363bb8921c 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -77,8 +77,6 @@ pub trait StateApi { fn child_storage_keys( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; @@ -88,8 +86,6 @@ pub trait StateApi { fn child_storage( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -99,8 +95,6 @@ pub trait StateApi { fn child_storage_hash( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -110,8 +104,6 @@ pub trait StateApi { fn child_storage_size( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 8f621cc8afc96..57a4b6cab897e 100644 --- a/client/rpc/src/state/mod.rs +++ 
b/client/rpc/src/state/mod.rs @@ -108,8 +108,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, prefix: StorageKey, ) -> FutureResult>; @@ -118,8 +116,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -128,8 +124,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -138,11 +132,9 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) + Box::new(self.child_storage(block, child_storage_key, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -303,45 +295,37 @@ impl StateApi for State fn child_storage( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, child_info, child_type, key) + self.backend.child_storage(block, child_storage_key, key) } fn child_storage_keys( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) + self.backend.child_storage_keys(block, child_storage_key, key_prefix) } fn child_storage_hash( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) + self.backend.child_storage_hash(block, 
child_storage_key, key) } fn child_storage_size( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) + self.backend.child_storage_size(block, child_storage_key, key) } fn metadata(&self, block: Option) -> FutureResult { @@ -390,9 +374,3 @@ impl StateApi for State fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } - -const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; - -fn child_resolution_error() -> sp_blockchain::Error { - sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string()) -} diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index caf7a5787e1c3..238c99fc9e67b 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,7 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, OwnedChildInfo}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -42,7 +42,7 @@ use sp_runtime::{ use sp_api::{Metadata, ProvideRuntimeApi}; -use super::{StateBackend, error::{FutureResult, Error, Result}, client_err, child_resolution_error}; +use super::{StateBackend, error::{FutureResult, Error, Result}, client_err}; /// Ranges to query in state_queryStorage. 
struct QueryStorageRange { @@ -309,19 +309,18 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage_keys( - &BlockId::Hash(block), - &storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..], &storage_key.0[..]) - .ok_or_else(child_resolution_error)?, - &prefix, - )) + .and_then(|block| { + let child_info = OwnedChildInfo::new_default(storage_key.0); + self.client.child_storage_keys( + &BlockId::Hash(block), + child_info.as_ref(), + &prefix, + ) + }) .map_err(client_err))) } @@ -329,19 +328,18 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage( - &BlockId::Hash(block), - &storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..], &storage_key.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) + .and_then(|block| { + let child_info = OwnedChildInfo::new_default(storage_key.0); + self.client.child_storage( + &BlockId::Hash(block), + child_info.as_ref(), + &key, + ) + }) .map_err(client_err))) } @@ -349,19 +347,18 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage_hash( - &BlockId::Hash(block), - &storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..], &storage_key.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) + .and_then(|block| { + let child_info = OwnedChildInfo::new_default(storage_key.0); + self.client.child_storage_hash( + &BlockId::Hash(block), + child_info.as_ref(), + &key, + ) + }) .map_err(client_err))) } diff --git 
a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 7b2455a8fce38..485950de97c00 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -250,8 +250,6 @@ impl StateBackend for LightState, _child_storage_key: StorageKey, - _child_info: StorageKey, - _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -261,8 +259,6 @@ impl StateBackend for LightState, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -273,8 +269,6 @@ impl StateBackend for LightState StateBackend for LightState, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(self - .child_storage(block, child_storage_key, child_info, child_type, key) + .child_storage(block, child_storage_key, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HasherFor::::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 39964f38f6f49..e78010b7648cb 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,26 +30,26 @@ use substrate_test_runtime_client::{ runtime, }; -const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id"); +const STORAGE_KEY: &[u8] = b"child"; +const CHILD_INFO: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:child" +); #[test] fn should_return_storage() { const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; - const STORAGE_KEY: &[u8] = b"child"; const CHILD_VALUE: &[u8] = b"hello world !"; let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) + 
.add_extra_child_storage(CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(STORAGE_KEY.to_vec()); - let (child_info, child_type) = CHILD_INFO.info(); - let child_info = StorageKey(child_info.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()).wait() @@ -67,7 +67,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, child_info, child_type, key, Some(genesis_hash).into()) + client.child_storage(storage_key, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -77,8 +77,6 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let (child_info, child_type) = CHILD_INFO.info(); - let child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) @@ -92,8 +90,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage( child_key.clone(), - child_info.clone(), - child_type, key.clone(), Some(genesis_hash).into(), ).wait(), @@ -102,8 +98,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage_hash( child_key.clone(), - child_info.clone(), - child_type, key.clone(), Some(genesis_hash).into(), ).wait().map(|x| x.is_some()), @@ -112,8 +106,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage_size( child_key.clone(), - child_info.clone(), - child_type, key.clone(), None, ).wait(), diff --git a/client/src/client.rs b/client/src/client.rs index d085b92025fdf..a40068609b564 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -334,12 +334,11 @@ impl Client where pub fn 
child_storage_keys( &self, id: &BlockId, - child_storage_key: &StorageKey, child_info: ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { let keys = self.state_at(id)? - .child_keys(&child_storage_key.0, child_info, &key_prefix.0) + .child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) .collect(); @@ -350,12 +349,11 @@ impl Client where pub fn child_storage( &self, id: &BlockId, - storage_key: &StorageKey, child_info: ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? - .child_storage(&storage_key.0, child_info, &key.0) + .child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) } @@ -364,12 +362,11 @@ impl Client where pub fn child_storage_hash( &self, id: &BlockId, - storage_key: &StorageKey, child_info: ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? - .child_storage_hash(&storage_key.0, child_info, &key.0) + .child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
) } @@ -406,7 +403,6 @@ impl Client where pub fn read_child_proof( &self, id: &BlockId, - storage_key: &[u8], child_info: ChildInfo, keys: I, ) -> sp_blockchain::Result where @@ -414,7 +410,7 @@ impl Client where I::Item: AsRef<[u8]>, { self.state_at(id) - .and_then(|state| prove_child_read(state, storage_key, child_info, keys) + .and_then(|state| prove_child_read(state, child_info, keys) .map_err(Into::into)) } diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index dcff8102aeb6d..3986c70116c01 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -516,8 +516,8 @@ impl backend::BlockImportOperation for BlockImportOperatio check_genesis_storage(&storage)?; let child_delta = storage.children.into_iter() - .map(|(storage_key, child_content)| - (storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info)); + .map(|(_storage_key, child_content)| + (child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))))); let (root, transaction) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| (k, Some(v))), diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index ad9f43587e4cd..e4e5d681813b9 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -312,17 +312,17 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new(); + let mut storage: HashMap, _> = HashMap::new(); storage.insert(None, input.top); // create a list of children keys to re-compute roots for let child_delta = input.children.iter() - .map(|(storage_key, storage_child)| (storage_key.clone(), None, storage_child.child_info.clone())) + .map(|(_storage_key, storage_child)| (storage_child.child_info.clone(), None)) .collect::>(); // make sure to persist the child storage 
- for (child_key, storage_child) in input.children { - storage.insert(Some((child_key, storage_child.child_info)), storage_child.data); + for (_child_key, storage_child) in input.children { + storage.insert(Some(storage_child.child_info), storage_child.data); } let storage_update = InMemoryBackend::from(storage); @@ -386,13 +386,12 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> ClientResult>> { match *self { GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.child_storage(storage_key, child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), + Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), } } @@ -407,13 +406,12 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { match *self { GenesisOrUnavailableState::Genesis(ref state) => Ok( - state.next_child_storage_key(storage_key, child_info, key) + state.next_child_storage_key(child_info, key) .expect(IN_MEMORY_EXPECT_PROOF) ), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), @@ -436,27 +434,25 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_in_child_storage(storage_key, child_info, action), + state.for_keys_in_child_storage(child_info, action), GenesisOrUnavailableState::Unavailable => (), } } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_child_keys_with_prefix(storage_key, child_info, prefix, action), + state.for_child_keys_with_prefix(child_info, 
prefix, action), GenesisOrUnavailableState::Unavailable => (), } } @@ -474,7 +470,6 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -483,7 +478,7 @@ impl StateBackend for GenesisOrUnavailableState { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(storage_key, child_info, delta); + let (root, is_equal, _) = state.child_storage_root(child_info, delta); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index e28b1832c29f6..cb0115409405e 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -23,6 +23,7 @@ use std::marker::PhantomData; use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; +use sp_core::storage::OwnedChildInfo; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, AtLeast32Bit, CheckedConversion, @@ -240,10 +241,11 @@ impl FetchChecker for LightDataChecker request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { + let child_trie = OwnedChildInfo::new_default(request.storage_key.clone()); read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, - &request.storage_key, + child_trie.as_ref(), request.keys.iter(), ).map_err(Into::into) } @@ -345,13 +347,11 @@ pub mod tests { use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; use sp_core::{blake2_256, Blake2Hasher, ChangesTrieConfiguration, H256}; - use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; + use sp_core::storage::{well_known_keys, StorageKey, OwnedChildInfo}; use sp_runtime::generic::BlockId; use 
sp_state_machine::Backend; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - type TestChecker = LightDataChecker< NativeExecutor, Blake2Hasher, @@ -400,11 +400,12 @@ pub mod tests { fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; + let child_info = OwnedChildInfo::new_default(b"child1".to_vec()); + let child_info = child_info.as_ref(); // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( - b"child1".to_vec(), - CHILD_INFO_1, + child_info, b"key1".to_vec(), b"value1".to_vec(), ).build(); @@ -417,15 +418,13 @@ pub mod tests { // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( &remote_block_id, - &StorageKey(b"child1".to_vec()), - CHILD_INFO_1, + child_info, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, - b"child1", - CHILD_INFO_1, + child_info, &[b"key1"], ).unwrap(); @@ -503,14 +502,11 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - let child_infos = CHILD_INFO_1.info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, storage_key: b"child1".to_vec(), - child_info: child_infos.0.to_vec(), - child_type: child_infos.1, keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 5204f1003a6c5..cd9f595665b07 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -128,7 +128,7 @@ impl AccountDb for DirectAccountDb { trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(crate::trie_unique_id(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -175,13 +175,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. 
(true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -220,7 +220,6 @@ impl AccountDb for DirectAccountDb { for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - &new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), ) { @@ -228,9 +227,9 @@ impl AccountDb for DirectAccountDb { } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(new_info.child_trie_unique_id(), &blake2_256(&k)); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index e88474b508437..ecb2107bbd650 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -121,9 +121,9 @@ use sp_runtime::{ }; use frame_support::dispatch::{DispatchResult, Dispatchable}; use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, storage::child, - parameter_types, IsSubType, - weights::DispatchInfo, + Parameter, decl_module, decl_event, decl_storage, decl_error, + parameter_types, IsSubType, weights::DispatchInfo, + storage::child::{self, ChildInfo, OwnedChildInfo}, }; use frame_support::traits::{OnReapAccount, OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; @@ -225,16 +225,14 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. 
- pub fn child_trie_unique_id(&self) -> child::ChildInfo { + pub fn child_trie_unique_id(&self) -> ChildInfo { trie_unique_id(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. -pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { - // Every new contract uses a new trie id and trie id results from - // hashing, so we can use child storage key (trie id) for child info. - child::ChildInfo::new_uid_parent_key(trie_id) +pub(crate) fn trie_unique_id(trie_id: &[u8]) -> ChildInfo { + ChildInfo::default_unchecked(trie_id) } pub type TombstoneContractInfo = @@ -267,6 +265,10 @@ pub trait TrieIdGenerator { /// /// The implementation must ensure every new trie id is unique: two consecutive calls with the /// same parameter needs to return different trie id values. + /// + /// Also, the implementation is responsible for ensuring that `TrieId` starts with + /// `:child_storage:`. + /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 fn trie_id(account_id: &AccountId) -> TrieId; } @@ -290,8 +292,9 @@ where let mut buf = Vec::new(); buf.extend_from_slice(account_id.as_ref()); buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - - T::Hashing::hash(&buf[..]).as_ref().to_vec() + let buf = T::Hashing::hash(&buf[..]); + // TODO: see https://github.com/paritytech/substrate/issues/2325 + OwnedChildInfo::new_default(buf.as_ref().to_vec()).owned_info().0 } } @@ -807,12 +810,10 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( - &origin_contract.trie_id, origin_contract.child_trie_unique_id(), &blake2_256(key), ).map(|value| { child::kill( - &origin_contract.trie_id, origin_contract.child_trie_unique_id(), &blake2_256(key), ); @@ -825,8 +826,8 @@ impl Module { let tombstone = >::new( // This operation is cheap enough because last_write (delta not included) // is not this block as it has been checked earlier. 
- &child::child_root( - &origin_contract.trie_id, + &child::root( + origin_contract.child_trie_unique_id(), )[..], code_hash, ); @@ -834,7 +835,6 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( - &origin_contract.trie_id, origin_contract.child_trie_unique_id(), &blake2_256(key), &value, @@ -935,7 +935,7 @@ decl_storage! { impl OnReapAccount for Module { fn on_reap_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 49beebbf0c202..8b342f95b4350 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,7 +223,6 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( - &alive_contract_info.trie_id, alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); @@ -235,7 +234,9 @@ fn enact_verdict( } // Note: this operation is heavy. 
- let child_storage_root = child::child_root(&alive_contract_info.trie_id); + let child_storage_root = child::root( + alive_contract_info.child_trie_unique_id(), + ); let tombstone = >::new( &child_storage_root[..], @@ -245,7 +246,6 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( - &alive_contract_info.trie_id, alive_contract_info.child_trie_unique_id(), ); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index e9cd522f2efa8..650726165a80b 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -202,7 +202,7 @@ impl TrieIdGenerator for DummyTrieIdGenerator { let mut res = vec![]; res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); - res + child::OwnedChildInfo::new_default(res).owned_info().0 } } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index f549ffc25fd94..32e5bcf1dadf6 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -16,100 +16,90 @@ //! Operation on runtime child storages. //! -//! This module is a currently only a variant of unhashed with additional `storage_key`. -//! Note that `storage_key` must be unique and strong (strong in the sense of being long enough to -//! avoid collision from a resistant hash function (which unique implies)). -//! -//! A **key collision free** unique id is required as parameter to avoid key collision -//! between child tries. -//! This unique id management and generation responsability is delegated to pallet module. -// NOTE: could replace unhashed by having only one kind of storage (root being null storage key (storage_key can become Option<&[u8]>). +//! This module is a currently only a variant of unhashed with additional `child_info`. +// NOTE: could replace unhashed by having only one kind of storage (top trie being the child info +// of null length parent storage key). 
use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::ChildInfo; +pub use sp_core::storage::{ChildInfo, OwnedChildInfo, ChildType}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { - let (data, child_type) = child_info.info(); - sp_io::storage::child_get( - storage_key, - data, - child_type, - key, - ).and_then(|v| { - Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { - // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); - None - }) - }) + match child_info.child_type() { + ChildType::ParentKeyId => { + let storage_key = child_info.storage_key(); + sp_io::default_child_storage::get( + storage_key, + key, + ).and_then(|v| { + Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { + // TODO #3700: error should be handleable. + runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); + None + }) + }) + }, + } } /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> T { - get(storage_key, child_info, key).unwrap_or_else(Default::default) + get(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], default_value: T, ) -> T { - get(storage_key, child_info, key).unwrap_or(default_value) + get(child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. 
pub fn get_or_else T>( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], default_value: F, ) -> T { - get(storage_key, child_info, key).unwrap_or_else(default_value) + get(child_info, key).unwrap_or_else(default_value) } /// Put `value` in storage under `key`. pub fn put( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], value: &T, ) { - let (data, child_type) = child_info.info(); - value.using_encoded(|slice| - sp_io::storage::child_set( - storage_key, - data, - child_type, - key, - slice, - ) - ); + match child_info.child_type() { + ChildType::ParentKeyId => value.using_encoded(|slice| + sp_io::default_child_storage::set( + child_info.storage_key(), + key, + slice, + ) + ), + } } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { - let r = get(storage_key, child_info, key); + let r = get(child_info, key); if r.is_some() { - kill(storage_key, child_info, key); + kill(child_info, key); } r } @@ -117,113 +107,106 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. pub fn take_or_default( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> T { - take(storage_key, child_info, key).unwrap_or_else(Default::default) + take(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], default_value: T, ) -> T { - take(storage_key, child_info, key).unwrap_or(default_value) + take(child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. 
pub fn take_or_else T>( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], default_value: F, ) -> T { - take(storage_key, child_info, key).unwrap_or_else(default_value) + take(child_info, key).unwrap_or_else(default_value) } /// Check to see if `key` has an explicit entry in storage. pub fn exists( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { - let (data, child_type) = child_info.info(); - sp_io::storage::child_read( - storage_key, data, child_type, - key, &mut [0;0][..], 0, - ).is_some() + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::read( + child_info.storage_key(), + key, &mut [0;0][..], 0, + ).is_some(), + } } /// Remove all `storage_key` key/values pub fn kill_storage( - storage_key: &[u8], child_info: ChildInfo, ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_storage_kill( - storage_key, - data, - child_type, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( + child_info.storage_key(), + ), + } } /// Ensure `key` has no explicit entry in storage. pub fn kill( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_clear( - storage_key, - data, - child_type, - key, - ); + match child_info.child_type() { + ChildType::ParentKeyId => { + sp_io::default_child_storage::clear( + child_info.storage_key(), + key, + ); + }, + } } /// Get a Vec of bytes from storage. pub fn get_raw( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { - let (data, child_type) = child_info.info(); - sp_io::storage::child_get( - storage_key, - data, - child_type, - key, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::get( + child_info.storage_key(), + key, + ), + } } /// Put a raw byte slice into storage. 
pub fn put_raw( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], value: &[u8], ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_set( - storage_key, - data, - child_type, - key, - value, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::set( + child_info.storage_key(), + key, + value, + ), + } } /// Calculate current child root value. -pub fn child_root( - storage_key: &[u8], +pub fn root( + child_info: ChildInfo, ) -> Vec { - sp_io::storage::child_root( - storage_key, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::root( + child_info.storage_key(), + ), + } } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 891557ab2c1d4..2bdc6600f8a01 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -47,7 +47,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -60,7 +59,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -77,7 +75,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -87,7 +84,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -100,12 +96,11 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). 
fn set_child_storage( &mut self, - storage_key: Vec, child_info: ChildInfo, key: Vec, value: Vec, ) { - self.place_child_storage(storage_key, child_info, key, Some(value)) + self.place_child_storage(child_info, key, Some(value)) } /// Clear a storage entry (`key`) of current contract being called (effective immediately). @@ -116,11 +111,10 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). fn clear_child_storage( &mut self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) { - self.place_child_storage(storage_key.to_vec(), child_info, key.to_vec(), None) + self.place_child_storage(child_info, key.to_vec(), None) } /// Whether a storage entry exists. @@ -131,11 +125,10 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { - self.child_storage(storage_key, child_info, key).is_some() + self.child_storage(child_info, key).is_some() } /// Returns the key immediately following the given key, if it exists. @@ -144,13 +137,12 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: &[u8], child_info: ChildInfo); + fn kill_child_storage(&mut self, child_info: ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -158,7 +150,6 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. 
fn clear_child_prefix( &mut self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ); @@ -169,7 +160,6 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. Return whether the operation succeeds. fn place_child_storage( &mut self, - storage_key: Vec, child_info: ChildInfo, key: Vec, value: Option>, @@ -192,7 +182,7 @@ pub trait Externalities: ExtensionStore { /// storage map will be removed. fn child_storage_root( &mut self, - storage_key: &[u8], + child_info: ChildInfo, ) -> Vec; /// Get the change trie root of the current storage overlay at a block with given parent. diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index d2ceea582051a..9ee9b76ac265f 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -76,29 +76,6 @@ pub trait Storage { self.storage(key).map(|s| s.to_vec()) } - /// All Child api uses : - /// - A `child_storage_key` to define the anchor point for the child proof - /// (commonly the location where the child root is stored in its parent trie). - /// - A `child_storage_types` to identify the kind of the child type and how its - /// `child definition` parameter is encoded. - /// - A `child_definition_parameter` which is the additional information required - /// to use the child trie. For instance defaults child tries requires this to - /// contain a collision free unique id. - /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. 
- fn child_get( - &self, - storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) -> Option> { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) - } - /// Get `key` from storage, placing the value into `value_out` and return the number of /// bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. @@ -114,6 +91,71 @@ pub trait Storage { }) } + /// Set `key` to `value` in the storage. + fn set(&mut self, key: &[u8], value: &[u8]) { + self.set_storage(key.to_vec(), value.to_vec()); + } + + /// Clear the storage of the given `key` and its value. + fn clear(&mut self, key: &[u8]) { + self.clear_storage(key) + } + + /// Check whether the given `key` exists in storage. + fn exists(&self, key: &[u8]) -> bool { + self.exists_storage(key) + } + + /// Clear the storage of each key-value pair where the key starts with the given `prefix`. + fn clear_prefix(&mut self, prefix: &[u8]) { + Externalities::clear_prefix(*self, prefix) + } + + /// "Commit" all existing operations and compute the resulting storage root. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns the SCALE encoded hash. + fn root(&mut self) -> Vec { + self.storage_root() + } + + /// "Commit" all existing operations and get the resulting storage change root. + /// `parent_hash` is a SCALE encoded hash. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns an `Option` that holds the SCALE encoded hash. + fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { + self.storage_changes_root(parent_hash) + .expect("Invalid `parent_hash` given to `changes_root`.") + } + + /// Get the next key in storage after the given one in lexicographic order. 
+ fn next_key(&mut self, key: &[u8]) -> Option> { + self.next_storage_key(&key) + } + +} + + +/// Interface for accessing the child storage for default child trie, +/// from within the runtime. +#[runtime_interface] +pub trait DefaultChildStorage { + /// `storage_key` is the full location of the root of the child trie in the parent trie. + /// + /// This function specifically returns the data for `key` in the child storage or `None` + /// if the key can not be found. + fn get( + &self, + storage_key: &[u8], + key: &[u8], + ) -> Option> { + let child_info = ChildInfo::default_unchecked(storage_key); + self.child_storage(child_info, key).map(|s| s.to_vec()) + } + /// Get `key` from child storage, placing the value into `value_out` and return the number /// of bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. @@ -121,18 +163,15 @@ pub trait Storage { /// are copied into `value_out`. /// /// See `child_get` for common child api parameters. - fn child_read( + fn read( &self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], value_out: &mut [u8], value_offset: u32, ) -> Option { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.child_storage(child_info, key) .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -142,108 +181,64 @@ pub trait Storage { }) } - /// Set `key` to `value` in the storage. - fn set(&mut self, key: &[u8], value: &[u8]) { - self.set_storage(key.to_vec(), value.to_vec()); - } - /// Set `key` to `value` in the child storage denoted by `storage_key`. /// /// See `child_get` for common child api parameters. 
- fn child_set( + fn set( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], value: &[u8], ) { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.set_child_storage(storage_key.to_vec(), child_info, key.to_vec(), value.to_vec()); - } - - /// Clear the storage of the given `key` and its value. - fn clear(&mut self, key: &[u8]) { - self.clear_storage(key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.set_child_storage(child_info, key.to_vec(), value.to_vec()); } /// Clear the given child storage of the given `key` and its value. /// /// See `child_get` for common child api parameters. - fn child_clear( + fn clear ( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], ) { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.clear_child_storage(storage_key, child_info, key); + let child_info = ChildInfo::default_unchecked(storage_key); + self.clear_child_storage(child_info, key); } /// Clear an entire child storage. /// /// See `child_get` for common child api parameters. - fn child_storage_kill( + fn storage_kill( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, ) { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.kill_child_storage(storage_key, child_info); - } - - /// Check whether the given `key` exists in storage. - fn exists(&self, key: &[u8]) -> bool { - self.exists_storage(key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.kill_child_storage(child_info); } /// Check whether the given `key` exists in storage. /// /// See `child_get` for common child api parameters. 
- fn child_exists( + fn exists( &self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], ) -> bool { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.exists_child_storage(storage_key, child_info, key) - } - - /// Clear the storage of each key-value pair where the key starts with the given `prefix`. - fn clear_prefix(&mut self, prefix: &[u8]) { - Externalities::clear_prefix(*self, prefix) + let child_info = ChildInfo::default_unchecked(storage_key); + self.exists_child_storage(child_info, key) } /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. /// /// See `child_get` for common child api parameters. - fn child_clear_prefix( + fn clear_prefix( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, prefix: &[u8], ) { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.clear_child_prefix(storage_key, child_info, prefix); - } - - /// "Commit" all existing operations and compute the resulting storage root. - /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns the SCALE encoded hash. - fn root(&mut self) -> Vec { - self.storage_root() + let child_info = ChildInfo::default_unchecked(storage_key); + self.clear_child_prefix(child_info, prefix); } /// "Commit" all existing operations and compute the resulting child storage root. @@ -253,40 +248,22 @@ pub trait Storage { /// Returns the SCALE encoded hash. /// /// See `child_get` for common child api parameters. - fn child_root( + fn root( &mut self, storage_key: &[u8], ) -> Vec { - self.child_storage_root(storage_key) - } - - /// "Commit" all existing operations and get the resulting storage change root. - /// `parent_hash` is a SCALE encoded hash. - /// - /// The hashing algorithm is defined by the `Block`. 
- /// - /// Returns an `Option` that holds the SCALE encoded hash. - fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { - self.storage_changes_root(parent_hash) - .expect("Invalid `parent_hash` given to `changes_root`.") - } - - /// Get the next key in storage after the given one in lexicographic order. - fn next_key(&mut self, key: &[u8]) -> Option> { - self.next_storage_key(&key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.child_storage_root(child_info) } /// Get the next key in storage after the given one in lexicographic order in child storage. - fn child_next_key( + fn next_key( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], ) -> Option> { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.next_child_storage_key(storage_key, child_info, key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.next_child_storage_key(child_info, key) } } @@ -917,6 +894,7 @@ pub type TestExternalities = sp_state_machine::TestExternalities: std::fmt::Debug { /// Get keyed child storage or None if there is nothing associated. fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error>; @@ -62,11 +61,10 @@ pub trait Backend: std::fmt::Debug { /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.child_storage(storage_key, child_info, key).map(|v| v.map(|v| H::hash(&v))) + self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v))) } /// true if a key exists in storage. @@ -77,11 +75,10 @@ pub trait Backend: std::fmt::Debug { /// true if a key exists in child storage. 
fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result { - Ok(self.child_storage(storage_key, child_info, key)?.is_some()) + Ok(self.child_storage(child_info, key)?.is_some()) } /// Return the next key in storage in lexicographic order or `None` if there is no value. @@ -90,7 +87,6 @@ pub trait Backend: std::fmt::Debug { /// Return the next key in child storage in lexicographic order or `None` if there is no value. fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8] ) -> Result, Self::Error>; @@ -98,7 +94,6 @@ pub trait Backend: std::fmt::Debug { /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ); @@ -118,7 +113,6 @@ pub trait Backend: std::fmt::Debug { /// call `f` for each of those keys. fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, @@ -137,7 +131,6 @@ pub trait Backend: std::fmt::Debug { /// is true if child storage root equals default storage root. 
fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -158,12 +151,11 @@ pub trait Backend: std::fmt::Debug { /// Get all keys of child storage with given prefix fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec { let mut all = Vec::new(); - self.for_child_keys_with_prefix(storage_key, child_info, prefix, |k| all.push(k.to_vec())); + self.for_child_keys_with_prefix(child_info, prefix, |k| all.push(k.to_vec())); all } @@ -183,16 +175,16 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, - I2: IntoIterator, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first - for (mut storage_key, child_delta, child_info) in child_deltas { + for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); - child_info.as_ref().do_prefix_key(&mut storage_key, None); + self.child_storage_root(child_info.as_ref(), child_delta); + let storage_key = child_info.storage_key(); txs.consolidate(child_txs); if empty { child_roots.push((storage_key, None)); @@ -237,20 +229,18 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - (*self).child_storage(storage_key, child_info, key) + (*self).child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - (*self).for_keys_in_child_storage(storage_key, child_info, f) + (*self).for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -259,11 +249,10 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, - storage_key: &[u8], child_info: 
ChildInfo, key: &[u8], ) -> Result, Self::Error> { - (*self).next_child_storage_key(storage_key, child_info, key) + (*self).next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -272,12 +261,11 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - (*self).for_child_keys_with_prefix(storage_key, child_info, prefix, f) + (*self).for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -290,7 +278,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -298,7 +285,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { I: IntoIterator)>, H::Out: Ord, { - (*self).child_storage_root(storage_key, child_info, delta) + (*self).child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -327,7 +314,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option, StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 344613242ccc9..e0be6e18fd567 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -129,38 +129,35 @@ impl Externalities for BasicExternalities { fn child_storage( &self, - storage_key: &[u8], - _child_info: ChildInfo, + child_info: ChildInfo, key: &[u8], ) -> Option { - self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned() + let storage_key = child_info.storage_key(); + self.inner.children.get(storage_key).and_then(|child| child.data.get(key)).cloned() } fn child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> 
Option> { - self.child_storage(storage_key, child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) + self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } fn original_child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { - self.child_storage_hash(storage_key, child_info, key) + self.child_storage_hash(child_info, key) } fn original_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { - Externalities::child_storage(self, storage_key, child_info, key) + Externalities::child_storage(self, child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Option { @@ -170,10 +167,10 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, - storage_key: &[u8], - _child_info: ChildInfo, + child_info: ChildInfo, key: &[u8], ) -> Option { + let storage_key = child_info.storage_key(); let range = (Bound::Excluded(key), Bound::Unbounded); self.inner.children.get(storage_key.as_ref()) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) @@ -193,11 +190,11 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, - storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, value: Option, ) { + let storage_key = child_info.storage_key().to_vec(); let child_map = self.inner.children.entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), @@ -212,10 +209,10 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, - storage_key: &[u8], - _child_info: ChildInfo, + child_info: ChildInfo, ) { - self.inner.children.remove(storage_key.as_ref()); + let storage_key = child_info.storage_key(); + self.inner.children.remove(storage_key); } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -240,11 +237,11 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, - storage_key: &[u8], - _child_info: ChildInfo, + child_info: ChildInfo, 
prefix: &[u8], ) { - if let Some(child) = self.inner.children.get_mut(storage_key.as_ref()) { + let storage_key = child_info.storage_key(); + if let Some(child) = self.inner.children.get_mut(storage_key) { let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) @@ -262,20 +259,18 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); let keys: Vec<_> = self.inner.children.iter().map(|(k, v)| { - let mut prefixed = k.to_vec(); - v.child_info.as_ref().do_prefix_key(&mut prefixed, None); - (k.to_vec(), prefixed) + (k.to_vec(), v.child_info.clone()) }).collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. - let empty_hash = default_child_trie_root::>(&[]); - for (storage_key, prefixed_storage_key) in keys { - let child_root = self.child_storage_root(storage_key.as_slice()); + let empty_hash = default_child_trie_root::>(); + for (storage_key, child_info) in keys { + let child_root = self.child_storage_root(child_info.as_ref()); if &empty_hash[..] == &child_root[..] 
{ - top.remove(prefixed_storage_key.as_slice()); + top.remove(storage_key.as_slice()); } else { - top.insert(prefixed_storage_key, child_root); + top.insert(storage_key, child_root); } } @@ -284,15 +279,15 @@ impl Externalities for BasicExternalities { fn child_storage_root( &mut self, - storage_key: &[u8], + child_info: ChildInfo, ) -> Vec { - if let Some(child) = self.inner.children.get(storage_key.as_ref()) { + if let Some(child) = self.inner.children.get(child_info.storage_key()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 + .child_storage_root(child.child_info.as_ref(), delta).0 } else { - default_child_trie_root::>(&[]) + default_child_trie_root::>() }.encode() } @@ -316,7 +311,9 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:unique_id_1" + ); #[test] fn commit_should_work() { @@ -341,30 +338,26 @@ mod tests { #[test] fn children_works() { - let child_storage = b"test".to_vec(); - let mut ext = BasicExternalities::new(Storage { top: Default::default(), children: map![ - child_storage.clone() => StorageChild { + CHILD_INFO_1.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], child_info: CHILD_INFO_1.to_owned(), } ] }); - let child = &child_storage[..]; - - assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); - ext.set_child_storage(child.to_vec(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(CHILD_INFO_1, b"dog".to_vec(), 
b"puppy".to_vec()); + assert_eq!(ext.child_storage(CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child, CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(CHILD_INFO_1, b"dog"); + assert_eq!(ext.child_storage(CHILD_INFO_1, b"dog"), None); - ext.kill_child_storage(child, CHILD_INFO_1); - assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(CHILD_INFO_1); + assert_eq!(ext.child_storage(CHILD_INFO_1, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index c731d4104b260..d3dadebf8d977 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -158,7 +158,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(sk, child_info.as_ref(), k) + if !backend.exists_child_storage(child_info.as_ref(), k) .map_err(|e| format!("{}", e))? 
{ return Ok(map); } @@ -351,8 +351,8 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked(b":child_storage:default:unique_id_1"); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked(b":child_storage:default:unique_id_2"); fn prepare_for_build(zero: u64) -> ( InMemoryBackend, diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index d073846b5b8c3..aa2a7d5fa2ea4 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -205,22 +205,21 @@ where fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(storage_key.as_ref(), key) + .child_storage(child_info.storage_key(), key) .map(|x| x.map(|x| x.to_vec())) .unwrap_or_else(|| - self.backend.child_storage(storage_key.as_ref(), child_info, key) + self.backend.child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: GetChild({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result.as_ref().map(HexDisplay::from) ); @@ -230,22 +229,21 @@ where fn child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(storage_key.as_ref(), key) + .child_storage(child_info.storage_key(), key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| - self.backend.child_storage_hash(storage_key.as_ref(), child_info, key) + self.backend.child_storage_hash(child_info, key) 
.expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: ChildHash({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result, ); @@ -255,18 +253,17 @@ where fn original_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend - .child_storage(storage_key.as_ref(), child_info, key) + .child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); trace!(target: "state-trace", "{:04x}: ChildOriginal({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result.as_ref().map(HexDisplay::from), ); @@ -276,18 +273,17 @@ where fn original_child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend - .child_storage_hash(storage_key.as_ref(), child_info, key) + .child_storage_hash(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); trace!(target: "state-trace", "{}: ChildHashOriginal({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result, ); @@ -312,22 +308,21 @@ where fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = match self.overlay.child_storage(storage_key.as_ref(), key) { + let result = match self.overlay.child_storage(child_info.storage_key(), key) { Some(x) => x.is_some(), _ => self.backend - .exists_child_storage(storage_key.as_ref(), child_info, key) + .exists_child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL), }; trace!(target: "state-trace", "{:04x}: ChildExists({}) {}={:?}", self.id, - 
HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result, ); @@ -351,15 +346,14 @@ where fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { let next_backend_key = self.backend - .next_child_storage_key(storage_key.as_ref(), child_info, key) + .next_child_storage_key(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); let next_overlay_key_change = self.overlay.next_child_storage_key_change( - storage_key.as_ref(), + child_info.storage_key(), key ); @@ -370,7 +364,6 @@ where Some(overlay_key.0.to_vec()) } else { self.next_child_storage_key( - storage_key, child_info, &overlay_key.0[..], ) @@ -396,38 +389,36 @@ where fn place_child_storage( &mut self, - storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, value: Option, ) { trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, - HexDisplay::from(&storage_key), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), value.as_ref().map(HexDisplay::from) ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.set_child_storage(storage_key, child_info, key, value); + self.overlay.set_child_storage(child_info, key, value); } fn kill_child_storage( &mut self, - storage_key: &[u8], child_info: ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_storage(storage_key.as_ref(), child_info); - self.backend.for_keys_in_child_storage(storage_key.as_ref(), child_info, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); + self.overlay.clear_child_storage(child_info); + self.backend.for_keys_in_child_storage(child_info, |key| { + 
self.overlay.set_child_storage(child_info, key.to_vec(), None); }); } @@ -451,21 +442,20 @@ where fn clear_child_prefix( &mut self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) { trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&prefix), ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_prefix(storage_key.as_ref(), child_info, prefix); - self.backend.for_child_keys_with_prefix(storage_key.as_ref(), child_info, prefix, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); + self.overlay.clear_child_prefix(child_info, prefix); + self.backend.for_child_keys_with_prefix(child_info, prefix, |key| { + self.overlay.set_child_storage(child_info, key.to_vec(), None); }); } @@ -490,24 +480,24 @@ where fn child_storage_root( &mut self, - storage_key: &[u8], + child_info: ChildInfo, ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); + let storage_key = child_info.storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { let root = self - .storage(storage_key.as_ref()) + .storage(storage_key) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(&[]) + default_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&storage_key), HexDisplay::from(&root.as_ref()), ); root.encode() } else { - let storage_key = storage_key.as_ref(); if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { let (root, is_empty, _) = { @@ -520,7 +510,7 @@ where .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); - self.backend.child_storage_root(storage_key, child_info.as_ref(), delta) + 
self.backend.child_storage_root(child_info.as_ref(), delta) }; let root = root.encode(); @@ -547,7 +537,7 @@ where .storage(storage_key.as_ref()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(&[]) + default_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", self.id, @@ -633,9 +623,9 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - const CHILD_KEY_1: &[u8] = b"Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:Child1" + ); fn prepare_overlay_with_changes() -> OverlayedChanges { @@ -750,12 +740,12 @@ mod tests { fn next_child_storage_key_works() { let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(CHILD_INFO_1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - CHILD_KEY_1.to_vec() => StorageChild { + CHILD_INFO_1.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], @@ -770,35 +760,35 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![30])); + 
assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); + overlay.set_child_storage(CHILD_INFO_1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[40]), Some(vec![50])); } #[test] fn child_storage_works() { let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(CHILD_INFO_1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - CHILD_KEY_1.to_vec() => StorageChild { + CHILD_INFO_1.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], @@ -811,24 +801,24 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(CHILD_INFO_1, &[10]), Some(vec![10])); + 
assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[10]), + ext.child_storage_hash(CHILD_INFO_1, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[20]), None); - assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[20]), Some(vec![20])); + assert_eq!(ext.child_storage(CHILD_INFO_1, &[20]), None); + assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[20]), Some(vec![20])); assert_eq!( - ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[20]), + ext.child_storage_hash(CHILD_INFO_1, &[20]), None, ); - assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.child_storage(CHILD_INFO_1, &[30]), Some(vec![31])); + assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[30]), Some(vec![40])); assert_eq!( - ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[30]), + ext.child_storage_hash(CHILD_INFO_1, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 02fd61de9c603..1a977e1d14076 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -27,7 +27,7 @@ use sp_trie::{ MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; -use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; +use sp_core::storage::{ChildInfo, OwnedChildInfo, ChildType, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -47,7 +47,7 @@ impl error::Error for Void { /// In-memory backend. 
Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, BTreeMap>, + inner: HashMap, BTreeMap>, // This field is only needed for returning reference in `as_trie_backend`. trie: Option, H>>, _hasher: PhantomData, @@ -88,7 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, StorageCollection)> + T: IntoIterator, StorageCollection)> >( &self, changes: T, @@ -107,10 +107,10 @@ impl InMemory { } } -impl From, BTreeMap>> +impl From, BTreeMap>> for InMemory { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from(inner: HashMap, BTreeMap>) -> Self { InMemory { inner, trie: None, @@ -121,8 +121,8 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> - = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); + let mut inner: HashMap, BTreeMap> + = inners.children.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); inner.insert(None, inners.top); InMemory { inner, @@ -144,12 +144,12 @@ impl From> for InMemory { } } -impl From, StorageCollection)>> +impl From, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>, + inner: Vec<(Option, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, BTreeMap> + let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -165,9 +165,9 @@ impl From, StorageCollectio impl InMemory { /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { + pub fn child_storage_keys(&self) -> impl Iterator { self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], v.1.as_ref())) + item.0.as_ref().map(|v| v.as_ref()) ) } } @@ -175,7 +175,7 @@ impl InMemory { impl Backend for InMemory 
where H::Out: Codec { type Error = Void; type Transaction = Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option, StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -186,11 +186,10 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + Ok(self.inner.get(&Some(child_info.to_owned())) .and_then(|map| map.get(key).map(Clone::clone))) } @@ -210,22 +209,20 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, mut f: F, ) { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .map(|map| map.keys().for_each(|k| f(&k))); } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } @@ -252,7 +249,6 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -260,9 +256,8 @@ impl Backend for InMemory where H::Out: Codec { I: IntoIterator, Option>)>, H::Out: Ord { - let storage_key = storage_key.to_vec(); - let parent_prefix = child_info.parent_prefix(None); - let child_info = Some((storage_key.clone(), child_info.to_owned())); + let child_type = child_info.child_type(); + let child_info = Some(child_info.to_owned()); let existing_pairs = self.inner.get(&child_info) .into_iter() @@ -270,7 +265,6 @@ impl Backend for InMemory where H::Out: Codec { let transaction: Vec<_> = delta.into_iter().collect(); let root = child_trie_root::, _, _, _>( - &storage_key, 
existing_pairs.chain(transaction.iter().cloned()) .collect::>() .into_iter() @@ -279,7 +273,9 @@ impl Backend for InMemory where H::Out: Codec { let full_transaction = transaction.into_iter().collect(); - let is_default = root == default_child_trie_root::>(parent_prefix); + let is_default = match child_type { + ChildType::ParentKeyId => root == default_child_trie_root::>(), + }; (root, is_default, vec![(child_info, full_transaction)]) } @@ -294,12 +290,11 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_key = self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + let next_key = self.inner.get(&Some(child_info.to_owned())) .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); Ok(next_key) @@ -321,11 +316,10 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() @@ -336,11 +330,8 @@ impl Backend for InMemory where H::Out: Codec { let mut new_child_roots = Vec::new(); let mut root_map = None; for (child_info, map) in &self.inner { - if let Some((storage_key, child_info)) = child_info.as_ref() { - let mut prefix_storage_key = storage_key.to_vec(); - child_info.as_ref().do_prefix_key(&mut prefix_storage_key, None); - // no need to use child_info at this point because we use a MemoryDB for - // proof (with PrefixedMemoryDB it would be needed). 
+ if let Some(child_info) = child_info.as_ref() { + let prefix_storage_key = child_info.as_ref().storage_key().to_vec(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; new_child_roots.push((prefix_storage_key, ch.as_ref().into())); } else { @@ -370,18 +361,18 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); + let child_info = OwnedChildInfo::new_default(b"1".to_vec()); let mut storage = storage.update( vec![( - Some((b"1".to_vec(), child_info.clone())), + Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))] )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), + assert_eq!(trie_backend.child_storage(child_info.as_ref(), b"2").unwrap(), Some(b"3".to_vec())); - let mut prefixed_storage_key = b"1".to_vec(); - child_info.as_ref().do_prefix_key(&mut prefixed_storage_key, None); - assert!(trie_backend.storage(prefixed_storage_key.as_slice()).unwrap().is_some()); + let child_info = child_info.as_ref(); + let storage_key = child_info.storage_key(); + assert!(trie_backend.storage(storage_key).unwrap().is_some()); } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 8bafda6aa6186..8f63aa0da8e40 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -550,7 +550,6 @@ where /// Generate child storage read proof. 
pub fn prove_child_read( mut backend: B, - storage_key: &[u8], child_info: ChildInfo, keys: I, ) -> Result> @@ -563,7 +562,7 @@ where { let trie_backend = backend.as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys) + prove_child_read_on_trie_backend(trie_backend, child_info, keys) } /// Generate storage read proof on pre-created trie backend. @@ -590,7 +589,6 @@ where /// Generate storage read proof on pre-created trie backend. pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, - storage_key: &[u8], child_info: ChildInfo, keys: I, ) -> Result> @@ -604,7 +602,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend - .child_storage(storage_key, child_info.clone(), key.as_ref()) + .child_storage(child_info.clone(), key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -635,7 +633,7 @@ where pub fn read_child_proof_check( root: H::Out, proof: StorageProof, - storage_key: &[u8], + child_info: ChildInfo, keys: I, ) -> Result, Option>>, Box> where @@ -649,7 +647,7 @@ where for key in keys.into_iter() { let value = read_child_proof_check_on_proving_backend( &proving_backend, - storage_key, + child_info, key.as_ref(), )?; result.insert(key.as_ref().to_vec(), value); @@ -672,15 +670,14 @@ where /// Check child storage read proof on pre-created proving backend. pub fn read_child_proof_check_on_proving_backend( proving_backend: &TrieBackend, H>, - storage_key: &[u8], + child_info: ChildInfo, key: &[u8], ) -> Result>, Box> where H: Hasher, H::Out: Ord + Codec, { - // Not a prefixed memory db, using empty unique id and include root resolution. 
- proving_backend.child_storage(storage_key, ChildInfo::new_default(&[]), key) + proving_backend.child_storage(child_info, key) .map_err(|e| Box::new(e) as Box) } @@ -702,7 +699,9 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub1" + ); impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -945,26 +944,22 @@ mod tests { ); ext.set_child_storage( - b"testchild".to_vec(), CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( - b"testchild", CHILD_INFO_1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( - b"testchild", CHILD_INFO_1, ); assert_eq!( ext.child_storage( - b"testchild", CHILD_INFO_1, b"abc" ), @@ -1000,20 +995,19 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, - b"sub1", CHILD_INFO_1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), - b"sub1", + CHILD_INFO_1, &[b"value3"], ).unwrap(); let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), - b"sub1", + CHILD_INFO_1, &[b"value2"], ).unwrap(); assert_eq!( @@ -1028,13 +1022,17 @@ mod tests { #[test] fn child_storage_uuid() { - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub_test1" + ); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub_test2" + ); + use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); - let subtrie1 = b"sub_test1"; - let subtrie2 = b"sub_test2"; let mut transaction = { let backend = test_trie(); 
let mut cache = StorageTransactionCache::default(); @@ -1045,8 +1043,8 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - ext.set_child_storage(subtrie1.to_vec(), CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(subtrie2.to_vec(), CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); cache.transaction.unwrap() }; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 37187e163fe1c..7dcbbdd2a0e40 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -247,12 +247,12 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. pub(crate) fn set_child_storage( &mut self, - storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, val: Option, ) { let extrinsic_index = self.extrinsic_index(); + let storage_key = child_info.storage_key().to_vec(); let map_entry = self.prospective.children.entry(storage_key) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); @@ -275,10 +275,10 @@ impl OverlayedChanges { /// [`discard_prospective`]: #method.discard_prospective pub(crate) fn clear_child_storage( &mut self, - storage_key: &[u8], child_info: ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); + let storage_key = child_info.storage_key(); let map_entry = self.prospective.children.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); @@ -349,11 +349,11 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index(); + let 
storage_key = child_info.storage_key(); let map_entry = self.prospective.children.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); @@ -538,7 +538,8 @@ impl OverlayedChanges { .chain(self.committed.children.keys()); let child_delta_iter = child_storage_keys.map(|storage_key| ( - storage_key.clone(), + self.child_info(storage_key).cloned() + .expect("child info initialized in either committed or prospective"), self.committed.children.get(storage_key) .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) @@ -547,8 +548,6 @@ impl OverlayedChanges { .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), - self.child_info(storage_key).cloned() - .expect("child info initialized in either committed or prospective"), ) ); @@ -852,38 +851,40 @@ mod tests { #[test] fn next_child_storage_key_change_works() { let child = b"Child1".to_vec(); - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = OwnedChildInfo::new_default(child.clone()); + let child_info = child_info.as_ref(); + let child = child_info.storage_key(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40])); + overlay.set_child_storage(child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child_info, vec![40], Some(vec![40])); overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), child_info, vec![30], None); + overlay.set_child_storage(child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child_info, 
vec![30], None); // next_prospective < next_committed - let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap(); + let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap(); assert_eq!(next_to_5.0.to_vec(), vec![10]); assert_eq!(next_to_5.1.value, Some(vec![10])); // next_committed < next_prospective - let next_to_10 = overlay.next_child_storage_key_change(&child, &[10]).unwrap(); + let next_to_10 = overlay.next_child_storage_key_change(child, &[10]).unwrap(); assert_eq!(next_to_10.0.to_vec(), vec![20]); assert_eq!(next_to_10.1.value, Some(vec![20])); // next_committed == next_prospective - let next_to_20 = overlay.next_child_storage_key_change(&child, &[20]).unwrap(); + let next_to_20 = overlay.next_child_storage_key_change(child, &[20]).unwrap(); assert_eq!(next_to_20.0.to_vec(), vec![30]); assert_eq!(next_to_20.1.value, None); // next_committed, no next_prospective - let next_to_30 = overlay.next_child_storage_key_change(&child, &[30]).unwrap(); + let next_to_30 = overlay.next_child_storage_key_change(child, &[30]).unwrap(); assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); - overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50])); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed - let next_to_40 = overlay.next_child_storage_key_change(&child, &[40]).unwrap(); + let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); assert_eq!(next_to_40.1.value, Some(vec![50])); } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 0572907401ba6..ec0ef6a4692ee 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -143,15 +143,13 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Produce proof for a child key query. 
pub fn child_storage( &mut self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8] ) -> Result>, String> { - let mut prefixed_storage_key = storage_key.to_vec(); - child_info.do_prefix_key(&mut prefixed_storage_key, None); - let root = self.storage(prefixed_storage_key.as_slice())? + let storage_key = child_info.storage_key(); + let root = self.storage(storage_key)? .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or(default_child_trie_root::>(&[])); + .unwrap_or(default_child_trie_root::>()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new( @@ -162,7 +160,6 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let map_e = |e| format!("Trie lookup error: {}", e); read_child_trie_value_with::, _, _>( - storage_key, child_info.keyspace(), &eph, &root.as_ref(), @@ -279,20 +276,18 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.child_storage(storage_key, child_info, key) + self.0.child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - self.0.for_keys_in_child_storage(storage_key, child_info, f) + self.0.for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -301,11 +296,10 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.next_child_storage_key(storage_key, child_info, key) + self.0.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -318,12 +312,11 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.0.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + 
self.0.for_child_keys_with_prefix( child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -336,11 +329,10 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec> { - self.0.child_keys(storage_key, child_info, prefix) + self.0.child_keys(child_info, prefix) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -351,7 +343,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -359,7 +350,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> I: IntoIterator, Option>)>, H::Out: Ord { - self.0.child_storage_root(storage_key, child_info, delta) + self.0.child_storage_root(child_info, delta) } } @@ -404,8 +395,12 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub1" + ); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub2" + ); fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, @@ -474,33 +469,29 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let subtrie1 = b"sub1"; - let subtrie2 = b"sub2"; - let own1 = subtrie1.to_vec(); - let own2 = subtrie2.to_vec(); let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), CHILD_INFO_1.to_owned())), + (Some(CHILD_INFO_1.to_owned()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), CHILD_INFO_2.to_owned())), + (Some(CHILD_INFO_2.to_owned()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory 
= InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())) + in_memory.child_storage_keys().map(|k|(k.to_owned(), Vec::new())) ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], CHILD_INFO_1, &[i]).unwrap().unwrap(), + in_memory.child_storage(CHILD_INFO_1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], CHILD_INFO_2, &[i]).unwrap().unwrap(), + in_memory.child_storage(CHILD_INFO_2, &[i]).unwrap().unwrap(), vec![i] )); @@ -528,7 +519,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -536,7 +527,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(&own1[..], CHILD_INFO_1, &[64]).unwrap().unwrap(), + proof_check.child_storage(CHILD_INFO_1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 39a34509b720b..6ff6d42aba3f8 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -128,9 +128,9 @@ impl TestExternalities self.overlay.committed.children.clone().into_iter() .chain(self.overlay.prospective.children.clone().into_iter()) - .for_each(|(keyspace, (map, child_info))| { + .for_each(|(_storage_key, (map, child_info))| { transaction.push(( - Some((keyspace, child_info)), + Some(child_info), map.into_iter() .map(|(k, v)| (k, v.value)) 
.collect::>(), diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 0df13a8fff137..29a31be210c77 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,7 +20,7 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildType}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -80,11 +80,10 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.child_storage(storage_key, child_info, key) + self.essence.child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -93,11 +92,10 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.next_child_storage_key(storage_key, child_info, key) + self.essence.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -110,21 +108,19 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - self.essence.for_keys_in_child_storage(storage_key, child_info, f) + self.essence.for_keys_in_child_storage(child_info, f) } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.essence.for_child_keys_with_prefix(child_info, prefix, f) } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -194,7 +190,6 @@ impl, H: Hasher> Backend for TrieBackend where 
fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -202,12 +197,13 @@ impl, H: Hasher> Backend for TrieBackend where I: IntoIterator)>, H::Out: Ord, { - let default_root = default_child_trie_root::>(child_info.parent_prefix(None)); + let default_root = match child_info.child_type() { + ChildType::ParentKeyId => default_child_trie_root::>() + }; let mut write_overlay = S::Overlay::default(); - let mut prefixed_storage_key = storage_key.to_vec(); - child_info.do_prefix_key(&mut prefixed_storage_key, None); - let mut root = match self.storage(prefixed_storage_key.as_slice()) { + let storage_key = child_info.storage_key(); + let mut root = match self.storage(storage_key) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -223,7 +219,6 @@ impl, H: Hasher> Backend for TrieBackend where ); match child_delta_trie_root::, _, _, _, _, _>( - storage_key, child_info.keyspace(), &mut eph, root, @@ -252,15 +247,15 @@ pub mod tests { use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; - const CHILD_KEY_1: &[u8] = b"sub1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub1" + ); fn test_db() -> (PrefixedMemoryDB, H256) { let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); + let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_INFO_1.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -270,9 +265,7 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut 
mdb, &mut root); - let mut prefixed_storage_key = CHILD_KEY_1.to_vec(); - CHILD_INFO_1.do_prefix_key(&mut prefixed_storage_key, None); - trie.insert(prefixed_storage_key.as_slice(), &sub_root[..]).expect("insert failed"); + trie.insert(CHILD_INFO_1.storage_key(), &sub_root[..]).expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); @@ -298,7 +291,7 @@ pub mod tests { fn read_from_child_storage_returns_some() { let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3").unwrap(), + test_trie.child_storage(CHILD_INFO_1, b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 980bf13ad53cb..9a8ad14445c5f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -72,21 +72,18 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Access the root of the child storage in its parent trie - fn child_root(&self, storage_key: &[u8], child_info: ChildInfo) -> Result, String> { - let mut prefixed_storage_key = storage_key.to_vec(); - child_info.do_prefix_key(&mut prefixed_storage_key, None); - self.storage(prefixed_storage_key.as_slice()) + fn child_root(&self, child_info: ChildInfo) -> Result, String> { + self.storage(child_info.storage_key()) } /// Return the next key in the child trie i.e. the minimum key that is strictly superior to /// `key` in lexicographic order. pub fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, String> { - let child_root = match self.child_root(storage_key, child_info)? { + let child_root = match self.child_root(child_info)? 
{ Some(child_root) => child_root, None => return Ok(None), }; @@ -94,7 +91,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let mut hash = H::Out::default(); if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", storage_key)); + return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); } // note: child_root and hash must be same size, panics otherwise. hash.as_mut().copy_from_slice(&child_root[..]); @@ -168,12 +165,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Get the value of child storage at given key. pub fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, String> { - let root = self.child_root(storage_key, child_info)? - .unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()); + let root = self.child_root(child_info)? + .unwrap_or(default_child_trie_root::>().encode()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -183,19 +179,18 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(storage_key, child_info.keyspace(), &eph, &root, key) + read_child_trie_value::, _>(child_info.keyspace(), &eph, &root, key) .map_err(map_e) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. 
pub fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - let root = match self.child_root(storage_key, child_info) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()), + let root = match self.child_root(child_info) { + Ok(v) => v.unwrap_or(default_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -209,7 +204,6 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( - storage_key, child_info.keyspace(), &eph, &root, @@ -222,13 +216,12 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Execute given closure for all keys starting with prefix. pub fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], mut f: F, ) { - let root_vec = match self.child_root(storage_key, child_info) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()), + let root_vec = match self.child_root(child_info) { + Ok(v) => v.unwrap_or(default_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -443,7 +436,9 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = ChildInfo::default_unchecked( + b":child_storage:default:MyChild" + ); // Contains values let mut root_1 = H256::default(); // Contains child trie @@ -467,9 +462,7 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); - let mut prefixed_storage_key = b"MyChild".to_vec(); - child_info.do_prefix_key(&mut prefixed_storage_key, None); - trie.insert(prefixed_storage_key.as_slice(), root_1.as_ref()) + trie.insert(child_info.storage_key(), root_1.as_ref()) .expect("insert failed"); }; @@ -485,19 +478,19 @@ mod test { let essence_2 = TrieBackendEssence::new(mdb, 
root_2); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) + essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), Ok(Some(b"4".to_vec())) + essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(child_info, b"4"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(child_info, b"5"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), Ok(None) + essence_2.next_child_storage_key(child_info, b"6"), Ok(None) ); } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index ea4dd56a1e7a9..df0b9a932af10 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -62,7 +62,7 @@ pub struct StorageChild { pub struct Storage { /// Top trie storage data. pub top: StorageMap, - /// Children trie storage data by storage key. + /// Children trie storage data. /// Note that the key is not including child prefix, this will /// not be possible if a different kind of trie than `default` /// get in use. @@ -133,7 +133,6 @@ pub mod well_known_keys { /// Information related to a child state. pub enum ChildInfo<'a> { ParentKeyId(ChildTrie<'a>), - Default(ChildTrie<'a>), } /// Owned version of `ChildInfo`. @@ -142,31 +141,46 @@ pub enum ChildInfo<'a> { #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum OwnedChildInfo { ParentKeyId(OwnedChildTrie), - Default(OwnedChildTrie), } impl<'a> ChildInfo<'a> { /// Instantiates information for a default child trie. 
- pub const fn new_uid_parent_key(storage_key: &'a[u8]) -> Self { + /// This is a rather unsafe method and requires to be + /// use from a valid payload such as: + /// ``` + /// use sp_storage::{ChildInfo, ChildType, OwnedChildInfo}; + /// + /// let info1 = ChildInfo::default_unchecked( + /// b":child_storage:default:stor_key", + /// ); + /// let info2 = OwnedChildInfo::new_default( + /// b"stor_key".to_vec(), + /// ); + /// + /// assert!(info1.info() == info2.as_ref().info()); + /// ``` + pub const fn default_unchecked(encoded: &'a[u8]) -> Self { ChildInfo::ParentKeyId(ChildTrie { - data: storage_key, + data: encoded, }) } - /// Instantiates information for a default child trie. - pub const fn new_default(unique_id: &'a[u8]) -> Self { - ChildInfo::Default(ChildTrie { - data: unique_id, - }) + /// Create child info from a linear byte packed value and a given type. + pub fn resolve_child_info(child_type: u32, info: &'a [u8]) -> Option { + match child_type { + x if x == ChildType::ParentKeyId as u32 => { + debug_assert!( + info.starts_with(ChildType::ParentKeyId.parent_prefix()) + ); + Some(Self::default_unchecked(info)) + }, + _ => None, + } } /// Instantiates a owned version of this child info. pub fn to_owned(&self) -> OwnedChildInfo { match self { - ChildInfo::Default(ChildTrie { data }) - => OwnedChildInfo::Default(OwnedChildTrie { - data: data.to_vec(), - }), ChildInfo::ParentKeyId(ChildTrie { data }) => OwnedChildInfo::ParentKeyId(OwnedChildTrie { data: data.to_vec(), @@ -174,21 +188,6 @@ impl<'a> ChildInfo<'a> { } } - /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, data: &'a[u8], storage_key: &'a[u8]) -> Option { - match child_type { - x if x == ChildType::ParentKeyId as u32 => { - if !data.len() == 0 { - // do not allow anything for additional data. 
- return None; - } - Some(ChildInfo::new_uid_parent_key(storage_key)) - }, - x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), - _ => None, - } - } - /// Return a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { @@ -196,9 +195,6 @@ impl<'a> ChildInfo<'a> { ChildInfo::ParentKeyId(ChildTrie { data, }) => (data, ChildType::ParentKeyId as u32), - ChildInfo::Default(ChildTrie { - data, - }) => (data, ChildType::CryptoUniqueId as u32), } } @@ -206,33 +202,43 @@ impl<'a> ChildInfo<'a> { /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { + match self { + ChildInfo::ParentKeyId(..) => self.unprefixed_storage_key(), + } + } + + /// Return a reference to the full location in the direct parent of + /// this trie. + /// If the trie got no parent this returns the empty slice, + /// so by nature an empty slice is not a valid parent location. + /// This does not include child type related prefix. + pub fn storage_key(&self) -> &[u8] { match self { ChildInfo::ParentKeyId(ChildTrie { data, }) => &data[..], - ChildInfo::Default(ChildTrie { - data, - }) => &data[..], } } - /// Return the location reserved for this child trie in their parent trie if there - /// is one. - pub fn parent_prefix(&self, _parent: Option<&'a ChildInfo>) -> &'a [u8] { + /// Return a reference to the location in the direct parent of + /// this trie. + /// The static part of the storage key is omitted. + pub fn unprefixed_storage_key(&self) -> &[u8] { match self { - ChildInfo::ParentKeyId(..) - | ChildInfo::Default(..) => DEFAULT_CHILD_TYPE_PARENT_PREFIX, + ChildInfo::ParentKeyId(ChildTrie { + data, + }) => if data.len() != 0 { + &data[ChildType::ParentKeyId.parent_prefix().len()..] 
+ } else { + &[] + }, } } - /// Change a key to get prefixed with the parent prefix. - pub fn do_prefix_key(&self, key: &mut Vec, parent: Option<&ChildInfo>) { - let parent_prefix = self.parent_prefix(parent); - let key_len = key.len(); - if parent_prefix.len() > 0 { - key.resize(key_len + parent_prefix.len(), 0); - key.copy_within(..key_len, parent_prefix.len()); - key[..parent_prefix.len()].copy_from_slice(parent_prefix); + /// Return the type for this child info. + pub fn child_type(&self) -> ChildType { + match self { + ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, } } } @@ -244,17 +250,37 @@ impl<'a> ChildInfo<'a> { pub enum ChildType { /// If runtime module ensures that the child key is a unique id that will /// only be used once, this parent key is used as a child trie unique id. - ParentKeyId = 0, - /// Default, this uses a cryptographic strong unique id as input, this id - /// is used as a unique child trie identifier. - CryptoUniqueId = 1, + ParentKeyId = 1, +} + +impl ChildType { + /// Change a key to get prefixed with the parent prefix. + /// TODO try to make this method non public + pub fn do_prefix_key(&self, key: &mut Vec) { + let parent_prefix = self.parent_prefix(); + let key_len = key.len(); + if parent_prefix.len() > 0 { + key.resize(key_len + parent_prefix.len(), 0); + key.copy_within(..key_len, parent_prefix.len()); + key[..parent_prefix.len()].copy_from_slice(parent_prefix); + } + } + + /// Return the location reserved for this child trie in their parent trie if there + /// is one. + fn parent_prefix(&self) -> &'static [u8] { + match self { + &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, + } + } } impl OwnedChildInfo { - /// Instantiates info for a default child trie. - pub fn new_default(unique_id: Vec) -> Self { - OwnedChildInfo::Default(OwnedChildTrie { - data: unique_id, + /// Instantiates info for a default child trie with a default parent. 
+ pub fn new_default(mut storage_key: Vec) -> Self { + ChildType::ParentKeyId.do_prefix_key(&mut storage_key); + OwnedChildInfo::ParentKeyId(OwnedChildTrie { + data: storage_key, }) } @@ -262,18 +288,32 @@ impl OwnedChildInfo { /// are not compatible. pub fn try_update(&mut self, other: ChildInfo) -> bool { match self { - OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), OwnedChildInfo::ParentKeyId(owned_child_trie) => owned_child_trie.try_update(other), } } + /// Owned variant of `info`. + pub fn owned_info(self) -> (Vec, u32) { + match self { + OwnedChildInfo::ParentKeyId(OwnedChildTrie { + data, + }) => (data, ChildType::ParentKeyId as u32), + } + } + + /// Return a reference to the full location in the direct parent of + /// this trie. + pub fn storage_key(self) -> Vec { + match self { + OwnedChildInfo::ParentKeyId(OwnedChildTrie { + data, + }) => data, + } + } + /// Get `ChildInfo` reference to this owned child info. pub fn as_ref(&self) -> ChildInfo { match self { - OwnedChildInfo::Default(OwnedChildTrie { data }) - => ChildInfo::Default(ChildTrie { - data: data.as_slice(), - }), OwnedChildInfo::ParentKeyId(OwnedChildTrie { data }) => ChildInfo::ParentKeyId(ChildTrie { data: data.as_slice(), @@ -309,7 +349,6 @@ impl OwnedChildTrie { /// are not compatible. fn try_update(&mut self, other: ChildInfo) -> bool { match other { - ChildInfo::Default(other) => self.data[..] == other.data[..], ChildInfo::ParentKeyId(other) => self.data[..] 
== other.data[..], } } @@ -319,17 +358,8 @@ const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default #[test] fn assert_default_trie_in_child_trie() { - let child_info = ChildInfo::new_default(b"any key"); - let prefix = child_info.parent_prefix(None); + let child_info = OwnedChildInfo::new_default(b"any key".to_vec()); + let child_info = child_info.as_ref(); + let prefix = child_info.child_type().parent_prefix(); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); } - -#[test] -fn test_do_prefix() { - let child_info = ChildInfo::new_default(b"any key"); - let mut prefixed_1 = b"key".to_vec(); - child_info.do_prefix_key(&mut prefixed_1, None); - let mut prefixed_2 = DEFAULT_CHILD_TYPE_PARENT_PREFIX.to_vec(); - prefixed_2.extend_from_slice(b"key"); - assert_eq!(prefixed_1, prefixed_2); -} diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 0cf268856bb45..b037a27b7b47a 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -212,7 +212,6 @@ pub fn read_trie_value_with< /// Determine the default child trie root. pub fn default_child_trie_root( - _storage_key: &[u8], ) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } @@ -220,7 +219,6 @@ pub fn default_child_trie_root( /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_trie_root( - _storage_key: &[u8], input: I, ) -> ::Out where @@ -234,7 +232,6 @@ pub fn child_trie_root( /// Determine a child trie root given a hash DB and delta values. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_delta_trie_root( - _storage_key: &[u8], keyspace: &[u8], db: &mut DB, root_data: RD, @@ -269,7 +266,6 @@ pub fn child_delta_trie_root( /// Call `f` for all keys in a child trie. 
pub fn for_keys_in_child_trie( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -320,7 +316,6 @@ pub fn record_all_keys( /// Read a value from the child trie. pub fn read_child_trie_value( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -340,7 +335,6 @@ pub fn read_child_trie_value( /// Read a value from the child trie with given query. pub fn read_child_trie_value_with, DB>( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 2cb08db6ff472..2c6967ff2e0f5 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -123,13 +123,13 @@ impl substrate_test_client::GenesisInit for GenesisParameters { let mut storage = self.genesis_config().genesis_map(); - let child_roots = storage.children.iter().map(|(sk, child_content)| { + let child_roots = storage.children.iter().map(|(_sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); - let mut prefixed_storage_key = sk.clone(); - child_content.child_info.as_ref().do_prefix_key(&mut prefixed_storage_key, None); - (prefixed_storage_key, state_root.encode()) + let child_info = child_content.child_info.as_ref(); + let storage_key = child_info.storage_key().to_vec(); + (storage_key, state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() @@ -194,14 +194,13 @@ pub trait TestClientBuilderExt: Sized { /// # Panics /// /// Panics if the key is empty. 
- fn add_extra_child_storage>, K: Into>, V: Into>>( + fn add_extra_child_storage>, V: Into>>( mut self, - storage_key: SK, child_info: ChildInfo, key: K, value: V, ) -> Self { - let storage_key = storage_key.into(); + let storage_key = child_info.storage_key().to_vec(); let key = key.into(); assert!(!storage_key.is_empty()); assert!(!key.is_empty()); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 944932052fb32..0c3459bbb7f18 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -49,7 +49,6 @@ use sp_version::NativeVersion; use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; use sp_inherents::{CheckInherentsResult, InherentData}; use cfg_if::cfg_if; -use sp_core::storage::ChildType; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::AuthorityId; @@ -873,22 +872,17 @@ fn test_read_storage() { } fn test_read_child_storage() { - const CHILD_KEY: &[u8] = b"read_child_storage"; - const UNIQUE_ID: &[u8] = b":unique_id"; + const STORAGE_KEY: &[u8] = b":child_storage:default:unique_id_1"; const KEY: &[u8] = b":read_child_storage"; - sp_io::storage::child_set( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + sp_io::default_child_storage::set( + STORAGE_KEY, KEY, b"test", ); let mut v = [0u8; 4]; - let r = sp_io::storage::child_read( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + let r = sp_io::default_child_storage::read( + STORAGE_KEY, KEY, &mut v, 0, @@ -897,10 +891,8 @@ fn test_read_child_storage() { assert_eq!(&v, b"test"); let mut v = [0u8; 4]; - let r = sp_io::storage::child_read( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + let r = sp_io::default_child_storage::read( + STORAGE_KEY, KEY, &mut v, 8, From 87bd97c24d9cf8b384ff14fa52df1f779ee8d033 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Feb 2020 23:17:48 +0100 Subject: [PATCH 048/185] fix polka ref issue. 
--- primitives/state-machine/src/basic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index e0be6e18fd567..4f7d7bfb43e16 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -172,7 +172,7 @@ impl Externalities for BasicExternalities { ) -> Option { let storage_key = child_info.storage_key(); let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children.get(storage_key.as_ref()) + self.inner.children.get(storage_key) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } From 17cf6130ee8933290bd24c37b4ec3a8e3958c50f Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 Feb 2020 12:40:18 +0100 Subject: [PATCH 049/185] Switching back to unprefixed in child info (all temporary struct are not general child struct, but default child struct only). Applying merge of ChildInfo and OwnedChildInfo. --- client/chain-spec/src/chain_spec.rs | 3 +- client/db/src/bench.rs | 14 +- client/db/src/lib.rs | 14 +- client/db/src/storage_cache.rs | 14 +- client/network/src/chain.rs | 4 +- client/network/src/protocol.rs | 6 +- .../src/protocol/light_client_handler.rs | 6 +- client/rpc/src/state/state_full.rs | 14 +- client/rpc/src/state/tests.rs | 11 +- client/src/client.rs | 8 +- client/src/light/backend.rs | 14 +- client/src/light/fetcher.rs | 12 +- frame/contracts/src/account_db.rs | 12 +- frame/contracts/src/lib.rs | 22 +- frame/contracts/src/rent.rs | 6 +- frame/contracts/src/tests.rs | 2 +- frame/support/src/storage/child.rs | 32 +-- primitives/externalities/src/lib.rs | 24 +- primitives/io/src/lib.rs | 36 +-- primitives/runtime/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 40 ++-- primitives/state-machine/src/basic.rs | 56 +++-- .../state-machine/src/changes_trie/build.rs | 56 +++-- primitives/state-machine/src/ext.rs | 89 +++---- .../state-machine/src/in_memory_backend.rs | 52 ++--- 
primitives/state-machine/src/lib.rs | 44 ++-- .../state-machine/src/overlayed_changes.rs | 21 +- .../state-machine/src/proving_backend.rs | 39 ++-- primitives/state-machine/src/trie_backend.rs | 26 +-- .../state-machine/src/trie_backend_essence.rs | 23 +- primitives/storage/src/lib.rs | 217 ++++++++---------- test-utils/client/src/lib.rs | 10 +- test-utils/runtime/client/src/lib.rs | 9 +- test-utils/runtime/src/lib.rs | 2 +- 34 files changed, 455 insertions(+), 485 deletions(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index bf12d3e578a73..af75f7c3c04f3 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -287,8 +287,7 @@ impl ChainSpec { .collect(); let children = storage.children.into_iter() .map(|(sk, child)| { - let info = child.child_info.as_ref(); - let (info, ci_type) = info.info(); + let (info, ci_type) = child.child_info.info(); ( StorageKey(sk), ChildRawStorage { diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 4d80d77cb60c2..fce759590e531 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -140,7 +140,7 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) @@ -152,7 +152,7 @@ impl StateBackend> for BenchmarkingState { fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) @@ -164,7 +164,7 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) @@ -184,7 +184,7 @@ impl StateBackend> for BenchmarkingState 
{ fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { @@ -194,7 +194,7 @@ impl StateBackend> for BenchmarkingState { fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -211,7 +211,7 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, @@ -229,7 +229,7 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index efbcb26ff8fd8..fe3d707579291 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -152,7 +152,7 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.child_storage(child_info, key) @@ -164,7 +164,7 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.exists_child_storage(child_info, key) @@ -176,7 +176,7 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.next_child_storage_key(child_info, key) @@ -192,7 +192,7 @@ impl StateBackend> for RefTrackingState { fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.state.for_keys_in_child_storage(child_info, f) @@ -200,7 +200,7 @@ impl StateBackend> for RefTrackingState { fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: 
&ChildInfo, prefix: &[u8], f: F, ) { @@ -216,7 +216,7 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -235,7 +235,7 @@ impl StateBackend> for RefTrackingState { fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.child_keys(child_info, prefix) diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 7f5dcecf41dae..44d84e5689a83 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -539,7 +539,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { let key = (child_info.storage_key().to_vec(), key.to_vec()); @@ -575,7 +575,7 @@ impl>, B: BlockT> StateBackend> for Ca fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.exists_child_storage(child_info, key) @@ -583,7 +583,7 @@ impl>, B: BlockT> StateBackend> for Ca fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.state.for_keys_in_child_storage(child_info, f) @@ -595,7 +595,7 @@ impl>, B: BlockT> StateBackend> for Ca fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.next_child_storage_key(child_info, key) @@ -611,7 +611,7 @@ impl>, B: BlockT> StateBackend> for Ca fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -627,7 +627,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -646,7 +646,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_keys( &self, - child_info: 
ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.child_keys(child_info, prefix) diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index e419323c99edd..442334cb4f015 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -56,7 +56,7 @@ pub trait Client: Send + Sync { fn read_child_proof( &self, block: &Block::Hash, - child_info: ChildInfo, + child_info: &ChildInfo, keys: &[Vec], ) -> Result; @@ -137,7 +137,7 @@ impl Client for SubstrateClient where fn read_child_proof( &self, block: &Block::Hash, - child_info: ChildInfo, + child_info: &ChildInfo, keys: &[Vec], ) -> Result { (self as &SubstrateClient) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 52914cca277e0..df0156f77e92f 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::{listeners::ListenerId, Substream}, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, OwnedChildInfo}; +use sp_core::storage::{StorageKey, ChildInfo}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1567,10 +1567,10 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let child_info = OwnedChildInfo::new_default(request.storage_key.clone()); + let child_info = ChildInfo::new_default(&request.storage_key); let proof = match self.context_data.chain.read_child_proof( &request.block, - child_info.as_ref(), + &child_info, &request.keys, ) { Ok(proof) => proof, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 3480de1bb5700..a9accd7f158d1 
100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -48,7 +48,7 @@ use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, OwnedChildInfo, StorageKey}; +use sp_core::storage::{ChildInfo, StorageKey}; use sp_blockchain::{Error as ClientError}; use sp_runtime::traits::{Block, Header, NumberFor, Zero}; use std::{ @@ -510,8 +510,8 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let child_info = OwnedChildInfo::new_default(request.storage_key.clone()); - let proof = match self.chain.read_child_proof(&block, child_info.as_ref(), &request.keys) { + let child_info = ChildInfo::new_default(&request.storage_key); + let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { Ok(proof) => proof, Err(error) => { log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 238c99fc9e67b..a949cee862845 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,7 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, OwnedChildInfo}, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -314,10 +314,10 @@ impl StateBackend for FullState StateBackend for FullState StateBackend for FullState = ChildInfo::default_unchecked( - b":child_storage:default:child" -); #[test] fn should_return_storage() { @@ -41,10 +38,11 @@ fn should_return_storage() { const VALUE: &[u8] = b"hello world"; const CHILD_VALUE: &[u8] = b"hello world !"; + let child_info = ChildInfo::new_default(STORAGE_KEY); let mut core = 
tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) + .add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); @@ -77,13 +75,14 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { + let child_info = ChildInfo::new_default(STORAGE_KEY); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) + .add_child_storage(&child_info, "key", vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey(b"test".to_vec()); + let child_key = StorageKey(STORAGE_KEY.to_vec()); let key = StorageKey(b"key".to_vec()); diff --git a/client/src/client.rs b/client/src/client.rs index a40068609b564..2f69d21a41ec1 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -334,7 +334,7 @@ impl Client where pub fn child_storage_keys( &self, id: &BlockId, - child_info: ChildInfo, + child_info: &ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { let keys = self.state_at(id)? @@ -349,7 +349,7 @@ impl Client where pub fn child_storage( &self, id: &BlockId, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? @@ -362,7 +362,7 @@ impl Client where pub fn child_storage_hash( &self, id: &BlockId, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? 
@@ -403,7 +403,7 @@ impl Client where pub fn read_child_proof( &self, id: &BlockId, - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> sp_blockchain::Result where I: IntoIterator, diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index e4e5d681813b9..4fba83b882c68 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -24,7 +24,7 @@ use parking_lot::RwLock; use codec::{Decode, Encode}; use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, @@ -312,7 +312,7 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, _> = HashMap::new(); + let mut storage: HashMap, _> = HashMap::new(); storage.insert(None, input.top); // create a list of children keys to re-compute roots for @@ -386,7 +386,7 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> ClientResult>> { match *self { @@ -406,7 +406,7 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { match *self { @@ -434,7 +434,7 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, action: A, ) { match *self { @@ -446,7 +446,7 @@ impl StateBackend for GenesisOrUnavailableState fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], action: A, ) { @@ -470,7 +470,7 @@ impl StateBackend for 
GenesisOrUnavailableState fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index cb0115409405e..f37c06bea247d 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -23,7 +23,7 @@ use std::marker::PhantomData; use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::ChildInfo; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, AtLeast32Bit, CheckedConversion, @@ -241,11 +241,11 @@ impl FetchChecker for LightDataChecker request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - let child_trie = OwnedChildInfo::new_default(request.storage_key.clone()); + let child_trie = ChildInfo::new_default(&request.storage_key); read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, - child_trie.as_ref(), + &child_trie, request.keys.iter(), ).map_err(Into::into) } @@ -347,7 +347,7 @@ pub mod tests { use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; use sp_core::{blake2_256, Blake2Hasher, ChangesTrieConfiguration, H256}; - use sp_core::storage::{well_known_keys, StorageKey, OwnedChildInfo}; + use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; use sp_runtime::generic::BlockId; use sp_state_machine::Backend; use super::*; @@ -400,8 +400,8 @@ pub mod tests { fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; - let child_info = OwnedChildInfo::new_default(b"child1".to_vec()); - let child_info = child_info.as_ref(); 
+ let child_info = ChildInfo::new_default(b"child1"); + let child_info = &child_info; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index cd9f595665b07..7617546c0aa1e 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -128,7 +128,7 @@ impl AccountDb for DirectAccountDb { trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(&crate::trie_unique_id(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -175,13 +175,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. 
(true, Some(info), Some(code_hash)) => { - child::kill_storage(info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -220,16 +220,16 @@ impl AccountDb for DirectAccountDb { for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - new_info.child_trie_unique_id(), + &new_info.child_trie_unique_id(), &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(&new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); } else { - child::kill(new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(&new_info.child_trie_unique_id(), &blake2_256(&k)); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index ecb2107bbd650..ae9bbbe3f42b9 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -123,7 +123,7 @@ use frame_support::dispatch::{DispatchResult, Dispatchable}; use frame_support::{ Parameter, decl_module, decl_event, decl_storage, decl_error, parameter_types, IsSubType, weights::DispatchInfo, - storage::child::{self, ChildInfo, OwnedChildInfo}, + storage::child::{self, ChildInfo}, }; use frame_support::traits::{OnReapAccount, OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; @@ -232,7 +232,7 @@ impl RawAliveContractInfo ChildInfo { - ChildInfo::default_unchecked(trie_id) + ChildInfo::new_default(trie_id) } pub type TombstoneContractInfo = @@ -265,10 +265,6 @@ pub trait TrieIdGenerator { /// /// The implementation must ensure every new trie id is unique: two consecutive calls with the /// same parameter needs to return different trie id values. 
- /// - /// Also, the implementation is responsible for ensuring that `TrieId` starts with - /// `:child_storage:`. - /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 fn trie_id(account_id: &AccountId) -> TrieId; } @@ -292,9 +288,7 @@ where let mut buf = Vec::new(); buf.extend_from_slice(account_id.as_ref()); buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - let buf = T::Hashing::hash(&buf[..]); - // TODO: see https://github.com/paritytech/substrate/issues/2325 - OwnedChildInfo::new_default(buf.as_ref().to_vec()).owned_info().0 + T::Hashing::hash(&buf[..]).as_ref().into() } } @@ -810,11 +804,11 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_unique_id(), &blake2_256(key), ).map(|value| { child::kill( - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_unique_id(), &blake2_256(key), ); @@ -827,7 +821,7 @@ impl Module { // This operation is cheap enough because last_write (delta not included) // is not this block as it has been checked earlier. &child::root( - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_unique_id(), )[..], code_hash, ); @@ -835,7 +829,7 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_unique_id(), &blake2_256(key), &value, ); @@ -935,7 +929,7 @@ decl_storage! 
{ impl OnReapAccount for Module { fn on_reap_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 8b342f95b4350..e48ea9a1c2707 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,7 +223,7 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( - alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -235,7 +235,7 @@ fn enact_verdict( // Note: this operation is heavy. let child_storage_root = child::root( - alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); let tombstone = >::new( @@ -246,7 +246,7 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( - alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 650726165a80b..e9cd522f2efa8 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -202,7 +202,7 @@ impl TrieIdGenerator for DummyTrieIdGenerator { let mut res = vec![]; res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); - child::OwnedChildInfo::new_default(res).owned_info().0 + res } } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 32e5bcf1dadf6..658908d258a2f 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -22,11 +22,11 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::{ChildInfo, OwnedChildInfo, ChildType}; +pub use 
sp_core::storage::{ChildInfo, ChildType}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { match child_info.child_type() { @@ -49,7 +49,7 @@ pub fn get( /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { get(child_info, key).unwrap_or_else(Default::default) @@ -58,7 +58,7 @@ pub fn get_or_default( /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { @@ -68,7 +68,7 @@ pub fn get_or( /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { @@ -77,7 +77,7 @@ pub fn get_or_else T>( /// Put `value` in storage under `key`. pub fn put( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &T, ) { @@ -94,7 +94,7 @@ pub fn put( /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let r = get(child_info, key); @@ -107,7 +107,7 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. pub fn take_or_default( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { take(child_info, key).unwrap_or_else(Default::default) @@ -116,7 +116,7 @@ pub fn take_or_default( /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. 
Ensure there is no explicit entry on return. pub fn take_or( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { @@ -126,7 +126,7 @@ pub fn take_or( /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { @@ -135,7 +135,7 @@ pub fn take_or_else T>( /// Check to see if `key` has an explicit entry in storage. pub fn exists( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { match child_info.child_type() { @@ -148,7 +148,7 @@ pub fn exists( /// Remove all `storage_key` key/values pub fn kill_storage( - child_info: ChildInfo, + child_info: &ChildInfo, ) { match child_info.child_type() { ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( @@ -159,7 +159,7 @@ pub fn kill_storage( /// Ensure `key` has no explicit entry in storage. pub fn kill( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { match child_info.child_type() { @@ -174,7 +174,7 @@ pub fn kill( /// Get a Vec of bytes from storage. pub fn get_raw( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { match child_info.child_type() { @@ -187,7 +187,7 @@ pub fn get_raw( /// Put a raw byte slice into storage. pub fn put_raw( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &[u8], ) { @@ -202,7 +202,7 @@ pub fn put_raw( /// Calculate current child root value. 
pub fn root( - child_info: ChildInfo, + child_info: &ChildInfo, ) -> Vec { match child_info.child_type() { ChildType::ParentKeyId => sp_io::default_child_storage::root( diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 2bdc6600f8a01..beb59745e8831 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -47,7 +47,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -59,7 +59,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -75,7 +75,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -84,7 +84,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -96,7 +96,7 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). fn set_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Vec, ) { @@ -111,7 +111,7 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). fn clear_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { self.place_child_storage(child_info, key.to_vec(), None) @@ -125,7 +125,7 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. 
fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { self.child_storage(child_info, key).is_some() @@ -137,12 +137,12 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, child_info: ChildInfo); + fn kill_child_storage(&mut self, child_info: &ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -150,7 +150,7 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. fn clear_child_prefix( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ); @@ -160,7 +160,7 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. Return whether the operation succeeds. fn place_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Option>, ); @@ -182,7 +182,7 @@ pub trait Externalities: ExtensionStore { /// storage map will be removed. fn child_storage_root( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) -> Vec; /// Get the change trie root of the current storage overlay at a block with given parent. 
diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 9ee9b76ac265f..befc3434761d8 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -152,8 +152,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) -> Option> { - let child_info = ChildInfo::default_unchecked(storage_key); - self.child_storage(child_info, key).map(|s| s.to_vec()) + let child_info = ChildInfo::new_default(storage_key); + self.child_storage(&child_info, key).map(|s| s.to_vec()) } /// Get `key` from child storage, placing the value into `value_out` and return the number @@ -170,8 +170,8 @@ pub trait DefaultChildStorage { value_out: &mut [u8], value_offset: u32, ) -> Option { - let child_info = ChildInfo::default_unchecked(storage_key); - self.child_storage(child_info, key) + let child_info = ChildInfo::new_default(storage_key); + self.child_storage(&child_info, key) .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -190,8 +190,8 @@ pub trait DefaultChildStorage { key: &[u8], value: &[u8], ) { - let child_info = ChildInfo::default_unchecked(storage_key); - self.set_child_storage(child_info, key.to_vec(), value.to_vec()); + let child_info = ChildInfo::new_default(storage_key); + self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } /// Clear the given child storage of the given `key` and its value. @@ -202,8 +202,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) { - let child_info = ChildInfo::default_unchecked(storage_key); - self.clear_child_storage(child_info, key); + let child_info = ChildInfo::new_default(storage_key); + self.clear_child_storage(&child_info, key); } /// Clear an entire child storage. 
@@ -213,8 +213,8 @@ pub trait DefaultChildStorage { &mut self, storage_key: &[u8], ) { - let child_info = ChildInfo::default_unchecked(storage_key); - self.kill_child_storage(child_info); + let child_info = ChildInfo::new_default(storage_key); + self.kill_child_storage(&child_info); } /// Check whether the given `key` exists in storage. @@ -225,8 +225,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) -> bool { - let child_info = ChildInfo::default_unchecked(storage_key); - self.exists_child_storage(child_info, key) + let child_info = ChildInfo::new_default(storage_key); + self.exists_child_storage(&child_info, key) } /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. @@ -237,8 +237,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], prefix: &[u8], ) { - let child_info = ChildInfo::default_unchecked(storage_key); - self.clear_child_prefix(child_info, prefix); + let child_info = ChildInfo::new_default(storage_key); + self.clear_child_prefix(&child_info, prefix); } /// "Commit" all existing operations and compute the resulting child storage root. @@ -252,8 +252,8 @@ pub trait DefaultChildStorage { &mut self, storage_key: &[u8], ) -> Vec { - let child_info = ChildInfo::default_unchecked(storage_key); - self.child_storage_root(child_info) + let child_info = ChildInfo::new_default(storage_key); + self.child_storage_root(&child_info) } /// Get the next key in storage after the given one in lexicographic order in child storage. 
@@ -262,8 +262,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) -> Option> { - let child_info = ChildInfo::default_unchecked(storage_key); - self.next_child_storage_key(child_info, key) + let child_info = ChildInfo::new_default(storage_key); + self.next_child_storage_key(&child_info, key) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 517141a210e06..5049d7be1369d 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -136,7 +136,7 @@ impl BuildStorage for sp_core::storage::Storage { let k = k.clone(); if let Some(map) = storage.children.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(other_map.child_info.as_ref()) { + if !map.child_info.try_update(&other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 55b7e988dad07..aa089bab9e920 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,7 +20,7 @@ use log::warn; use hash_db::Hasher; use codec::Encode; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ @@ -54,14 +54,14 @@ pub trait Backend: std::fmt::Debug { /// Get keyed child storage or None if there is nothing associated. fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error>; /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v))) @@ -75,7 +75,7 @@ pub trait Backend: std::fmt::Debug { /// true if a key exists in child storage. 
fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { Ok(self.child_storage(child_info, key)?.is_some()) @@ -87,14 +87,14 @@ pub trait Backend: std::fmt::Debug { /// Return the next key in child storage in lexicographic order or `None` if there is no value. fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result, Self::Error>; /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ); @@ -113,7 +113,7 @@ pub trait Backend: std::fmt::Debug { /// call `f` for each of those keys. fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ); @@ -131,7 +131,7 @@ pub trait Backend: std::fmt::Debug { /// is true if child storage root equals default storage root. fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -151,7 +151,7 @@ pub trait Backend: std::fmt::Debug { /// Get all keys of child storage with given prefix fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { let mut all = Vec::new(); @@ -175,7 +175,7 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, - I2: IntoIterator, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); @@ -183,13 +183,13 @@ pub trait Backend: std::fmt::Debug { // child first for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(child_info.as_ref(), child_delta); - let storage_key = child_info.storage_key(); + self.child_storage_root(&child_info, child_delta); + let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { - child_roots.push((storage_key, None)); + 
child_roots.push((prefixed_storage_key, None)); } else { - child_roots.push((storage_key, Some(child_root.encode()))); + child_roots.push((prefixed_storage_key, Some(child_root.encode()))); } } let (root, parent_txs) = self.storage_root( @@ -229,7 +229,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { (*self).child_storage(child_info, key) @@ -237,7 +237,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { (*self).for_keys_in_child_storage(child_info, f) @@ -249,7 +249,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { (*self).next_child_storage_key(child_info, key) @@ -261,7 +261,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -278,7 +278,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -314,7 +314,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( - Option, + Option, StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 4f7d7bfb43e16..61ec462491b50 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -129,7 +129,7 @@ impl Externalities for BasicExternalities { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let storage_key = child_info.storage_key(); @@ -138,7 +138,7 @@ impl Externalities for BasicExternalities { fn 
child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) @@ -146,7 +146,7 @@ impl Externalities for BasicExternalities { fn original_child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { self.child_storage_hash(child_info, key) @@ -154,7 +154,7 @@ impl Externalities for BasicExternalities { fn original_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { Externalities::child_storage(self, child_info, key) @@ -167,7 +167,7 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let storage_key = child_info.storage_key(); @@ -190,7 +190,7 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { @@ -209,7 +209,7 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) { let storage_key = child_info.storage_key(); self.inner.children.remove(storage_key); @@ -237,7 +237,7 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { let storage_key = child_info.storage_key(); @@ -258,19 +258,19 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let keys: Vec<_> = self.inner.children.iter().map(|(k, v)| { - (k.to_vec(), v.child_info.clone()) + let keys: Vec<_> = self.inner.children.iter().map(|(_k, v)| { + (v.child_info.prefixed_storage_key(), v.child_info.clone()) }).collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. 
Using null storage key until multiple // type of child trie support. let empty_hash = default_child_trie_root::>(); - for (storage_key, child_info) in keys { - let child_root = self.child_storage_root(child_info.as_ref()); + for (prefixed_storage_key, child_info) in keys { + let child_root = self.child_storage_root(&child_info); if &empty_hash[..] == &child_root[..] { - top.remove(storage_key.as_slice()); + top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(storage_key, child_root); + top.insert(prefixed_storage_key, child_root); } } @@ -279,13 +279,13 @@ impl Externalities for BasicExternalities { fn child_storage_root( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) -> Vec { if let Some(child) = self.inner.children.get(child_info.storage_key()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(child.child_info.as_ref(), delta).0 + .child_storage_root(&child.child_info, delta).0 } else { default_child_trie_root::>() }.encode() @@ -311,10 +311,6 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:unique_id_1" - ); - #[test] fn commit_should_work() { let mut ext = BasicExternalities::default(); @@ -338,26 +334,28 @@ mod tests { #[test] fn children_works() { + let child_info = ChildInfo::new_default(b"storage_key"); + let child_info = &child_info; let mut ext = BasicExternalities::new(Storage { top: Default::default(), children: map![ - CHILD_INFO_1.storage_key().to_vec() => StorageChild { + child_info.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info.to_owned(), } ] }); - assert_eq!(ext.child_storage(CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child_info, b"doe"), 
Some(b"reindeer".to_vec())); - ext.set_child_storage(CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child_info, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child_info, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(child_info, b"dog"); + assert_eq!(ext.child_storage(child_info, b"dog"), None); - ext.kill_child_storage(CHILD_INFO_1); - assert_eq!(ext.child_storage(CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(child_info); + assert_eq!(ext.child_storage(child_info, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index d3dadebf8d977..3d5ca3d41ba21 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -138,7 +138,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.child_info(sk).cloned(); + let child_info = changes.default_child_info(sk).cloned(); ( changes.committed.children.get(sk).map(|c| &c.0), changes.prospective.children.get(sk).map(|c| &c.0), @@ -158,7 +158,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(child_info.as_ref(), k) + if !backend.exists_child_storage(&child_info, k) .map_err(|e| format!("{}", e))? 
{ return Ok(map); } @@ -351,15 +351,14 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked(b":child_storage:default:unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked(b":child_storage:default:unique_id_2"); - fn prepare_for_build(zero: u64) -> ( InMemoryBackend, InMemoryStorage, OverlayedChanges, Configuration, ) { + let child_info_1 = ChildInfo::new_default(b"storage_key1"); + let child_info_2 = ChildInfo::new_default(b"storage_key2"); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -368,8 +367,8 @@ mod test { (vec![104], vec![255]), (vec![105], vec![255]), ].into_iter().collect::>().into(); - let child_trie_key1 = b"1".to_vec(); - let child_trie_key2 = b"2".to_vec(); + let child_trie_key1 = child_info_1.storage_key().to_vec(); + let child_trie_key2 = child_info_2.storage_key().to_vec(); let storage = InMemoryStorage::with_inputs(vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), @@ -436,13 +435,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info_1.to_owned())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_2.to_owned())), + ].into_iter().collect(), child_info_2.to_owned())), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -465,7 +464,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info_1.to_owned())), ].into_iter().collect(), }, collect_extrinsics: true, @@ -486,6 +485,8 @@ mod test { #[test] fn 
build_changes_trie_nodes_on_non_digest_block() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; let changes_trie_nodes = prepare_input( @@ -502,11 +503,11 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 5u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, vec![0, 2, 3]), ]), - (ChildIndex { block: zero + 5, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2]), ]), @@ -522,6 +523,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l1() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; let changes_trie_nodes = prepare_input( @@ -543,7 +546,7 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), @@ -552,7 +555,7 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] 
}, vec![zero + 2]), InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), ]), - (ChildIndex { block: zero + 4, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), @@ -567,6 +570,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l2() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; let changes_trie_nodes = prepare_input( @@ -589,13 +594,13 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 16u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, vec![0, 2, 3]), InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), ]), - (ChildIndex { block: zero + 16, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]), ]), @@ -656,6 +661,8 @@ mod test { #[test] fn build_changes_trie_nodes_ignores_temporary_storage_values() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, storage, mut changes, config) = prepare_for_build(zero); // 110: missing from backend, set to None in overlay @@ -684,7 +691,7 
@@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), @@ -693,7 +700,7 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), ]), - (ChildIndex { block: zero + 4, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), @@ -708,6 +715,8 @@ mod test { #[test] fn cache_is_used_when_changes_trie_is_built() { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, mut storage, changes, config) = prepare_for_build(0); let parent = AnchorBlockId { hash: Default::default(), number: 15 }; @@ -727,8 +736,8 @@ mod test { let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) .set_digest_input_blocks(vec![1, 2, 3]) .insert(None, vec![vec![100], vec![102]].into_iter().collect()) - .insert(Some(b"1".to_vec()), vec![vec![103], vec![104]].into_iter().collect()) - .insert(Some(b"2".to_vec()), vec![vec![105], vec![106]].into_iter().collect()) + .insert(Some(child_trie_key1.clone()), vec![vec![103], vec![104]].into_iter().collect()) + .insert(Some(child_trie_key2.clone()), vec![vec![105], vec![106]].into_iter().collect()) .complete(4, &trie_root4); storage.cache_mut().perform(cached_data4); @@ -754,7 +763,10 @@ mod test { .map(|(k, i)| (k, i.collect::>())) .collect::>(); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 
16u64, storage_key: b"1".to_vec() }).unwrap(), + child_changes_tries_nodes.get(&ChildIndex { + block: 16u64, + storage_key: child_trie_key1.clone(), + }).unwrap(), &vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2, 3]), @@ -763,7 +775,7 @@ mod test { ], ); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: b"2".to_vec() }).unwrap(), + child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.to_vec() }).unwrap(), &vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index aa2a7d5fa2ea4..d5f12643d00d4 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -205,7 +205,7 @@ where fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -229,7 +229,7 @@ where fn child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -253,7 +253,7 @@ where fn original_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -273,7 +273,7 @@ where fn original_child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -308,7 +308,7 @@ where fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -346,7 +346,7 @@ where fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let next_backend_key = self.backend @@ -389,7 +389,7 @@ where fn 
place_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { @@ -407,7 +407,7 @@ where fn kill_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, @@ -442,7 +442,7 @@ where fn clear_child_prefix( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", @@ -480,13 +480,14 @@ where fn child_storage_root( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); let storage_key = child_info.storage_key(); + let prefixed_storage_key = child_info.prefixed_storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { let root = self - .storage(storage_key) + .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( default_child_trie_root::>() @@ -499,7 +500,7 @@ where root.encode() } else { - if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { + if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { let (root, is_empty, _) = { let delta = self.overlay.committed.children.get(storage_key) .into_iter() @@ -510,7 +511,7 @@ where .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); - self.backend.child_storage_root(child_info.as_ref(), delta) + self.backend.child_storage_root(&child_info, delta) }; let root = root.encode(); @@ -520,9 +521,9 @@ where // A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. 
if is_empty { - self.overlay.set_storage(storage_key.into(), None); + self.overlay.set_storage(prefixed_storage_key, None); } else { - self.overlay.set_storage(storage_key.into(), Some(root.clone())); + self.overlay.set_storage(prefixed_storage_key, Some(root.clone())); } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", @@ -534,7 +535,7 @@ where } else { // empty overlay let root = self - .storage(storage_key.as_ref()) + .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( default_child_trie_root::>() @@ -623,10 +624,6 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:Child1" - ); - fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { @@ -738,20 +735,23 @@ mod tests { #[test] fn next_child_storage_key_works() { + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; + let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child_info, vec![20], None); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - CHILD_INFO_1.storage_key().to_vec() => StorageChild { + child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], vec![40] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info.to_owned(), } ], }.into(); @@ -760,67 +760,68 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[5]), Some(vec![10])); + 
assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child_info, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child_info, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(child_info, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(CHILD_INFO_1, vec![50], Some(vec![50])); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); } #[test] fn child_storage_works() { + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child_info, vec![20], None); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - CHILD_INFO_1.storage_key().to_vec() => StorageChild { + child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], vec![30] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info.to_owned(), } ], }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - 
assert_eq!(ext.child_storage(CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); + assert_eq!(ext.original_child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(CHILD_INFO_1, &[10]), + ext.child_storage_hash(child_info, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(CHILD_INFO_1, &[20]), None); - assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[20]), Some(vec![20])); + assert_eq!(ext.child_storage(child_info, &[20]), None); + assert_eq!(ext.original_child_storage(child_info, &[20]), Some(vec![20])); assert_eq!( - ext.child_storage_hash(CHILD_INFO_1, &[20]), + ext.child_storage_hash(child_info, &[20]), None, ); - assert_eq!(ext.child_storage(CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![31])); + assert_eq!(ext.original_child_storage(child_info, &[30]), Some(vec![40])); assert_eq!( - ext.child_storage_hash(CHILD_INFO_1, &[30]), + ext.child_storage_hash(child_info, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); - } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 1a977e1d14076..f4cdb7315c756 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -27,7 +27,7 @@ use sp_trie::{ MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; -use sp_core::storage::{ChildInfo, OwnedChildInfo, ChildType, Storage}; +use sp_core::storage::{ChildInfo, ChildType, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. 
https://github.com/rust-lang/rust/issues/35121 @@ -47,7 +47,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, BTreeMap>, + inner: HashMap, BTreeMap>, // This field is only needed for returning reference in `as_trie_backend`. trie: Option, H>>, _hasher: PhantomData, @@ -88,7 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, StorageCollection)> + T: IntoIterator, StorageCollection)> >( &self, changes: T, @@ -107,10 +107,10 @@ impl InMemory { } } -impl From, BTreeMap>> +impl From, BTreeMap>> for InMemory { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from(inner: HashMap, BTreeMap>) -> Self { InMemory { inner, trie: None, @@ -121,7 +121,7 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> + let mut inner: HashMap, BTreeMap> = inners.children.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); inner.insert(None, inners.top); InMemory { @@ -144,12 +144,12 @@ impl From> for InMemory { } } -impl From, StorageCollection)>> +impl From, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option, StorageCollection)>, + inner: Vec<(Option, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, BTreeMap> + let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -164,18 +164,16 @@ impl From, StorageCollection)>> } impl InMemory { - /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v| v.as_ref()) - ) + /// Child storage infos iterator. 
+ pub fn child_storage_infos(&self) -> impl Iterator { + self.inner.iter().filter_map(|item| item.0.as_ref()) } } impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( - Option, + Option, StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -186,7 +184,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { Ok(self.inner.get(&Some(child_info.to_owned())) @@ -209,7 +207,7 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, mut f: F, ) { self.inner.get(&Some(child_info.to_owned())) @@ -218,7 +216,7 @@ impl Backend for InMemory where H::Out: Codec { fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -249,7 +247,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -290,7 +288,7 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); @@ -316,7 +314,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { self.inner.get(&Some(child_info.to_owned())) @@ -331,7 +329,7 @@ impl Backend for InMemory where H::Out: Codec { let mut root_map = None; for (child_info, map) in &self.inner { if let Some(child_info) = child_info.as_ref() { - let prefix_storage_key = child_info.as_ref().storage_key().to_vec(); + let prefix_storage_key = child_info.prefixed_storage_key(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; 
new_child_roots.push((prefix_storage_key, ch.as_ref().into())); } else { @@ -361,7 +359,8 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"1".to_vec()); + let child_info = ChildInfo::new_default(b"1"); + let child_info = &child_info; let mut storage = storage.update( vec![( Some(child_info.clone()), @@ -369,10 +368,9 @@ mod tests { )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(child_info.as_ref(), b"2").unwrap(), + assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); - let child_info = child_info.as_ref(); - let storage_key = child_info.storage_key(); - assert!(trie_backend.storage(storage_key).unwrap().is_some()); + let storage_key = child_info.prefixed_storage_key(); + assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 8f63aa0da8e40..640a57b37f8d8 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -550,7 +550,7 @@ where /// Generate child storage read proof. pub fn prove_child_read( mut backend: B, - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -589,7 +589,7 @@ where /// Generate storage read proof on pre-created trie backend. 
pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -602,7 +602,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend - .child_storage(child_info.clone(), key.as_ref()) + .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -633,7 +633,7 @@ where pub fn read_child_proof_check( root: H::Out, proof: StorageProof, - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result, Option>>, Box> where @@ -670,7 +670,7 @@ where /// Check child storage read proof on pre-created proving backend. pub fn read_child_proof_check_on_proving_backend( proving_backend: &TrieBackend, H>, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> where @@ -699,10 +699,6 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub1" - ); - impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -931,6 +927,8 @@ mod tests { #[test] fn set_child_storage_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -944,23 +942,23 @@ mod tests { ); ext.set_child_storage( - CHILD_INFO_1, + child_info, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( - CHILD_INFO_1, + child_info, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( - CHILD_INFO_1, + child_info, ); assert_eq!( ext.child_storage( - CHILD_INFO_1, + child_info, b"abc" ), None @@ -969,6 +967,8 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; // fetch read proof from 
'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -995,19 +995,19 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, - CHILD_INFO_1, + child_info, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), - CHILD_INFO_1, + child_info, &[b"value3"], ).unwrap(); let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), - CHILD_INFO_1, + child_info, &[b"value2"], ).unwrap(); assert_eq!( @@ -1023,12 +1023,8 @@ mod tests { #[test] fn child_storage_uuid() { - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub_test1" - ); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub_test2" - ); + let child_info_1 = ChildInfo::new_default(b"sub_test1"); + let child_info_2 = ChildInfo::new_default(b"sub_test2"); use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); @@ -1043,8 +1039,8 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - ext.set_child_storage(CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); cache.transaction.unwrap() }; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 7dcbbdd2a0e40..71f5d66b4ba72 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -28,7 +28,7 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use 
sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; use std::{mem, ops}; use hash_db::Hasher; @@ -77,7 +77,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, /// Child storage changes. - pub children: HashMap, OwnedChildInfo)>, + pub children: HashMap, ChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -247,7 +247,7 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. pub(crate) fn set_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, val: Option, ) { @@ -275,7 +275,7 @@ impl OverlayedChanges { /// [`discard_prospective`]: #method.discard_prospective pub(crate) fn clear_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key(); @@ -349,7 +349,7 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index(); @@ -430,7 +430,7 @@ impl OverlayedChanges { /// Will panic if there are any uncommitted prospective changes. fn drain_committed(&mut self) -> ( impl Iterator)>, - impl Iterator)>, OwnedChildInfo))>, + impl Iterator)>, ChildInfo))>, ) { assert!(self.prospective.is_empty()); ( @@ -538,7 +538,7 @@ impl OverlayedChanges { .chain(self.committed.children.keys()); let child_delta_iter = child_storage_keys.map(|storage_key| ( - self.child_info(storage_key).cloned() + self.default_child_info(storage_key).cloned() .expect("child info initialized in either committed or prospective"), self.committed.children.get(storage_key) .into_iter() @@ -594,7 +594,7 @@ impl OverlayedChanges { /// Get child info for a storage key. 
/// Take the latest value so prospective first. - pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> { + pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { if let Some((_, ci)) = self.prospective.children.get(storage_key) { return Some(&ci); } @@ -850,9 +850,8 @@ mod tests { #[test] fn next_child_storage_key_change_works() { - let child = b"Child1".to_vec(); - let child_info = OwnedChildInfo::new_default(child.clone()); - let child_info = child_info.as_ref(); + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; let child = child_info.storage_key(); let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], Some(vec![20])); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ec0ef6a4692ee..8542bdbef732c 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -143,7 +143,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Produce proof for a child key query. 
pub fn child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result>, String> { let storage_key = child_info.storage_key(); @@ -276,7 +276,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.0.child_storage(child_info, key) @@ -284,7 +284,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.0.for_keys_in_child_storage(child_info, f) @@ -296,7 +296,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.0.next_child_storage_key(child_info, key) @@ -312,7 +312,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -329,7 +329,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.0.child_keys(child_info, prefix) @@ -343,7 +343,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -395,13 +395,6 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub1" - ); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub2" - ); - fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { @@ -469,29 +462,33 @@ mod tests { #[test] fn 
proof_recorded_and_checked_with_child() { + let child_info_1 = ChildInfo::new_default(b"sub1"); + let child_info_2 = ChildInfo::new_default(b"sub2"); + let child_info_1 = &child_info_1; + let child_info_2 = &child_info_2; let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(CHILD_INFO_1.to_owned()), + (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(CHILD_INFO_2.to_owned()), + (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.to_owned(), Vec::new())) + in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())) ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(CHILD_INFO_1, &[i]).unwrap().unwrap(), + in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(CHILD_INFO_2, &[i]).unwrap().unwrap(), + in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i] )); @@ -519,7 +516,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -527,7 +524,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(CHILD_INFO_1, &[64]).unwrap().unwrap(), + proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs 
index 29a31be210c77..2c09c049b542d 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -80,7 +80,7 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { self.essence.child_storage(child_info, key) @@ -92,7 +92,7 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { self.essence.next_child_storage_key(child_info, key) @@ -108,7 +108,7 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.essence.for_keys_in_child_storage(child_info, f) @@ -116,7 +116,7 @@ impl, H: Hasher> Backend for TrieBackend where fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -190,7 +190,7 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -202,8 +202,8 @@ impl, H: Hasher> Backend for TrieBackend where }; let mut write_overlay = S::Overlay::default(); - let storage_key = child_info.storage_key(); - let mut root = match self.storage(storage_key) { + let prefixed_storage_key = child_info.prefixed_storage_key(); + let mut root = match self.storage(prefixed_storage_key.as_slice()) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -247,15 +247,14 @@ pub mod tests { use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub1" - ); + const CHILD_KEY_1: &[u8] = b"sub1"; fn test_db() -> (PrefixedMemoryDB, H256) { 
+ let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_INFO_1.keyspace()); + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -265,7 +264,8 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(CHILD_INFO_1.storage_key(), &sub_root[..]).expect("insert failed"); + trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..]) + .expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); @@ -291,7 +291,7 @@ pub mod tests { fn read_from_child_storage_returns_some() { let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_INFO_1, b"value3").unwrap(), + test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 9a8ad14445c5f..763f57bd6b7d5 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -72,15 +72,15 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: ChildInfo) -> Result, String> { - self.storage(child_info.storage_key()) + fn child_root(&self, child_info: &ChildInfo) -> Result, String> { + self.storage(child_info.prefixed_storage_key().as_slice()) } /// Return the next key in the child trie i.e. 
the minimum key that is strictly superior to /// `key` in lexicographic order. pub fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, String> { let child_root = match self.child_root(child_info)? { @@ -103,7 +103,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: fn next_storage_key_from_root( &self, root: &H::Out, - child_info: Option, + child_info: Option<&ChildInfo>, key: &[u8], ) -> Result, String> { let mut read_overlay = S::Overlay::default(); @@ -165,7 +165,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Get the value of child storage at given key. pub fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, String> { let root = self.child_root(child_info)? @@ -186,7 +186,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Retrieve all entries keys of child storage and call `f` for each of those keys. pub fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { let root = match self.child_root(child_info) { @@ -216,7 +216,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Execute given closure for all keys starting with prefix. 
pub fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], mut f: F, ) { @@ -242,7 +242,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, prefix: &[u8], mut f: F, - child_info: Option, + child_info: Option<&ChildInfo>, ) { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -436,9 +436,8 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::default_unchecked( - b":child_storage:default:MyChild" - ); + let child_info = ChildInfo::new_default(b"MyChild"); + let child_info = &child_info; // Contains values let mut root_1 = H256::default(); // Contains child trie @@ -462,7 +461,7 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); - trie.insert(child_info.storage_key(), root_1.as_ref()) + trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); }; diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index df0b9a932af10..30677f0f617c2 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -53,7 +53,7 @@ pub struct StorageChild { pub data: StorageMap, /// Associated child info for a child /// trie. - pub child_info: OwnedChildInfo, + pub child_info: ChildInfo, } #[cfg(feature = "std")] @@ -129,62 +129,60 @@ pub mod well_known_keys { } } -#[derive(Clone, Copy)] /// Information related to a child state. -pub enum ChildInfo<'a> { - ParentKeyId(ChildTrie<'a>), -} - -/// Owned version of `ChildInfo`. -/// To be use in persistence layers. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] -pub enum OwnedChildInfo { - ParentKeyId(OwnedChildTrie), +pub enum ChildInfo { + ParentKeyId(ChildTrie), } -impl<'a> ChildInfo<'a> { - /// Instantiates information for a default child trie. 
- /// This is a rather unsafe method and requires to be - /// use from a valid payload such as: - /// ``` - /// use sp_storage::{ChildInfo, ChildType, OwnedChildInfo}; - /// - /// let info1 = ChildInfo::default_unchecked( - /// b":child_storage:default:stor_key", - /// ); - /// let info2 = OwnedChildInfo::new_default( - /// b"stor_key".to_vec(), - /// ); - /// - /// assert!(info1.info() == info2.as_ref().info()); - /// ``` - pub const fn default_unchecked(encoded: &'a[u8]) -> Self { +impl ChildInfo { + /// Instantiates info for a default child trie with a default unprefixed parent + /// storage key. + pub fn new_default(storage_key: &[u8]) -> Self { + let data = storage_key.to_vec(); + ChildInfo::ParentKeyId(ChildTrie { data }) + } + + /// Instantiates info for a default child trie with a default unprefixed parent + /// owned storage key. + pub fn new_default_from_vec(storage_key: Vec) -> Self { ChildInfo::ParentKeyId(ChildTrie { - data: encoded, + data: storage_key, }) } + /// Try to update with another instance, return false if both instance + /// are not compatible. + pub fn try_update(&mut self, other: &ChildInfo) -> bool { + match self { + ChildInfo::ParentKeyId(child_trie) => child_trie.try_update(other), + } + } + /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, info: &'a [u8]) -> Option { - match child_type { - x if x == ChildType::ParentKeyId as u32 => { + pub fn resolve_child_info(child_type: u32, info: &[u8]) -> Option { + match ChildType::new(child_type) { + Some(ChildType::ParentKeyId) => { debug_assert!( info.starts_with(ChildType::ParentKeyId.parent_prefix()) ); - Some(Self::default_unchecked(info)) + Some(Self::new_default(info)) }, - _ => None, + None => None, } } - /// Instantiates a owned version of this child info. - pub fn to_owned(&self) -> OwnedChildInfo { + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. 
+ pub fn top_trie() -> Self { + Self::new_default(&[]) + } + + /// Is this child info a the top trie. + pub fn is_top_trie(&self) -> bool { match self { - ChildInfo::ParentKeyId(ChildTrie { data }) - => OwnedChildInfo::ParentKeyId(OwnedChildTrie { - data: data.to_vec(), - }), + ChildInfo::ParentKeyId(ChildTrie { data }) => data.len() == 0 } } @@ -198,20 +196,30 @@ impl<'a> ChildInfo<'a> { } } + /// Owned variant of `info`. + pub fn into_info(self) -> (Vec, u32) { + match self { + ChildInfo::ParentKeyId(ChildTrie { + data, + }) => (data, ChildType::ParentKeyId as u32), + } + } + /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { match self { - ChildInfo::ParentKeyId(..) => self.unprefixed_storage_key(), + ChildInfo::ParentKeyId(..) => self.storage_key(), } } - /// Return a reference to the full location in the direct parent of + /// Return a reference to the location in the direct parent of /// this trie. /// If the trie got no parent this returns the empty slice, /// so by nature an empty slice is not a valid parent location. /// This does not include child type related prefix. + /// The static part of the storage key is omitted. pub fn storage_key(&self) -> &[u8] { match self { ChildInfo::ParentKeyId(ChildTrie { @@ -220,17 +228,25 @@ impl<'a> ChildInfo<'a> { } } - /// Return a reference to the location in the direct parent of + /// Return a the full location in the direct parent of /// this trie. - /// The static part of the storage key is omitted. - pub fn unprefixed_storage_key(&self) -> &[u8] { + pub fn prefixed_storage_key(&self) -> Vec { match self { ChildInfo::ParentKeyId(ChildTrie { data, - }) => if data.len() != 0 { - &data[ChildType::ParentKeyId.parent_prefix().len()..] 
- } else { - &[] + }) => ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), + } + } + + /// Return a the full location in the direct parent of + /// this trie. + pub fn into_prefixed_storage_key(self) -> Vec { + match self { + ChildInfo::ParentKeyId(ChildTrie { + mut data, + }) => { + ChildType::ParentKeyId.do_prefix_key(&mut data); + data }, } } @@ -247,16 +263,34 @@ impl<'a> ChildInfo<'a> { /// It does not strictly define different child type, it can also /// be related to technical consideration or api variant. #[repr(u32)] +#[derive(Clone, Copy, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug))] pub enum ChildType { /// If runtime module ensures that the child key is a unique id that will - /// only be used once, this parent key is used as a child trie unique id. + /// only be used once, its parent key is used as a child trie unique id. ParentKeyId = 1, } impl ChildType { + /// Try to get a child type from its `u32` representation. + fn new(repr: u32) -> Option { + Some(match repr { + r if r == ChildType::ParentKeyId as u32 => ChildType::ParentKeyId, + _ => return None, + }) + } + /// Change a key to get prefixed with the parent prefix. - /// TODO try to make this method non public - pub fn do_prefix_key(&self, key: &mut Vec) { + fn new_prefixed_key(&self, key: &[u8]) -> Vec { + let parent_prefix = self.parent_prefix(); + let mut result = Vec::with_capacity(parent_prefix.len() + key.len()); + result.extend_from_slice(parent_prefix); + result.extend_from_slice(key); + result + } + + /// Change a key to get prefixed with the parent prefix. + fn do_prefix_key(&self, key: &mut Vec) { let parent_prefix = self.parent_prefix(); let key_len = key.len(); if parent_prefix.len() > 0 { @@ -275,79 +309,24 @@ impl ChildType { } } -impl OwnedChildInfo { - /// Instantiates info for a default child trie with a default parent. 
- pub fn new_default(mut storage_key: Vec) -> Self { - ChildType::ParentKeyId.do_prefix_key(&mut storage_key); - OwnedChildInfo::ParentKeyId(OwnedChildTrie { - data: storage_key, - }) - } - - /// Try to update with another instance, return false if both instance - /// are not compatible. - pub fn try_update(&mut self, other: ChildInfo) -> bool { - match self { - OwnedChildInfo::ParentKeyId(owned_child_trie) => owned_child_trie.try_update(other), - } - } - - /// Owned variant of `info`. - pub fn owned_info(self) -> (Vec, u32) { - match self { - OwnedChildInfo::ParentKeyId(OwnedChildTrie { - data, - }) => (data, ChildType::ParentKeyId as u32), - } - } - - /// Return a reference to the full location in the direct parent of - /// this trie. - pub fn storage_key(self) -> Vec { - match self { - OwnedChildInfo::ParentKeyId(OwnedChildTrie { - data, - }) => data, - } - } - - /// Get `ChildInfo` reference to this owned child info. - pub fn as_ref(&self) -> ChildInfo { - match self { - OwnedChildInfo::ParentKeyId(OwnedChildTrie { data }) - => ChildInfo::ParentKeyId(ChildTrie { - data: data.as_slice(), - }), - - } - } -} - /// A child trie of default type. -/// Default is the same implementation as the top trie. -/// It share its trie node storage with any kind of key, -/// and its unique id needs to be collision free (eg strong -/// crypto hash). -#[derive(Clone, Copy)] -pub struct ChildTrie<'a> { - /// Data containing unique id. - /// Unique id must but unique and free of any possible key collision - /// (depending on its storage behavior). - data: &'a[u8], -} - -/// Owned version of default child trie `ChildTrie`. +/// It uses the same default implementation as the top trie, +/// top trie being a child trie with no keyspace and no storage key. +/// Its keyspace is the variable (unprefixed) part of its storage key. +/// It shares its trie nodes backend storage with every other +/// child trie, so its storage key needs to be a unique id +/// that will be use only once. 
#[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] -pub struct OwnedChildTrie { - /// See `ChildTrie` reference field documentation. +pub struct ChildTrie { + /// Data is the full prefixed storage key. data: Vec, } -impl OwnedChildTrie { +impl ChildTrie { /// Try to update with another instance, return false if both instance /// are not compatible. - fn try_update(&mut self, other: ChildInfo) -> bool { + fn try_update(&mut self, other: &ChildInfo) -> bool { match other { ChildInfo::ParentKeyId(other) => self.data[..] == other.data[..], } @@ -357,9 +336,9 @@ impl OwnedChildTrie { const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; #[test] -fn assert_default_trie_in_child_trie() { - let child_info = OwnedChildInfo::new_default(b"any key".to_vec()); - let child_info = child_info.as_ref(); +fn test_prefix_default_child_info() { + let child_info = ChildInfo::new_default(b"any key"); let prefix = child_info.child_type().parent_prefix(); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); + assert!(prefix.starts_with(DEFAULT_CHILD_TYPE_PARENT_PREFIX)); } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 9267989a40c53..e248986f67c62 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -129,17 +129,17 @@ impl TestClientBuilder, - child_key: impl AsRef<[u8]>, - child_info: ChildInfo, value: impl AsRef<[u8]>, ) -> Self { - let entry = self.child_storage_extension.entry(key.as_ref().to_vec()) + let storage_key = child_info.storage_key(); + let entry = self.child_storage_extension.entry(storage_key.to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), - child_info: child_info.to_owned(), + child_info: child_info.clone(), }); - entry.data.insert(child_key.as_ref().to_vec(), value.as_ref().to_vec()); + entry.data.insert(key.as_ref().to_vec(), value.as_ref().to_vec()); self } diff --git 
a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 2c6967ff2e0f5..c6e1d4752705b 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -127,9 +127,8 @@ impl substrate_test_client::GenesisInit for GenesisParameters { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); - let child_info = child_content.child_info.as_ref(); - let storage_key = child_info.storage_key().to_vec(); - (storage_key, state_root.encode()) + let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); + (prefixed_storage_key, state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() @@ -196,7 +195,7 @@ pub trait TestClientBuilderExt: Sized { /// Panics if the key is empty. fn add_extra_child_storage>, V: Into>>( mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: K, value: V, ) -> Self { @@ -208,7 +207,7 @@ pub trait TestClientBuilderExt: Sized { .entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), - child_info: child_info.to_owned(), + child_info: child_info.clone(), }).data.insert(key, value.into()); self } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 0c3459bbb7f18..5d3c7ecfcfea1 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -872,7 +872,7 @@ fn test_read_storage() { } fn test_read_child_storage() { - const STORAGE_KEY: &[u8] = b":child_storage:default:unique_id_1"; + const STORAGE_KEY: &[u8] = b"unique_id_1"; const KEY: &[u8] = b":read_child_storage"; sp_io::default_child_storage::set( STORAGE_KEY, From b3ccc93a54db9b9c597f90b207da0d42ef8afcbb Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 Feb 2020 13:01:56 +0100 Subject: [PATCH 050/185] bump version --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 97424950fc569..9ff6d46a3b971 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -82,7 +82,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 219, + spec_version: 220, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; From afe85ae775511bd143c83c525cde0150045cb5e6 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 Feb 2020 13:37:36 +0100 Subject: [PATCH 051/185] fix tabs and doc. --- .../src/protocol/light_client_handler.rs | 2 +- primitives/io/src/lib.rs | 14 ------ primitives/state-machine/src/basic.rs | 16 +++---- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/storage/src/lib.rs | 48 +++++++------------ 5 files changed, 24 insertions(+), 58 deletions(-) diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index a9accd7f158d1..f90a19ec036b4 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -511,7 +511,7 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let child_info = ChildInfo::new_default(&request.storage_key); - let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { + let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { Ok(proof) => proof, Err(error) => { log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index befc3434761d8..fd7f247ecdc27 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -161,8 +161,6 @@ pub trait DefaultChildStorage { /// doesn't exist at all. 
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. - /// - /// See `child_get` for common child api parameters. fn read( &self, storage_key: &[u8], @@ -182,8 +180,6 @@ pub trait DefaultChildStorage { } /// Set `key` to `value` in the child storage denoted by `storage_key`. - /// - /// See `child_get` for common child api parameters. fn set( &mut self, storage_key: &[u8], @@ -195,8 +191,6 @@ pub trait DefaultChildStorage { } /// Clear the given child storage of the given `key` and its value. - /// - /// See `child_get` for common child api parameters. fn clear ( &mut self, storage_key: &[u8], @@ -207,8 +201,6 @@ pub trait DefaultChildStorage { } /// Clear an entire child storage. - /// - /// See `child_get` for common child api parameters. fn storage_kill( &mut self, storage_key: &[u8], @@ -218,8 +210,6 @@ pub trait DefaultChildStorage { } /// Check whether the given `key` exists in storage. - /// - /// See `child_get` for common child api parameters. fn exists( &self, storage_key: &[u8], @@ -230,8 +220,6 @@ pub trait DefaultChildStorage { } /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. - /// - /// See `child_get` for common child api parameters. fn clear_prefix( &mut self, storage_key: &[u8], @@ -246,8 +234,6 @@ pub trait DefaultChildStorage { /// The hashing algorithm is defined by the `Block`. /// /// Returns the SCALE encoded hash. - /// - /// See `child_get` for common child api parameters. 
fn root( &mut self, storage_key: &[u8], diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 61ec462491b50..e1c10a83023b0 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -132,8 +132,8 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, key: &[u8], ) -> Option { - let storage_key = child_info.storage_key(); - self.inner.children.get(storage_key).and_then(|child| child.data.get(key)).cloned() + self.inner.children.get(child_info.storage_key()) + .and_then(|child| child.data.get(key)).cloned() } fn child_storage_hash( @@ -170,9 +170,8 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, key: &[u8], ) -> Option { - let storage_key = child_info.storage_key(); let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children.get(storage_key) + self.inner.children.get(child_info.storage_key()) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } @@ -194,8 +193,7 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let storage_key = child_info.storage_key().to_vec(); - let child_map = self.inner.children.entry(storage_key) + let child_map = self.inner.children.entry(child_info.storage_key().to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -211,8 +209,7 @@ impl Externalities for BasicExternalities { &mut self, child_info: &ChildInfo, ) { - let storage_key = child_info.storage_key(); - self.inner.children.remove(storage_key); + self.inner.children.remove(child_info.storage_key()); } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -240,8 +237,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, prefix: &[u8], ) { - let storage_key = child_info.storage_key(); - if let Some(child) = self.inner.children.get_mut(storage_key) { + if let Some(child) = 
self.inner.children.get_mut(child_info.storage_key()) { let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f4cdb7315c756..04a6a2f6b5cb3 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -166,7 +166,7 @@ impl From, StorageCollection)>> impl InMemory { /// Child storage infos iterator. pub fn child_storage_infos(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| item.0.as_ref()) + self.inner.iter().filter_map(|item| item.0.as_ref()) } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 30677f0f617c2..e960bc6435f36 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -63,9 +63,9 @@ pub struct Storage { /// Top trie storage data. pub top: StorageMap, /// Children trie storage data. - /// Note that the key is not including child prefix, this will - /// not be possible if a different kind of trie than `default` - /// get in use. + /// The key does not including prefix, for the `default` + /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` + /// tries. pub children: std::collections::HashMap, StorageChild>, } @@ -137,15 +137,15 @@ pub enum ChildInfo { } impl ChildInfo { - /// Instantiates info for a default child trie with a default unprefixed parent + /// Instantiates child information for a default child trie + /// of kind `ChildType::ParentKeyId`, using an unprefixed parent /// storage key. pub fn new_default(storage_key: &[u8]) -> Self { let data = storage_key.to_vec(); ChildInfo::ParentKeyId(ChildTrie { data }) } - /// Instantiates info for a default child trie with a default unprefixed parent - /// owned storage key. + /// Same as `new_default` but with `Vec` as input. 
pub fn new_default_from_vec(storage_key: Vec) -> Self { ChildInfo::ParentKeyId(ChildTrie { data: storage_key, @@ -173,20 +173,7 @@ impl ChildInfo { } } - /// Top trie defined as the unique crypto id trie with - /// 0 length unique id. - pub fn top_trie() -> Self { - Self::new_default(&[]) - } - - /// Is this child info a the top trie. - pub fn is_top_trie(&self) -> bool { - match self { - ChildInfo::ParentKeyId(ChildTrie { data }) => data.len() == 0 - } - } - - /// Return a single byte vector containing packed child info content and its child info type. + /// Returns a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { match self { @@ -205,7 +192,7 @@ impl ChildInfo { } } - /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. + /// Returns byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { @@ -214,12 +201,9 @@ impl ChildInfo { } } - /// Return a reference to the location in the direct parent of - /// this trie. - /// If the trie got no parent this returns the empty slice, - /// so by nature an empty slice is not a valid parent location. - /// This does not include child type related prefix. - /// The static part of the storage key is omitted. + /// Returns a reference to the location in the direct parent of + /// this trie but without the common prefix for this kind of + /// child trie. pub fn storage_key(&self) -> &[u8] { match self { ChildInfo::ParentKeyId(ChildTrie { @@ -238,7 +222,7 @@ impl ChildInfo { } } - /// Return a the full location in the direct parent of + /// Returns a the full location in the direct parent of /// this trie. 
 pub fn into_prefixed_storage_key(self) -> Vec { match self { @@ -251,7 +235,7 @@ } } - /// Return the type for this child info. + /// Returns the type for this child info. pub fn child_type(&self) -> ChildType { match self { ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, @@ -280,7 +264,7 @@ impl ChildType { }) } - /// Change a key to get prefixed with the parent prefix. + /// Produce a prefixed key for a given child type. fn new_prefixed_key(&self, key: &[u8]) -> Vec { let parent_prefix = self.parent_prefix(); let mut result = Vec::with_capacity(parent_prefix.len() + key.len()); @@ -289,7 +273,7 @@ result } - /// Change a key to get prefixed with the parent prefix. + /// Prefixes a vec with the prefix for this child type. fn do_prefix_key(&self, key: &mut Vec) { let parent_prefix = self.parent_prefix(); let key_len = key.len(); @@ -300,7 +284,7 @@ } } - /// Return the location reserved for this child trie in their parent trie if there + /// Returns the location reserved for this child trie in their parent trie if there /// is one. fn parent_prefix(&self) -> &'static [u8] { match self { From ed480fa4661ef1611a499a2047476473abe2d4e8 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 19 Feb 2020 11:02:18 +0100 Subject: [PATCH 052/185] Apply more consistent naming 'storage_key' instead of 'child_storage_key' in rpc crate.
--- client/rpc/src/state/mod.rs | 26 +++++++++++++------------- client/rpc/src/state/state_light.rs | 10 +++++----- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 57a4b6cab897e..856369164db13 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -107,7 +107,7 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_keys( &self, block: Option, - child_storage_key: StorageKey, + storage_key: StorageKey, prefix: StorageKey, ) -> FutureResult>; @@ -115,7 +115,7 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage( &self, block: Option, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult>; @@ -123,7 +123,7 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_hash( &self, block: Option, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult>; @@ -131,10 +131,10 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_size( &self, block: Option, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, key) + Box::new(self.child_storage(block, storage_key, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -294,38 +294,38 @@ impl StateApi for State fn child_storage( &self, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, key) + self.backend.child_storage(block, storage_key, key) } fn child_storage_keys( &self, - child_storage_key: StorageKey, + storage_key: StorageKey, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, key_prefix) + self.backend.child_storage_keys(block, storage_key, key_prefix) } fn child_storage_hash( &self, - child_storage_key: StorageKey, + 
storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, key) + self.backend.child_storage_hash(block, storage_key, key) } fn child_storage_size( &self, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, key) + self.backend.child_storage_size(block, storage_key, key) } fn metadata(&self, block: Option) -> FutureResult { diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 485950de97c00..c65f86c9f2ba5 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -249,7 +249,7 @@ impl StateBackend for LightState, - _child_storage_key: StorageKey, + _storage_key: StorageKey, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -258,7 +258,7 @@ impl StateBackend for LightState, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -268,7 +268,7 @@ impl StateBackend for LightState Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { block, header, - storage_key: child_storage_key.0, + storage_key: storage_key.0, keys: vec![key.0.clone()], retry_count: Default::default(), }).then(move |result| ready(result @@ -288,11 +288,11 @@ impl StateBackend for LightState, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { Box::new(self - .child_storage(block, child_storage_key, key) + .child_storage(block, storage_key, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HasherFor::::hash(&storage.0)))) ) From e103c2a3f2ea0bb24cef6d8178e96dcf4548a2f6 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 08:10:56 +0100 Subject: [PATCH 053/185] Update primitives/storage/src/lib.rs 
Co-Authored-By: thiolliere --- primitives/storage/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index e960bc6435f36..8034bb2acccd5 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -133,6 +133,7 @@ pub mod well_known_keys { #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum ChildInfo { + /// This is the one used by default. ParentKeyId(ChildTrie), } From 958b6268cdcd3217e160a69b6e94571c9d4ca9aa Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 08:55:50 +0100 Subject: [PATCH 054/185] use prefixed storage key in change trie --- .../state-machine/src/changes_trie/build.rs | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 3d5ca3d41ba21..53bf2c585a7f3 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -105,13 +105,13 @@ fn prepare_extrinsics_input<'a, B, H, Number>( Number: BlockNumber, { - let mut children_keys = BTreeSet::::new(); + let mut children_prefixed_keys = BTreeSet::::new(); let mut children_result = BTreeMap::new(); - for (storage_key, _) in changes.prospective.children.iter() + for (_storage_key, (_map, child_info)) in changes.prospective.children.iter() .chain(changes.committed.children.iter()) { - children_keys.insert(storage_key.clone()); + children_prefixed_keys.insert(child_info.prefixed_storage_key()); } - for storage_key in children_keys { + for storage_key in children_prefixed_keys { let child_index = ChildIndex:: { block: block.clone(), storage_key: storage_key.clone(), @@ -367,8 +367,8 @@ mod test { (vec![104], vec![255]), (vec![105], vec![255]), ].into_iter().collect::>().into(); - let child_trie_key1 = child_info_1.storage_key().to_vec(); - let 
child_trie_key2 = child_info_2.storage_key().to_vec(); + let child_trie_key1 = child_info_1.prefixed_storage_key(); + let child_trie_key2 = child_info_2.prefixed_storage_key(); let storage = InMemoryStorage::with_inputs(vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), @@ -485,8 +485,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_non_digest_block() { fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; let changes_trie_nodes = prepare_input( @@ -523,8 +523,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l1() { fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; let changes_trie_nodes = prepare_input( @@ -570,8 +570,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l2() { fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = 
ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; let changes_trie_nodes = prepare_input( @@ -661,8 +661,8 @@ mod test { #[test] fn build_changes_trie_nodes_ignores_temporary_storage_values() { fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, mut changes, config) = prepare_for_build(zero); // 110: missing from backend, set to None in overlay @@ -715,8 +715,8 @@ mod test { #[test] fn cache_is_used_when_changes_trie_is_built() { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, mut storage, changes, config) = prepare_for_build(0); let parent = AnchorBlockId { hash: Default::default(), number: 15 }; From f471d56b7f22077970573ff1dc03de4c3e3028e7 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 09:00:27 +0100 Subject: [PATCH 055/185] renaming 'default_child_trie_root' to 'empty_child_trie_root' --- primitives/state-machine/src/basic.rs | 6 +++--- primitives/state-machine/src/ext.rs | 6 +++--- primitives/state-machine/src/in_memory_backend.rs | 4 ++-- primitives/state-machine/src/proving_backend.rs | 4 ++-- primitives/state-machine/src/trie_backend.rs | 4 ++-- 
primitives/state-machine/src/trie_backend_essence.rs | 8 ++++---- primitives/trie/src/lib.rs | 4 ++-- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index e1c10a83023b0..3dbc2c1e0bb4e 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -21,7 +21,7 @@ use std::{ }; use crate::{Backend, InMemoryBackend, StorageKey, StorageValue}; use hash_db::Hasher; -use sp_trie::{TrieConfiguration, default_child_trie_root}; +use sp_trie::{TrieConfiguration, empty_child_trie_root}; use sp_trie::trie_types::Layout; use sp_core::{ storage::{ @@ -260,7 +260,7 @@ impl Externalities for BasicExternalities { // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. - let empty_hash = default_child_trie_root::>(); + let empty_hash = empty_child_trie_root::>(); for (prefixed_storage_key, child_info) in keys { let child_root = self.child_storage_root(&child_info); if &empty_hash[..] == &child_root[..] 
{ @@ -283,7 +283,7 @@ impl Externalities for BasicExternalities { InMemoryBackend::::default() .child_storage_root(&child.child_info, delta).0 } else { - default_child_trie_root::>() + empty_child_trie_root::>() }.encode() } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index d5f12643d00d4..2c1c3bd01cf51 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -27,7 +27,7 @@ use sp_core::{ storage::{well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, }; -use sp_trie::{trie_types::Layout, default_child_trie_root}; +use sp_trie::{trie_types::Layout, empty_child_trie_root}; use sp_externalities::Extensions; use codec::{Decode, Encode}; @@ -490,7 +490,7 @@ where .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>() + empty_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, @@ -538,7 +538,7 @@ where .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>() + empty_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", self.id, diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 04a6a2f6b5cb3..5a7f2ced5952a 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -24,7 +24,7 @@ use crate::{ use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use hash_db::Hasher; use sp_trie::{ - MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, child_trie_root, empty_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; use sp_core::storage::{ChildInfo, ChildType, Storage}; @@ -272,7 +272,7 
@@ impl Backend for InMemory where H::Out: Codec { let full_transaction = transaction.into_iter().collect(); let is_default = match child_type { - ChildType::ParentKeyId => root == default_child_trie_root::>(), + ChildType::ParentKeyId => root == empty_child_trie_root::>(), }; (root, is_default, vec![(child_info, full_transaction)]) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 67f90f92e9f98..0ba7b91e52348 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -22,7 +22,7 @@ use codec::{Decode, Encode, Codec}; use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ - MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, + MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys }; pub use sp_trie::Recorder; @@ -149,7 +149,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let storage_key = child_info.storage_key(); let root = self.storage(storage_key)? 
.and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or(default_child_trie_root::>()); + .unwrap_or(empty_child_trie_root::>()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new( diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 2c09c049b542d..4762192ece61f 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -18,7 +18,7 @@ use log::{warn, debug}; use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, ChildType}; use codec::{Codec, Decode}; @@ -198,7 +198,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord, { let default_root = match child_info.child_type() { - ChildType::ParentKeyId => default_child_trie_root::>() + ChildType::ParentKeyId => empty_child_trie_root::>() }; let mut write_overlay = S::Overlay::default(); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 763f57bd6b7d5..28d1c68ca2e40 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -22,7 +22,7 @@ use std::sync::Arc; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - default_child_trie_root, read_trie_value, read_child_trie_value, + empty_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; @@ -169,7 +169,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: key: &[u8], ) -> Result, String> { let root = 
self.child_root(child_info)? - .unwrap_or(default_child_trie_root::>().encode()); + .unwrap_or(empty_child_trie_root::>().encode()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -190,7 +190,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: f: F, ) { let root = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(default_child_trie_root::>().encode()), + Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -221,7 +221,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: mut f: F, ) { let root_vec = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(default_child_trie_root::>().encode()), + Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 01466f3ed48fc..a7edf01a0473a 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -209,8 +209,8 @@ pub fn read_trie_value_with< Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) } -/// Determine the default child trie root. -pub fn default_child_trie_root( +/// Determine the empty child trie root. 
+pub fn empty_child_trie_root( ) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } From 02ff227286c3c4b43add22f6afdd506f22b894da Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 11:09:20 +0100 Subject: [PATCH 056/185] apply some renaming, rpc change are really ugly, will revert them --- bin/node/executor/tests/basic.rs | 12 +- bin/node/executor/tests/fees.rs | 2 +- client/api/src/light.rs | 10 +- client/chain-spec/src/chain_spec.rs | 44 ++--- client/db/src/bench.rs | 2 +- client/db/src/lib.rs | 6 +- client/executor/src/integration_tests/mod.rs | 4 +- client/network/src/on_demand_layer.rs | 6 +- client/network/src/protocol.rs | 19 ++- .../src/protocol/light_client_handler.rs | 36 ++-- client/network/src/protocol/light_dispatch.rs | 49 +++--- client/network/src/protocol/message.rs | 8 +- .../src/protocol/schema/light.v1.proto | 6 +- client/rpc-api/src/state/mod.rs | 16 +- client/rpc/src/state/mod.rs | 29 ++-- client/rpc/src/state/state_full.rs | 6 +- client/rpc/src/state/state_light.rs | 12 +- client/rpc/src/state/tests.rs | 8 +- client/src/in_mem.rs | 7 +- client/src/light/backend.rs | 4 +- client/src/light/fetcher.rs | 10 +- frame/support/test/tests/instance.rs | 2 +- frame/system/src/lib.rs | 2 +- primitives/io/src/lib.rs | 154 +++++++++++++++++- primitives/runtime/src/lib.rs | 6 +- primitives/state-machine/src/basic.rs | 26 +-- .../state-machine/src/changes_trie/build.rs | 12 +- primitives/state-machine/src/ext.rs | 10 +- .../state-machine/src/in_memory_backend.rs | 2 +- .../state-machine/src/overlayed_changes.rs | 44 ++--- primitives/state-machine/src/testing.rs | 6 +- primitives/storage/src/lib.rs | 22 +-- test-utils/client/src/lib.rs | 2 +- test-utils/runtime/client/src/lib.rs | 11 +- test-utils/runtime/src/genesismap.rs | 4 +- test-utils/runtime/src/system.rs | 2 +- 36 files changed, 369 insertions(+), 232 deletions(-) diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 100bdf3fe60ee..79512527d19ab 
100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -173,7 +173,7 @@ fn panic_execution_with_foreign_code_gives_error() { vec![0u8; 32] } ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -209,7 +209,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { vec![0u8; 32] } ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -243,7 +243,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -285,7 +285,7 @@ fn successful_execution_with_foreign_code_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -706,7 +706,7 @@ fn panic_execution_gives_error() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -740,7 +740,7 @@ fn successful_execution_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index ba303a6feb6ff..46c8fe332a9e2 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -145,7 +145,7 @@ fn transaction_fee_is_correct_ultimate() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let tip = 1_000_000; diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 2911d77f18209..61f56628d5866 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -75,7 +75,7 @@ pub struct RemoteReadRequest { /// Remote storage read child request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct RemoteReadChildRequest { +pub struct RemoteReadDefaultChildRequest { /// Read at state of given block. 
pub block: Header::Hash, /// Header of block at which read is performed. @@ -175,7 +175,7 @@ pub trait Fetcher: Send + Sync { /// Fetch remote storage child value. fn remote_read_child( &self, - request: RemoteReadChildRequest + request: RemoteReadDefaultChildRequest ) -> Self::RemoteReadResult; /// Fetch remote call result. fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; @@ -205,9 +205,9 @@ pub trait FetchChecker: Send + Sync { remote_proof: StorageProof, ) -> ClientResult, Option>>>; /// Check remote storage read proof. - fn check_read_child_proof( + fn check_read_default_child_proof( &self, - request: &RemoteReadChildRequest, + request: &RemoteReadDefaultChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>>; /// Check remote method execution proof. @@ -330,7 +330,7 @@ pub mod tests { not_implemented_in_tests() } - fn remote_read_child(&self, _request: RemoteReadChildRequest
) -> Self::RemoteReadResult { + fn remote_read_child(&self, _request: RemoteReadDefaultChildRequest
) -> Self::RemoteReadResult { not_implemented_in_tests() } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index ae53559aa9f43..ea6dae7724713 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -74,17 +74,14 @@ impl BuildStorage for ChainSpec { fn build_storage(&self) -> Result { match self.genesis.resolve()? { Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { + Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - children: children_map.into_iter().map(|(storage_key, child_content)| { - let child_info = ChildInfo::resolve_child_info( - child_content.child_type, - child_content.child_info.as_slice(), - ).expect("chain spec contains correct content").to_owned(); + children_default: children_map.into_iter().map(|(storage_key, child_content)| { + let child_info = ChildInfo::new_default(storage_key.0.as_slice()); ( storage_key.0, StorageChild { - data: child_content.data.into_iter().map(|(k, v)| (k.0, v.0)).collect(), + data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(), child_info, }, ) @@ -103,22 +100,13 @@ impl BuildStorage for ChainSpec { type GenesisStorage = HashMap; -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -struct ChildRawStorage { - data: GenesisStorage, - child_info: Vec, - child_type: u32, -} - #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] /// Storage content for genesis block. 
struct RawGenesis { pub top: GenesisStorage, - pub children: HashMap, + pub children_default: HashMap, } #[derive(Serialize, Deserialize)] @@ -285,22 +273,16 @@ impl ChainSpec { let top = storage.top.into_iter() .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(); - let children = storage.children.into_iter() - .map(|(sk, child)| { - let (info, ci_type) = child.child_info.info(); - ( - StorageKey(sk), - ChildRawStorage { - data: child.data.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - child_info: info.to_vec(), - child_type: ci_type, - }, - )}) + let children_default = storage.children_default.into_iter() + .map(|(sk, child)| ( + StorageKey(sk), + child.data.into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(), + )) .collect(); - Genesis::Raw(RawGenesis { top, children }) + Genesis::Raw(RawGenesis { top, children_default }) }, (_, genesis) => genesis, }; diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index fce759590e531..ec8b975aa24fd 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -77,7 +77,7 @@ impl BenchmarkingState { }; state.reopen()?; - let child_delta = genesis.children.into_iter().map(|(_storage_key, child_content)| ( + let child_delta = genesis.children_default.into_iter().map(|(_storage_key, child_content)| ( child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 598b372b440c0..f108b1d737710 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -581,7 +581,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc return Err(sp_blockchain::Error::GenesisInvalid.into()); } - let child_delta = storage.children.into_iter().map(|(_storage_key, child_content)|( + let child_delta = storage.children_default.into_iter().map(|(_storage_key, child_content)|( child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); @@ -1782,7 +1782,7 
@@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children: Default::default(), + children_default: Default::default(), }).unwrap(); op.set_block_data( header.clone(), @@ -1867,7 +1867,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children: Default::default(), + children_default: Default::default(), }).unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index c0516d3ac7dfa..ca5e72aedf7a9 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -180,7 +180,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { b"foo".to_vec() => b"bar".to_vec(), b"baz".to_vec() => b"bar".to_vec() ], - children: map![], + children_default: map![], }); assert_eq!(ext, expected); } @@ -214,7 +214,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { b"aab".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"5".to_vec() ], - children: map![], + children_default: map![], }); assert_eq!(expected, ext); } diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index d672ed0b7f569..330daf590d7a7 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -23,7 +23,7 @@ use parking_lot::Mutex; use sp_blockchain::Error as ClientError; use sc_client_api::{ Fetcher, FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, - RemoteChangesRequest, RemoteReadChildRequest, RemoteBodyRequest, + RemoteChangesRequest, RemoteReadDefaultChildRequest, RemoteBodyRequest, }; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; @@ -103,10 +103,10 @@ impl Fetcher for OnDemand where fn remote_read_child( &self, - request: RemoteReadChildRequest + request: RemoteReadDefaultChildRequest ) -> Self::RemoteReadResult { let 
(sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteReadChild(request, sender)); + let _ = self.requests_send.unbounded_send(RequestData::RemoteReadDefaultChild(request, sender)); RemoteResponse { receiver } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 33fad6b4c5fe6..3622b96685649 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -245,7 +245,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { self.behaviour.send_packet(who, message.encode()) } - fn send_read_child_request( + fn send_read_default_child_request( &mut self, who: &PeerId, id: RequestId, @@ -253,12 +253,13 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { storage_key: Vec, keys: Vec>, ) { - let message: Message = message::generic::Message::RemoteReadChildRequest(message::RemoteReadChildRequest { - id, - block, - storage_key, - keys, - }); + let message: Message = message::generic::Message::RemoteReadDefaultChildRequest( + message::RemoteReadDefaultChildRequest { + id, + block, + storage_key, + keys, + }); self.behaviour.send_packet(who, message.encode()) } @@ -639,7 +640,7 @@ impl, H: ExHashT> Protocol { self.on_finality_proof_request(who, request), GenericMessage::FinalityProofResponse(response) => return self.on_finality_proof_response(who, response), - GenericMessage::RemoteReadChildRequest(request) => + GenericMessage::RemoteReadDefaultChildRequest(request) => self.on_remote_read_child_request(who, request), GenericMessage::Consensus(msg) => return if self.registered_notif_protocols.contains(&msg.engine_id) { @@ -1547,7 +1548,7 @@ impl, H: ExHashT> Protocol { fn on_remote_read_child_request( &mut self, who: PeerId, - request: message::RemoteReadChildRequest, + request: message::RemoteReadDefaultChildRequest, ) { if request.keys.is_empty() { debug!(target: "sync", "Invalid remote child read request sent by {}", who); diff --git 
a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 568e2caa8a1bd..1c49c20b0c4b0 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -169,8 +169,8 @@ pub enum Request { request: fetcher::RemoteReadRequest, sender: oneshot::Sender, Option>>, ClientError>> }, - ReadChild { - request: fetcher::RemoteReadChildRequest, + ReadDefaultChild { + request: fetcher::RemoteReadDefaultChildRequest, sender: oneshot::Sender, Option>>, ClientError>> }, Call { @@ -367,9 +367,9 @@ where let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } - Request::ReadChild { request, .. } => { + Request::ReadDefaultChild { request, .. } => { let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_child_proof(&request, proof)?; + let reply = self.checker.check_read_default_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } _ => Err(Error::UnexpectedResponse) @@ -496,7 +496,7 @@ where ( &mut self , peer: &PeerId , request_id: u64 - , request: &api::v1::light::RemoteReadChildRequest + , request: &api::v1::light::RemoteReadDefaultChildRequest ) -> Result { if request.keys.is_empty() { @@ -692,7 +692,7 @@ where self.on_remote_read_request(&peer, request.id, r), Some(api::v1::light::request::Request::RemoteHeaderRequest(r)) => self.on_remote_header_request(&peer, request.id, r), - Some(api::v1::light::request::Request::RemoteReadChildRequest(r)) => + Some(api::v1::light::request::Request::RemoteReadDefaultChildRequest(r)) => self.on_remote_read_child_request(&peer, request.id, r), Some(api::v1::light::request::Request::RemoteChangesRequest(r)) => self.on_remote_changes_request(&peer, request.id, r), @@ -888,7 +888,7 @@ fn required_block(request: &Request) -> NumberFor { match request { Request::Header { request, .. } => request.block, Request::Read { request, .. 
} => *request.header.number(), - Request::ReadChild { request, .. } => *request.header.number(), + Request::ReadDefaultChild { request, .. } => *request.header.number(), Request::Call { request, .. } => *request.header.number(), Request::Changes { request, .. } => request.max_block.0, } @@ -898,7 +898,7 @@ fn retries(request: &Request) -> usize { let rc = match request { Request::Header { request, .. } => request.retry_count, Request::Read { request, .. } => request.retry_count, - Request::ReadChild { request, .. } => request.retry_count, + Request::ReadDefaultChild { request, .. } => request.retry_count, Request::Call { request, .. } => request.retry_count, Request::Changes { request, .. } => request.retry_count, }; @@ -918,13 +918,13 @@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: }; api::v1::light::request::Request::RemoteReadRequest(r) } - Request::ReadChild { request, .. } => { - let r = api::v1::light::RemoteReadChildRequest { + Request::ReadDefaultChild { request, .. } => { + let r = api::v1::light::RemoteReadDefaultChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), keys: request.keys.clone(), }; - api::v1::light::request::Request::RemoteReadChildRequest(r) + api::v1::light::request::Request::RemoteReadDefaultChildRequest(r) } Request::Call { request, .. 
} => { let r = api::v1::light::RemoteCallRequest { @@ -965,7 +965,7 @@ fn send_reply(result: Result, ClientError>, request: Request< Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), } - Request::ReadChild { request, sender } => match result { + Request::ReadDefaultChild { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), @@ -1545,7 +1545,7 @@ mod tests { response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), } } - Request::ReadChild{..} => { + Request::ReadDefaultChild{..} => { let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; api::v1::light::Response { id: 1, @@ -1620,14 +1620,14 @@ mod tests { #[test] fn receives_remote_read_child_response() { let mut chan = oneshot::channel(); - let request = fetcher::RemoteReadChildRequest { + let request = fetcher::RemoteReadDefaultChildRequest { header: dummy_header(), block: Default::default(), storage_key: b":child_storage:sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }; - issue_request(Request::ReadChild { request, sender: chan.0 }); + issue_request(Request::ReadDefaultChild { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } @@ -1720,16 +1720,16 @@ mod tests { #[test] fn send_receive_read_child() { let chan = oneshot::channel(); - let request = fetcher::RemoteReadChildRequest { + let request = fetcher::RemoteReadDefaultChildRequest { header: dummy_header(), block: Default::default(), storage_key: b"sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }; - send_receive(Request::ReadChild { request, sender: chan.0 }); + send_receive(Request::ReadDefaultChild { request, sender: chan.0 }); assert_eq!(Some(vec![42]), 
task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_child_proof` + // ^--- from `DummyFetchChecker::check_read_default_child_proof` } #[test] diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 6654895971001..e2b4ff7874095 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -29,7 +29,7 @@ use linked_hash_map::{Entry, LinkedHashMap}; use sp_blockchain::Error as ClientError; use sc_client_api::{FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof, - RemoteReadChildRequest, RemoteBodyRequest, StorageProof}; + RemoteReadDefaultChildRequest, RemoteBodyRequest, StorageProof}; use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; use crate::config::Roles; @@ -64,7 +64,7 @@ pub trait LightDispatchNetwork { ); /// Send to `who` a child read request. 
- fn send_read_child_request( + fn send_read_default_child_request( &mut self, who: &PeerId, id: RequestId, @@ -147,8 +147,8 @@ pub(crate) enum RequestData { RemoteReadRequest, OneShotSender, Option>>, ClientError>>, ), - RemoteReadChild( - RemoteReadChildRequest, + RemoteReadDefaultChild( + RemoteReadDefaultChildRequest, OneShotSender, Option>>, ClientError>> ), RemoteCall(RemoteCallRequest, OneShotSender, ClientError>>), @@ -189,9 +189,9 @@ impl FetchChecker for AlwaysBadChecker { Err(ClientError::Msg("AlwaysBadChecker".into())) } - fn check_read_child_proof( + fn check_read_default_child_proof( &self, - _request: &RemoteReadChildRequest, + _request: &RemoteReadDefaultChildRequest, _remote_proof: StorageProof, ) -> Result, Option>>, ClientError> { Err(ClientError::Msg("AlwaysBadChecker".into())) @@ -403,8 +403,8 @@ impl LightDispatch where RequestData::RemoteRead(request, sender) ), }}, - RequestData::RemoteReadChild(request, sender) => { - match checker.check_read_child_proof(&request, response.proof) { + RequestData::RemoteReadDefaultChild(request, sender) => { + match checker.check_read_default_child_proof(&request, response.proof) { Ok(response) => { // we do not bother if receiver has been dropped already let _ = sender.send(Ok(response)); @@ -412,7 +412,7 @@ impl LightDispatch where }, Err(error) => Accept::CheckFailed( error, - RequestData::RemoteReadChild(request, sender) + RequestData::RemoteReadDefaultChild(request, sender) ), }}, data => Accept::Unexpected(data), @@ -595,7 +595,7 @@ impl Request { match self.data { RequestData::RemoteHeader(ref data, _) => data.block, RequestData::RemoteRead(ref data, _) => *data.header.number(), - RequestData::RemoteReadChild(ref data, _) => *data.header.number(), + RequestData::RemoteReadDefaultChild(ref data, _) => *data.header.number(), RequestData::RemoteCall(ref data, _) => *data.header.number(), RequestData::RemoteChanges(ref data, _) => data.max_block.0, RequestData::RemoteBody(ref data, _) => 
*data.header.number(), @@ -617,8 +617,8 @@ impl Request { data.block, data.keys.clone(), ), - RequestData::RemoteReadChild(ref data, _) => - out.send_read_child_request( + RequestData::RemoteReadDefaultChild(ref data, _) => + out.send_read_default_child_request( peer, self.id, data.block, @@ -665,7 +665,7 @@ impl RequestData { RequestData::RemoteHeader(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteCall(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteRead(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteReadChild(_, sender) => { let _ = sender.send(Err(error)); }, + RequestData::RemoteReadDefaultChild(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteChanges(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteBody(_, sender) => { let _ = sender.send(Err(error)); }, } @@ -682,7 +682,7 @@ pub mod tests { use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, ChangesProof, RemoteCallRequest, RemoteReadRequest, - RemoteReadChildRequest, RemoteChangesRequest, RemoteBodyRequest}; + RemoteReadDefaultChildRequest, RemoteChangesRequest, RemoteBodyRequest}; use crate::config::Roles; use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; @@ -729,9 +729,9 @@ pub mod tests { } } - fn check_read_child_proof( + fn check_read_default_child_proof( &self, - request: &RemoteReadChildRequest, + request: &RemoteReadDefaultChildRequest, _: StorageProof, ) -> ClientResult, Option>>> { match self.ok { @@ -817,7 +817,7 @@ pub mod tests { } fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} - fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, + fn send_read_default_child_request(&mut self, _: &PeerId, _: RequestId, _: 
::Hash, _: Vec, _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, @@ -1040,13 +1040,14 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: b"sub".to_vec(), - keys: vec![b":key".to_vec()], - retry_count: None, - }, tx)); + light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadDefaultChild( + RemoteReadDefaultChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: b"sub".to_vec(), + keys: vec![b":key".to_vec()], + retry_count: None, + }, tx)); light_dispatch.on_remote_read_response(&mut network_interface, peer0.clone(), message::RemoteReadResponse { diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index be5a4f5acc871..ed9cd811006de 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -24,7 +24,7 @@ pub use self::generic::{ RemoteHeaderRequest, RemoteHeaderResponse, RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, - FromBlock, RemoteReadChildRequest, + FromBlock, RemoteReadDefaultChildRequest, }; use sc_client_api::StorageProof; @@ -212,7 +212,7 @@ pub mod generic { /// Remote changes response. RemoteChangesResponse(RemoteChangesResponse), /// Remote child storage read request. - RemoteReadChildRequest(RemoteReadChildRequest), + RemoteReadDefaultChildRequest(RemoteReadDefaultChildRequest), /// Finality proof request. FinalityProofRequest(FinalityProofRequest), /// Finality proof response. 
@@ -242,7 +242,7 @@ pub mod generic { Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse", Message::RemoteChangesRequest(_) => "RemoteChangesRequest", Message::RemoteChangesResponse(_) => "RemoteChangesResponse", - Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", + Message::RemoteReadDefaultChildRequest(_) => "RemoteReadDefaultChildRequest", Message::FinalityProofRequest(_) => "FinalityProofRequest", Message::FinalityProofResponse(_) => "FinalityProofResponse", Message::ConsensusBatch(_) => "ConsensusBatch", @@ -417,7 +417,7 @@ pub mod generic { #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote storage read child request. - pub struct RemoteReadChildRequest { + pub struct RemoteReadDefaultChildRequest { /// Unique request id. pub id: RequestId, /// Block at which to perform call. diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index 930d229b0bf7c..1895f6275fe48 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -20,7 +20,7 @@ message Request { RemoteCallRequest remote_call_request = 2; RemoteReadRequest remote_read_request = 3; RemoteHeaderRequest remote_header_request = 4; - RemoteReadChildRequest remote_read_child_request = 5; + RemoteReadDefaultChildRequest remote_read_default_child_request = 5; RemoteChangesRequest remote_changes_request = 6; } } @@ -68,13 +68,13 @@ message RemoteReadResponse { } // Remote storage read child request. -message RemoteReadChildRequest { +message RemoteReadDefaultChildRequest { // Block at which to perform call. bytes block = 2; // Child Storage key. bytes storage_key = 3; // Storage keys. - repeated bytes keys = 4; + repeated bytes keys = 6; } // Remote header request. 
diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 48d363bb8921c..540eb67d5e7ea 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -73,8 +73,8 @@ pub trait StateApi { fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; /// Returns the keys with prefix from a child storage, leave empty to get all the keys - #[rpc(name = "state_getChildKeys")] - fn child_storage_keys( + #[rpc(name = "state_getDefaultChildKeys")] + fn default_child_storage_keys( &self, child_storage_key: StorageKey, prefix: StorageKey, @@ -82,8 +82,8 @@ pub trait StateApi { ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. - #[rpc(name = "state_getChildStorage")] - fn child_storage( + #[rpc(name = "state_getDefaultChildStorage")] + fn default_child_storage( &self, child_storage_key: StorageKey, key: StorageKey, @@ -91,8 +91,8 @@ pub trait StateApi { ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. - #[rpc(name = "state_getChildStorageHash")] - fn child_storage_hash( + #[rpc(name = "state_getDefaultChildStorageHash")] + fn default_child_storage_hash( &self, child_storage_key: StorageKey, key: StorageKey, @@ -100,8 +100,8 @@ pub trait StateApi { ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- #[rpc(name = "state_getChildStorageSize")] - fn child_storage_size( + #[rpc(name = "state_getDefaultChildStorageSize")] + fn default_child_storage_size( &self, child_storage_key: StorageKey, key: StorageKey, diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 856369164db13..1d0c322f9803f 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -103,8 +103,9 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - fn child_storage_keys( + /// Returns the keys with prefix from a defaultchild storage, + /// leave empty to get all the keys + fn default_child_storage_keys( &self, block: Option, storage_key: StorageKey, @@ -112,7 +113,7 @@ pub trait StateBackend: Send + Sync + 'static ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. - fn child_storage( + fn default_child_storage( &self, block: Option, storage_key: StorageKey, @@ -120,7 +121,7 @@ pub trait StateBackend: Send + Sync + 'static ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. - fn child_storage_hash( + fn default_child_storage_hash( &self, block: Option, storage_key: StorageKey, @@ -128,13 +129,13 @@ pub trait StateBackend: Send + Sync + 'static ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- fn child_storage_size( + fn default_child_storage_size( &self, block: Option, storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, key) + Box::new(self.default_child_storage(block, storage_key, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -292,40 +293,40 @@ impl StateApi for State self.backend.storage_size(block, key) } - fn child_storage( + fn default_child_storage( &self, storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, storage_key, key) + self.backend.default_child_storage(block, storage_key, key) } - fn child_storage_keys( + fn default_child_storage_keys( &self, storage_key: StorageKey, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, storage_key, key_prefix) + self.backend.default_child_storage_keys(block, storage_key, key_prefix) } - fn child_storage_hash( + fn default_child_storage_hash( &self, storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, storage_key, key) + self.backend.default_child_storage_hash(block, storage_key, key) } - fn child_storage_size( + fn default_child_storage_size( &self, storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, storage_key, key) + self.backend.default_child_storage_size(block, storage_key, key) } fn metadata(&self, block: Option) -> FutureResult { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index a949cee862845..ca237dbfa230f 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -305,7 +305,7 @@ impl StateBackend for FullState, storage_key: StorageKey, @@ -324,7 +324,7 @@ impl StateBackend for FullState, storage_key: StorageKey, @@ -343,7 +343,7 @@ impl StateBackend for FullState, storage_key: StorageKey, diff --git 
a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index c65f86c9f2ba5..d9f56d9fb584f 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -45,7 +45,7 @@ use sc_client::{ BlockchainEvents, Client, CallExecutor, light::{ blockchain::{future_header, RemoteBlockchain}, - fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest}, + fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadDefaultChildRequest}, }, }; use sp_core::{ @@ -246,7 +246,7 @@ impl StateBackend for LightState, _storage_key: StorageKey, @@ -255,7 +255,7 @@ impl StateBackend for LightState, storage_key: StorageKey, @@ -265,7 +265,7 @@ impl StateBackend for LightState Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { + Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadDefaultChildRequest { block, header, storage_key: storage_key.0, @@ -285,14 +285,14 @@ impl StateBackend for LightState, storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { Box::new(self - .child_storage(block, storage_key, key) + .default_child_storage(block, storage_key, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HasherFor::::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 22fd142347077..b579003e6c01c 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -65,7 +65,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, key, Some(genesis_hash).into()) + client.default_child_storage(storage_key, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -87,7 +87,7 @@ fn should_return_child_storage() { assert_matches!( - client.child_storage( + client.default_child_storage( child_key.clone(), key.clone(), Some(genesis_hash).into(), @@ -95,7 +95,7 @@ fn should_return_child_storage() { 
Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); assert_matches!( - client.child_storage_hash( + client.default_child_storage_hash( child_key.clone(), key.clone(), Some(genesis_hash).into(), @@ -103,7 +103,7 @@ fn should_return_child_storage() { Ok(true) ); assert_matches!( - client.child_storage_size( + client.default_child_storage_size( child_key.clone(), key.clone(), None, diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index 3986c70116c01..991cc9fb74d6f 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -515,7 +515,7 @@ impl backend::BlockImportOperation for BlockImportOperatio fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { check_genesis_storage(&storage)?; - let child_delta = storage.children.into_iter() + let child_delta = storage.children_default.into_iter() .map(|(_storage_key, child_content)| (child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))))); @@ -724,8 +724,9 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { return Err(sp_blockchain::Error::GenesisInvalid.into()); } - if storage.children.keys().any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); + if storage.children_default.keys() + .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { + return Err(sp_blockchain::Error::GenesisInvalid.into()); } Ok(()) diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 4fba83b882c68..067feb316c8ea 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -316,12 +316,12 @@ impl BlockImportOperation for ImportOperation storage.insert(None, input.top); // create a list of children keys to re-compute roots for - let child_delta = input.children.iter() + let child_delta = input.children_default.iter() .map(|(_storage_key, storage_child)| (storage_child.child_info.clone(), None)) .collect::>(); // make sure to 
persist the child storage - for (_child_key, storage_child) in input.children { + for (_child_key, storage_child) in input.children_default { storage.insert(Some(storage_child.child_info), storage_child.data); } diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index f37c06bea247d..4aafbfc630fe3 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -39,7 +39,7 @@ use sp_blockchain::{Error as ClientError, Result as ClientResult}; use crate::cht; pub use sc_client_api::{ light::{ - RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, + RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadDefaultChildRequest, RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, Storage as BlockchainStorage, }, @@ -236,9 +236,9 @@ impl FetchChecker for LightDataChecker ).map_err(Into::into) } - fn check_read_child_proof( + fn check_read_default_child_proof( &self, - request: &RemoteReadChildRequest, + request: &RemoteReadDefaultChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { let child_trie = ChildInfo::new_default(&request.storage_key); @@ -502,8 +502,8 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( - &RemoteReadChildRequest::
{ + assert_eq!((&local_checker as &dyn FetchChecker).check_read_default_child_proof( + &RemoteReadDefaultChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, storage_key: b"child1".to_vec(), diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 6fa2806dd3483..9fdd695b86a3e 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -303,7 +303,7 @@ fn new_test_ext() -> sp_io::TestExternalities { fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), - children: std::collections::HashMap::new() + children_default: std::collections::HashMap::new() }; sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 57be7b157cb33..db1dd51e10e0d 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -815,7 +815,7 @@ impl Module { >::hashed_key().to_vec() => T::BlockNumber::one().encode(), >::hashed_key().to_vec() => [69u8; 32].encode() ], - children: map![], + children_default: map![], }) } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 68f15aaee6958..d7bdc3ee27d57 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -68,6 +68,14 @@ pub enum EcdsaVerifyError { BadSignature, } +/// Deprecated function, ensure that this is a default prefixed key. +#[cfg(feature = "std")] +fn child_storage_key_or_panic(storage_key: &[u8]) { + if !storage_key.starts_with(&ChildInfo::new_default(&[]).prefixed_storage_key()[..]) { + panic!("child storage key is invalid") + } +} + /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -136,6 +144,146 @@ pub trait Storage { self.next_storage_key(&key) } + + /// Deprecated, please use dedicated runtime apis. 
+ fn child_get( + &self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> Option> { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(&child_info, key).map(|s| s.to_vec()) + } + + /// Deprecated, please use dedicated runtime apis. + fn child_read( + &self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + value_out: &mut [u8], + value_offset: u32, + ) -> Option { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(&child_info, key) + .map(|value| { + let value_offset = value_offset as usize; + let data = &value[value_offset.min(value.len())..]; + let written = std::cmp::min(data.len(), value_out.len()); + value_out[..written].copy_from_slice(&data[..written]); + value.len() as u32 + }) + } + + /// Deprecated, please use dedicated runtime apis. + fn child_set( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + value: &[u8], + ) { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); + } + + /// Deprecated, please use dedicated runtime apis. 
+ fn child_clear( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_storage(&child_info, key); + } + + /// Deprecated, please use dedicated runtime apis. + fn child_storage_kill( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + ) { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.kill_child_storage(&child_info); + } + + /// Deprecated, please use dedicated runtime apis. + fn child_exists( + &self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> bool { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.exists_child_storage(&child_info, key) + } + + /// Deprecated, please use dedicated runtime apis. + fn child_clear_prefix( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + prefix: &[u8], + ) { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_prefix(&child_info, prefix); + } + + /// Deprecated, please use dedicated runtime apis. 
+ fn child_root( + &mut self, + storage_key: &[u8], + ) -> Vec { + child_storage_key_or_panic(storage_key); + let child_info = ChildInfo::new_default(storage_key); + self.child_storage_root(&child_info) + } + + /// Deprecated, please use dedicated runtime apis. + fn child_next_key( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> Option> { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.next_child_storage_key(&child_info, key) + } + } @@ -911,7 +1059,7 @@ mod tests { t = BasicExternalities::new(Storage { top: map![b"foo".to_vec() => b"bar".to_vec()], - children: map![], + children_default: map![], }); t.execute_with(|| { @@ -924,7 +1072,7 @@ mod tests { fn read_storage_works() { let mut t = BasicExternalities::new(Storage { top: map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()], - children: map![], + children_default: map![], }); t.execute_with(|| { @@ -946,7 +1094,7 @@ mod tests { b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() ], - children: map![], + children_default: map![], }); t.execute_with(|| { diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 60f5da9cb389b..0409cb085256a 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -132,15 +132,15 @@ impl BuildStorage for sp_core::storage::Storage { storage: &mut sp_core::storage::Storage, )-> Result<(), String> { storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); - for (k, other_map) in self.children.iter() { + for (k, other_map) in self.children_default.iter() { let k = k.clone(); - if let Some(map) = storage.children.get_mut(&k) { + if let Some(map) = storage.children_default.get_mut(&k) { 
map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); if !map.child_info.try_update(&other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { - storage.children.insert(k, other_map.clone()); + storage.children_default.insert(k, other_map.clone()); } } Ok(()) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 3dbc2c1e0bb4e..8c34f0e041ff4 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -64,7 +64,7 @@ impl BasicExternalities { ) -> R { let mut ext = Self { inner: Storage { top: std::mem::replace(&mut storage.top, Default::default()), - children: std::mem::replace(&mut storage.children, Default::default()), + children_default: std::mem::replace(&mut storage.children_default, Default::default()), }}; let r = ext.execute_with(f); @@ -85,7 +85,7 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { self.inner.top.eq(&other.inner.top) - && self.inner.children.eq(&other.inner.children) + && self.inner.children_default.eq(&other.inner.children_default) } } @@ -105,7 +105,7 @@ impl From> for BasicExternalities { fn from(hashmap: BTreeMap) -> Self { BasicExternalities { inner: Storage { top: hashmap, - children: Default::default(), + children_default: Default::default(), }} } } @@ -132,7 +132,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, key: &[u8], ) -> Option { - self.inner.children.get(child_info.storage_key()) + self.inner.children_default.get(child_info.storage_key()) .and_then(|child| child.data.get(key)).cloned() } @@ -171,7 +171,7 @@ impl Externalities for BasicExternalities { key: &[u8], ) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children.get(child_info.storage_key()) + self.inner.children_default.get(child_info.storage_key()) .and_then(|child| child.data.range::<[u8], 
_>(range).next().map(|(k, _)| k).cloned()) } @@ -193,7 +193,7 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let child_map = self.inner.children.entry(child_info.storage_key().to_vec()) + let child_map = self.inner.children_default.entry(child_info.storage_key().to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -209,7 +209,7 @@ impl Externalities for BasicExternalities { &mut self, child_info: &ChildInfo, ) { - self.inner.children.remove(child_info.storage_key()); + self.inner.children_default.remove(child_info.storage_key()); } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -237,7 +237,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, prefix: &[u8], ) { - if let Some(child) = self.inner.children.get_mut(child_info.storage_key()) { + if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) @@ -254,14 +254,14 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let keys: Vec<_> = self.inner.children.iter().map(|(_k, v)| { + let prefixed_keys: Vec<_> = self.inner.children_default.iter().map(|(_k, v)| { (v.child_info.prefixed_storage_key(), v.child_info.clone()) }).collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. let empty_hash = empty_child_trie_root::>(); - for (prefixed_storage_key, child_info) in keys { + for (prefixed_storage_key, child_info) in prefixed_keys { let child_root = self.child_storage_root(&child_info); if &empty_hash[..] == &child_root[..] 
{ top.remove(prefixed_storage_key.as_slice()); @@ -277,7 +277,7 @@ impl Externalities for BasicExternalities { &mut self, child_info: &ChildInfo, ) -> Vec { - if let Some(child) = self.inner.children.get(child_info.storage_key()) { + if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() @@ -334,7 +334,7 @@ mod tests { let child_info = &child_info; let mut ext = BasicExternalities::new(Storage { top: Default::default(), - children: map![ + children_default: map![ child_info.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], child_info: child_info.to_owned(), @@ -359,6 +359,6 @@ mod tests { // Make sure no values are set by default in `BasicExternalities`. let storage = BasicExternalities::new(Default::default()).into_storages(); assert!(storage.top.is_empty()); - assert!(storage.children.is_empty()); + assert!(storage.children_default.is_empty()); } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 53bf2c585a7f3..c206090fa4e18 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -107,8 +107,8 @@ fn prepare_extrinsics_input<'a, B, H, Number>( let mut children_prefixed_keys = BTreeSet::::new(); let mut children_result = BTreeMap::new(); - for (_storage_key, (_map, child_info)) in changes.prospective.children.iter() - .chain(changes.committed.children.iter()) { + for (_storage_key, (_map, child_info)) in changes.prospective.children_default.iter() + .chain(changes.committed.children_default.iter()) { children_prefixed_keys.insert(child_info.prefixed_storage_key()); } for storage_key in children_prefixed_keys { @@ -140,8 +140,8 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( let (committed, prospective, child_info) = if let Some(sk) = 
storage_key.as_ref() { let child_info = changes.default_child_info(sk).cloned(); ( - changes.committed.children.get(sk).map(|c| &c.0), - changes.prospective.children.get(sk).map(|c| &c.0), + changes.committed.children_default.get(sk).map(|c| &c.0), + changes.prospective.children_default.get(sk).map(|c| &c.0), child_info, ) } else { @@ -429,7 +429,7 @@ mod test { extrinsics: Some(vec![0, 1].into_iter().collect()) }), ].into_iter().collect(), - children: vec![ + children_default: vec![ (child_trie_key1.clone(), (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), @@ -458,7 +458,7 @@ mod test { extrinsics: Some(vec![1].into_iter().collect()) }), ].into_iter().collect(), - children: vec![ + children_default: vec![ (child_trie_key1, (vec![ (vec![100], OverlayedValue { value: Some(vec![202]), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 2c1c3bd01cf51..77ae9a0820fb7 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -502,11 +502,11 @@ where if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { let (root, is_empty, _) = { - let delta = self.overlay.committed.children.get(storage_key) + let delta = self.overlay.committed.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) .chain( - self.overlay.prospective.children.get(storage_key) + self.overlay.prospective.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); @@ -708,7 +708,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - children: map![] + children_default: map![] }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -744,7 +744,7 @@ mod tests { overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], - children: map![ + children_default: map![ 
child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], @@ -789,7 +789,7 @@ mod tests { overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], - children: map![ + children_default: map![ child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 5a7f2ced5952a..6c8aecf775d8a 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -122,7 +122,7 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { let mut inner: HashMap, BTreeMap> - = inners.children.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); + = inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); inner.insert(None, inners.top); InMemory { inner, diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index a361884fe786e..b9e25fc547013 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -77,7 +77,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, /// Child storage changes. - pub children: HashMap, ChildInfo)>, + pub children_default: HashMap, ChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -171,7 +171,7 @@ impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { fn from_iter>(iter: T) -> Self { Self { top: iter.into_iter().collect(), - children: Default::default(), + children_default: Default::default(), } } } @@ -179,13 +179,13 @@ impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { impl OverlayedChangeSet { /// Whether the change set is empty. 
pub fn is_empty(&self) -> bool { - self.top.is_empty() && self.children.is_empty() + self.top.is_empty() && self.children_default.is_empty() } /// Clear the change set. pub fn clear(&mut self) { self.top.clear(); - self.children.clear(); + self.children_default.clear(); } } @@ -213,13 +213,13 @@ impl OverlayedChanges { /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children.get(storage_key) { + if let Some(map) = self.prospective.children_default.get(storage_key) { if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } } - if let Some(map) = self.committed.children.get(storage_key) { + if let Some(map) = self.committed.children_default.get(storage_key) { if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } @@ -253,7 +253,7 @@ impl OverlayedChanges { ) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); - let map_entry = self.prospective.children.entry(storage_key) + let map_entry = self.prospective.children_default.entry(storage_key) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -279,7 +279,7 @@ impl OverlayedChanges { ) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()) + let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -293,7 +293,7 @@ impl OverlayedChanges { e.value = None; }); - if let Some((committed_map, _child_info)) = self.committed.children.get(storage_key) { + if let 
Some((committed_map, _child_info)) = self.committed.children_default.get(storage_key) { for (key, value) in committed_map.iter() { if !map_entry.0.contains_key(key) { map_entry.0.insert(key.clone(), OverlayedValue { @@ -354,7 +354,7 @@ impl OverlayedChanges { ) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()) + let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -370,7 +370,7 @@ impl OverlayedChanges { } } - if let Some((child_committed, _child_info)) = self.committed.children.get(storage_key) { + if let Some((child_committed, _child_info)) = self.committed.children_default.get(storage_key) { // Then do the same with keys from committed changes. // NOTE that we are making changes in the prospective change set. for key in child_committed.keys() { @@ -407,8 +407,8 @@ impl OverlayedChanges { .extend(prospective_extrinsics); } } - for (storage_key, (map, child_info)) in self.prospective.children.drain() { - let child_content = self.committed.children.entry(storage_key) + for (storage_key, (map, child_info)) in self.prospective.children_default.drain() { + let child_content = self.committed.children_default.entry(storage_key) .or_insert_with(|| (Default::default(), child_info)); // No update to child info at this point (will be needed for deletion). 
for (key, val) in map.into_iter() { @@ -437,7 +437,7 @@ impl OverlayedChanges { std::mem::replace(&mut self.committed.top, Default::default()) .into_iter() .map(|(k, v)| (k, v.value)), - std::mem::replace(&mut self.committed.children, Default::default()) + std::mem::replace(&mut self.committed.children_default, Default::default()) .into_iter() .map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci))), ) @@ -534,17 +534,17 @@ impl OverlayedChanges { ) -> H::Out where H::Out: Ord + Encode, { - let child_storage_keys = self.prospective.children.keys() - .chain(self.committed.children.keys()); + let child_storage_keys = self.prospective.children_default.keys() + .chain(self.committed.children_default.keys()); let child_delta_iter = child_storage_keys.map(|storage_key| ( self.default_child_info(storage_key).cloned() .expect("child info initialized in either committed or prospective"), - self.committed.children.get(storage_key) + self.committed.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) .chain( - self.prospective.children.get(storage_key) + self.prospective.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), @@ -595,10 +595,10 @@ impl OverlayedChanges { /// Get child info for a storage key. /// Take the latest value so prospective first. 
pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { - if let Some((_, ci)) = self.prospective.children.get(storage_key) { + if let Some((_, ci)) = self.prospective.children_default.get(storage_key) { return Some(&ci); } - if let Some((_, ci)) = self.committed.children.get(storage_key) { + if let Some((_, ci)) = self.committed.children_default.get(storage_key) { return Some(&ci); } None @@ -638,10 +638,10 @@ impl OverlayedChanges { ) -> Option<(&[u8], &OverlayedValue)> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_prospective_key = self.prospective.children.get(storage_key) + let next_prospective_key = self.prospective.children_default.get(storage_key) .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); - let next_committed_key = self.committed.children.get(storage_key) + let next_committed_key = self.committed.children_default.get(storage_key) .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); match (next_committed_key, next_prospective_key) { diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 6ff6d42aba3f8..9cf773f79b906 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -89,7 +89,7 @@ impl TestExternalities overlay.set_collect_extrinsics(changes_trie_config.is_some()); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); - assert!(storage.children.keys().all(|key| is_child_storage_key(key))); + assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); storage.top.insert(CODE.to_vec(), code.to_vec()); @@ -126,8 +126,8 @@ impl TestExternalities .map(|(k, v)| (k, v.value)).collect(); let mut transaction = vec![(None, top)]; - self.overlay.committed.children.clone().into_iter() - .chain(self.overlay.prospective.children.clone().into_iter()) + 
self.overlay.committed.children_default.clone().into_iter() + .chain(self.overlay.prospective.children_default.clone().into_iter()) .for_each(|(_storage_key, (map, child_info))| { transaction.push(( Some(child_info), diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 8034bb2acccd5..924fd1e67849a 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -66,7 +66,7 @@ pub struct Storage { /// The key does not including prefix, for the `default` /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` /// tries. - pub children: std::collections::HashMap, StorageChild>, + pub children_default: std::collections::HashMap, StorageChild>, } /// Storage change set @@ -134,7 +134,7 @@ pub mod well_known_keys { #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum ChildInfo { /// This is the one used by default. - ParentKeyId(ChildTrie), + ParentKeyId(ChildTrieParentKeyId), } impl ChildInfo { @@ -143,12 +143,12 @@ impl ChildInfo { /// storage key. pub fn new_default(storage_key: &[u8]) -> Self { let data = storage_key.to_vec(); - ChildInfo::ParentKeyId(ChildTrie { data }) + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) } /// Same as `new_default` but with `Vec` as input. pub fn new_default_from_vec(storage_key: Vec) -> Self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data: storage_key, }) } @@ -178,7 +178,7 @@ impl ChildInfo { /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, }) => (data, ChildType::ParentKeyId as u32), } @@ -187,7 +187,7 @@ impl ChildInfo { /// Owned variant of `info`. 
pub fn into_info(self) -> (Vec, u32) { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, }) => (data, ChildType::ParentKeyId as u32), } @@ -207,7 +207,7 @@ impl ChildInfo { /// child trie. pub fn storage_key(&self) -> &[u8] { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, }) => &data[..], } @@ -217,7 +217,7 @@ impl ChildInfo { /// this trie. pub fn prefixed_storage_key(&self) -> Vec { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, }) => ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), } @@ -227,7 +227,7 @@ impl ChildInfo { /// this trie. pub fn into_prefixed_storage_key(self) -> Vec { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { mut data, }) => { ChildType::ParentKeyId.do_prefix_key(&mut data); @@ -303,12 +303,12 @@ impl ChildType { /// that will be use only once. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] -pub struct ChildTrie { +pub struct ChildTrieParentKeyId { /// Data is the full prefixed storage key. data: Vec, } -impl ChildTrie { +impl ChildTrieParentKeyId { /// Try to update with another instance, return false if both instance /// are not compatible. 
fn try_update(&mut self, other: &ChildInfo) -> bool { diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 0fb8e6371f29f..646238726d859 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -189,7 +189,7 @@ impl TestClientBuilder::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); @@ -203,7 +203,7 @@ pub trait TestClientBuilderExt: Sized { let key = key.into(); assert!(!storage_key.is_empty()); assert!(!key.is_empty()); - self.genesis_init_mut().extra_storage.children + self.genesis_init_mut().extra_storage.children_default .entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), @@ -311,7 +311,10 @@ impl Fetcher for LightFetcher { unimplemented!() } - fn remote_read_child(&self, _: RemoteReadChildRequest) -> Self::RemoteReadResult { + fn remote_read_child( + &self, + _: RemoteReadDefaultChildRequest, + ) -> Self::RemoteReadResult { unimplemented!() } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 25d9a807ccee1..b9de3ab3f4cb0 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -73,7 +73,7 @@ impl GenesisConfig { map.extend(self.extra_storage.top.clone().into_iter()); // Assimilate the system genesis config. 
- let mut storage = Storage { top: map, children: self.extra_storage.children.clone()}; + let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone()}; let mut config = system::GenesisConfig::default(); config.authorities = self.authorities.clone(); config.assimilate_storage(&mut storage).expect("Adding `system::GensisConfig` to the genesis"); @@ -85,7 +85,7 @@ impl GenesisConfig { pub fn insert_genesis_block( storage: &mut Storage, ) -> sp_core::hash::H256 { - let child_roots = storage.children.iter().map(|(sk, child_content)| { + let child_roots = storage.children_default.iter().map(|(sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect(), ); diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index d0a38c7c77882..20f980cf97273 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -368,7 +368,7 @@ mod tests { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], - children: map![], + children_default: map![], }, ) } From 8fcc11206648a93881cdd9d69b0e34b2a55fb8cb Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 12:06:32 +0100 Subject: [PATCH 057/185] Switch back to using child_type for rpc (and light client). 
--- client/api/src/light.rs | 12 ++++--- client/network/src/on_demand_layer.rs | 4 +-- client/network/src/protocol.rs | 12 ++++--- .../src/protocol/light_client_handler.rs | 21 +++++++----- client/network/src/protocol/light_dispatch.rs | 29 +++++++++------- client/network/src/protocol/message.rs | 10 +++--- .../src/protocol/schema/light.v1.proto | 10 ++++-- client/rpc-api/src/state/mod.rs | 20 ++++++----- client/rpc/src/state/mod.rs | 34 ++++++++++++------- client/rpc/src/state/state_full.rs | 27 +++++++++++---- client/rpc/src/state/state_light.rs | 17 ++++++---- client/rpc/src/state/tests.rs | 11 +++--- client/src/light/fetcher.rs | 20 ++++++----- primitives/storage/src/lib.rs | 2 +- test-utils/runtime/client/src/lib.rs | 4 +-- 15 files changed, 142 insertions(+), 91 deletions(-) diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 61f56628d5866..67376947d3913 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -75,13 +75,15 @@ pub struct RemoteReadRequest { /// Remote storage read child request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct RemoteReadDefaultChildRequest { +pub struct RemoteReadChildRequest { /// Read at state of given block. pub block: Header::Hash, /// Header of block at which read is performed. pub header: Header, /// Storage key for child. pub storage_key: Vec, + /// Child type. + pub child_type: u32, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. @@ -175,7 +177,7 @@ pub trait Fetcher: Send + Sync { /// Fetch remote storage child value. fn remote_read_child( &self, - request: RemoteReadDefaultChildRequest + request: RemoteReadChildRequest ) -> Self::RemoteReadResult; /// Fetch remote call result. 
fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; @@ -205,9 +207,9 @@ pub trait FetchChecker: Send + Sync { remote_proof: StorageProof, ) -> ClientResult, Option>>>; /// Check remote storage read proof. - fn check_read_default_child_proof( + fn check_read_child_proof( &self, - request: &RemoteReadDefaultChildRequest, + request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>>; /// Check remote method execution proof. @@ -330,7 +332,7 @@ pub mod tests { not_implemented_in_tests() } - fn remote_read_child(&self, _request: RemoteReadDefaultChildRequest
) -> Self::RemoteReadResult { + fn remote_read_child(&self, _request: RemoteReadChildRequest
) -> Self::RemoteReadResult { not_implemented_in_tests() } diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 330daf590d7a7..3a20cb9548a76 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -23,7 +23,7 @@ use parking_lot::Mutex; use sp_blockchain::Error as ClientError; use sc_client_api::{ Fetcher, FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, - RemoteChangesRequest, RemoteReadDefaultChildRequest, RemoteBodyRequest, + RemoteChangesRequest, RemoteReadChildRequest, RemoteBodyRequest, }; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; @@ -103,7 +103,7 @@ impl Fetcher for OnDemand where fn remote_read_child( &self, - request: RemoteReadDefaultChildRequest + request: RemoteReadChildRequest ) -> Self::RemoteReadResult { let (sender, receiver) = oneshot::channel(); let _ = self.requests_send.unbounded_send(RequestData::RemoteReadDefaultChild(request, sender)); diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 3622b96685649..b3514a9dc7670 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -245,19 +245,21 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { self.behaviour.send_packet(who, message.encode()) } - fn send_read_default_child_request( + fn send_read_child_request( &mut self, who: &PeerId, id: RequestId, block: ::Hash, storage_key: Vec, + child_type: u32, keys: Vec>, ) { - let message: Message = message::generic::Message::RemoteReadDefaultChildRequest( - message::RemoteReadDefaultChildRequest { + let message: Message = message::generic::Message::RemoteReadChildRequest( + message::RemoteReadChildRequest { id, block, storage_key, + child_type, keys, }); @@ -640,7 +642,7 @@ impl, H: ExHashT> Protocol { self.on_finality_proof_request(who, request), GenericMessage::FinalityProofResponse(response) => return self.on_finality_proof_response(who, 
response), - GenericMessage::RemoteReadDefaultChildRequest(request) => + GenericMessage::RemoteReadChildRequest(request) => self.on_remote_read_child_request(who, request), GenericMessage::Consensus(msg) => return if self.registered_notif_protocols.contains(&msg.engine_id) { @@ -1548,7 +1550,7 @@ impl, H: ExHashT> Protocol { fn on_remote_read_child_request( &mut self, who: PeerId, - request: message::RemoteReadDefaultChildRequest, + request: message::RemoteReadChildRequest, ) { if request.keys.is_empty() { debug!(target: "sync", "Invalid remote child read request sent by {}", who); diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 1c49c20b0c4b0..30c9e0d9597b9 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -170,7 +170,7 @@ pub enum Request { sender: oneshot::Sender, Option>>, ClientError>> }, ReadDefaultChild { - request: fetcher::RemoteReadDefaultChildRequest, + request: fetcher::RemoteReadChildRequest, sender: oneshot::Sender, Option>>, ClientError>> }, Call { @@ -369,7 +369,7 @@ where } Request::ReadDefaultChild { request, .. 
} => { let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_default_child_proof(&request, proof)?; + let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } _ => Err(Error::UnexpectedResponse) @@ -496,7 +496,7 @@ where ( &mut self , peer: &PeerId , request_id: u64 - , request: &api::v1::light::RemoteReadDefaultChildRequest + , request: &api::v1::light::RemoteReadChildRequest ) -> Result { if request.keys.is_empty() { @@ -692,7 +692,7 @@ where self.on_remote_read_request(&peer, request.id, r), Some(api::v1::light::request::Request::RemoteHeaderRequest(r)) => self.on_remote_header_request(&peer, request.id, r), - Some(api::v1::light::request::Request::RemoteReadDefaultChildRequest(r)) => + Some(api::v1::light::request::Request::RemoteReadChildRequest(r)) => self.on_remote_read_child_request(&peer, request.id, r), Some(api::v1::light::request::Request::RemoteChangesRequest(r)) => self.on_remote_changes_request(&peer, request.id, r), @@ -919,12 +919,13 @@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: api::v1::light::request::Request::RemoteReadRequest(r) } Request::ReadDefaultChild { request, .. } => { - let r = api::v1::light::RemoteReadDefaultChildRequest { + let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), + child_type: request.child_type, keys: request.keys.clone(), }; - api::v1::light::request::Request::RemoteReadDefaultChildRequest(r) + api::v1::light::request::Request::RemoteReadChildRequest(r) } Request::Call { request, .. 
} => { let r = api::v1::light::RemoteCallRequest { @@ -1620,10 +1621,11 @@ mod tests { #[test] fn receives_remote_read_child_response() { let mut chan = oneshot::channel(); - let request = fetcher::RemoteReadDefaultChildRequest { + let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b":child_storage:sub".to_vec(), + child_type: 1, keys: vec![b":key".to_vec()], retry_count: None, }; @@ -1720,16 +1722,17 @@ mod tests { #[test] fn send_receive_read_child() { let chan = oneshot::channel(); - let request = fetcher::RemoteReadDefaultChildRequest { + let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b"sub".to_vec(), + child_type: 1, keys: vec![b":key".to_vec()], retry_count: None, }; send_receive(Request::ReadDefaultChild { request, sender: chan.0 }); assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_default_child_proof` + // ^--- from `DummyFetchChecker::check_read_child_proof` } #[test] diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index e2b4ff7874095..15b5b42fd3ba2 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -29,7 +29,7 @@ use linked_hash_map::{Entry, LinkedHashMap}; use sp_blockchain::Error as ClientError; use sc_client_api::{FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof, - RemoteReadDefaultChildRequest, RemoteBodyRequest, StorageProof}; + RemoteReadChildRequest, RemoteBodyRequest, StorageProof}; use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; use crate::config::Roles; @@ -64,12 +64,13 @@ pub trait LightDispatchNetwork { ); /// Send to `who` a child read request. 
- fn send_read_default_child_request( + fn send_read_child_request( &mut self, who: &PeerId, id: RequestId, block: ::Hash, storage_key: Vec, + child_type: u32, keys: Vec>, ); @@ -148,7 +149,7 @@ pub(crate) enum RequestData { OneShotSender, Option>>, ClientError>>, ), RemoteReadDefaultChild( - RemoteReadDefaultChildRequest, + RemoteReadChildRequest, OneShotSender, Option>>, ClientError>> ), RemoteCall(RemoteCallRequest, OneShotSender, ClientError>>), @@ -189,9 +190,9 @@ impl FetchChecker for AlwaysBadChecker { Err(ClientError::Msg("AlwaysBadChecker".into())) } - fn check_read_default_child_proof( + fn check_read_child_proof( &self, - _request: &RemoteReadDefaultChildRequest, + _request: &RemoteReadChildRequest, _remote_proof: StorageProof, ) -> Result, Option>>, ClientError> { Err(ClientError::Msg("AlwaysBadChecker".into())) @@ -404,7 +405,7 @@ impl LightDispatch where ), }}, RequestData::RemoteReadDefaultChild(request, sender) => { - match checker.check_read_default_child_proof(&request, response.proof) { + match checker.check_read_child_proof(&request, response.proof) { Ok(response) => { // we do not bother if receiver has been dropped already let _ = sender.send(Ok(response)); @@ -618,11 +619,12 @@ impl Request { data.keys.clone(), ), RequestData::RemoteReadDefaultChild(ref data, _) => - out.send_read_default_child_request( + out.send_read_child_request( peer, self.id, data.block, data.storage_key.clone(), + data.child_type, data.keys.clone(), ), RequestData::RemoteCall(ref data, _) => @@ -682,7 +684,7 @@ pub mod tests { use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, ChangesProof, RemoteCallRequest, RemoteReadRequest, - RemoteReadDefaultChildRequest, RemoteChangesRequest, RemoteBodyRequest}; + RemoteReadChildRequest, RemoteChangesRequest, RemoteBodyRequest}; use crate::config::Roles; use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; @@ -729,9 
+731,9 @@ pub mod tests { } } - fn check_read_default_child_proof( + fn check_read_child_proof( &self, - request: &RemoteReadDefaultChildRequest, + request: &RemoteReadChildRequest, _: StorageProof, ) -> ClientResult, Option>>> { match self.ok { @@ -817,8 +819,8 @@ pub mod tests { } fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} - fn send_read_default_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, - _: Vec>) {} + fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, + _: u32, _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, _: ::Hash, _: ::Hash, _: Option>, _: Vec) {} @@ -1041,10 +1043,11 @@ pub mod tests { let (tx, response) = oneshot::channel(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadDefaultChild( - RemoteReadDefaultChildRequest { + RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b"sub".to_vec(), + child_type: 1, keys: vec![b":key".to_vec()], retry_count: None, }, tx)); diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ed9cd811006de..d44e13b06eab1 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -24,7 +24,7 @@ pub use self::generic::{ RemoteHeaderRequest, RemoteHeaderResponse, RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, - FromBlock, RemoteReadDefaultChildRequest, + FromBlock, RemoteReadChildRequest, }; use sc_client_api::StorageProof; @@ -212,7 +212,7 @@ pub mod generic { /// Remote changes response. RemoteChangesResponse(RemoteChangesResponse), /// Remote child storage read request. 
- RemoteReadDefaultChildRequest(RemoteReadDefaultChildRequest), + RemoteReadChildRequest(RemoteReadChildRequest), /// Finality proof request. FinalityProofRequest(FinalityProofRequest), /// Finality proof response. @@ -242,7 +242,7 @@ pub mod generic { Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse", Message::RemoteChangesRequest(_) => "RemoteChangesRequest", Message::RemoteChangesResponse(_) => "RemoteChangesResponse", - Message::RemoteReadDefaultChildRequest(_) => "RemoteReadDefaultChildRequest", + Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", Message::FinalityProofRequest(_) => "FinalityProofRequest", Message::FinalityProofResponse(_) => "FinalityProofResponse", Message::ConsensusBatch(_) => "ConsensusBatch", @@ -417,13 +417,15 @@ pub mod generic { #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote storage read child request. - pub struct RemoteReadDefaultChildRequest { + pub struct RemoteReadChildRequest { /// Unique request id. pub id: RequestId, /// Block at which to perform call. pub block: H, /// Child Storage key. pub storage_key: Vec, + /// Child type. + pub child_type: u32, /// Storage key. pub keys: Vec>, } diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index 1895f6275fe48..c4aff40c9626d 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -20,7 +20,7 @@ message Request { RemoteCallRequest remote_call_request = 2; RemoteReadRequest remote_read_request = 3; RemoteHeaderRequest remote_header_request = 4; - RemoteReadDefaultChildRequest remote_read_default_child_request = 5; + RemoteReadChildRequest remote_read_child_request = 5; RemoteChangesRequest remote_changes_request = 6; } } @@ -68,11 +68,15 @@ message RemoteReadResponse { } // Remote storage read child request. -message RemoteReadDefaultChildRequest { +message RemoteReadChildRequest { // Block at which to perform call. 
bytes block = 2; - // Child Storage key. + // Child Storage key, this is relative + // to the child type storage location. bytes storage_key = 3; + /// Child type, its required to resolve + /// child storage final location. + uint32 child_type = 5; // Storage keys. repeated bytes keys = 6; } diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 540eb67d5e7ea..41690134009b8 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -73,37 +73,41 @@ pub trait StateApi { fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; /// Returns the keys with prefix from a child storage, leave empty to get all the keys - #[rpc(name = "state_getDefaultChildKeys")] - fn default_child_storage_keys( + #[rpc(name = "state_getChildKeys")] + fn child_storage_keys( &self, child_storage_key: StorageKey, + child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. - #[rpc(name = "state_getDefaultChildStorage")] - fn default_child_storage( + #[rpc(name = "state_getChildStorage")] + fn child_storage( &self, child_storage_key: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. - #[rpc(name = "state_getDefaultChildStorageHash")] - fn default_child_storage_hash( + #[rpc(name = "state_getChildStorageHash")] + fn child_storage_hash( &self, child_storage_key: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- #[rpc(name = "state_getDefaultChildStorageSize")] - fn default_child_storage_size( + #[rpc(name = "state_getDefaultStorageSize")] + fn child_storage_size( &self, child_storage_key: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 1d0c322f9803f..a25828a869b00 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -105,37 +105,41 @@ pub trait StateBackend: Send + Sync + 'static /// Returns the keys with prefix from a defaultchild storage, /// leave empty to get all the keys - fn default_child_storage_keys( + fn child_storage_keys( &self, block: Option, storage_key: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. - fn default_child_storage( + fn child_storage( &self, block: Option, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. - fn default_child_storage_hash( + fn child_storage_hash( &self, block: Option, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- fn default_child_storage_size( + fn child_storage_size( &self, block: Option, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.default_child_storage(block, storage_key, key) + Box::new(self.child_storage(block, storage_key, child_type, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -293,40 +297,44 @@ impl StateApi for State self.backend.storage_size(block, key) } - fn default_child_storage( + fn child_storage( &self, storage_key: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.default_child_storage(block, storage_key, key) + self.backend.child_storage(block, storage_key, child_type, key) } - fn default_child_storage_keys( + fn child_storage_keys( &self, storage_key: StorageKey, + child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.default_child_storage_keys(block, storage_key, key_prefix) + self.backend.child_storage_keys(block, storage_key, child_type, key_prefix) } - fn default_child_storage_hash( + fn child_storage_hash( &self, storage_key: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.default_child_storage_hash(block, storage_key, key) + self.backend.child_storage_hash(block, storage_key, child_type, key) } - fn default_child_storage_size( + fn child_storage_size( &self, storage_key: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.default_child_storage_size(block, storage_key, key) + self.backend.child_storage_size(block, storage_key, child_type, key) } fn metadata(&self, block: Option) -> FutureResult { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index ca237dbfa230f..508ff8c74417d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,8 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - Bytes, 
storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, + ChildInfo, ChildType}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -305,16 +306,20 @@ impl StateBackend for FullState, storage_key: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = ChildInfo::new_default_from_vec(storage_key.0); + let child_info = match ChildType::new(child_type) { + Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + None => return Err("Invalid child type".into()), + }; self.client.child_storage_keys( &BlockId::Hash(block), &child_info, @@ -324,16 +329,20 @@ impl StateBackend for FullState, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = ChildInfo::new_default_from_vec(storage_key.0); + let child_info = match ChildType::new(child_type) { + Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + None => return Err("Invalid child type".into()), + }; self.client.child_storage( &BlockId::Hash(block), &child_info, @@ -343,16 +352,20 @@ impl StateBackend for FullState, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = ChildInfo::new_default_from_vec(storage_key.0); + let child_info = match ChildType::new(child_type) { + Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + None => return Err("Invalid child type".into()), + }; self.client.child_storage_hash( &BlockId::Hash(block), &child_info, diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index d9f56d9fb584f..80d43f8ccee82 100644 --- a/client/rpc/src/state/state_light.rs +++ 
b/client/rpc/src/state/state_light.rs @@ -45,7 +45,7 @@ use sc_client::{ BlockchainEvents, Client, CallExecutor, light::{ blockchain::{future_header, RemoteBlockchain}, - fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadDefaultChildRequest}, + fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest}, }, }; use sp_core::{ @@ -246,29 +246,32 @@ impl StateBackend for LightState, _storage_key: StorageKey, + _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) } - fn default_child_storage( + fn child_storage( &self, block: Option, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); let fetcher = self.fetcher.clone(); let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadDefaultChildRequest { + Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { block, header, storage_key: storage_key.0, + child_type, keys: vec![key.0.clone()], retry_count: Default::default(), }).then(move |result| ready(result @@ -285,14 +288,14 @@ impl StateBackend for LightState, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self - .default_child_storage(block, storage_key, key) + Box::new(self.child_storage(block, storage_key, child_type, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HasherFor::::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index b579003e6c01c..417912e4b9585 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -65,7 +65,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.default_child_storage(storage_key, key, Some(genesis_hash).into()) + client.child_storage(storage_key, 
1, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -87,24 +87,27 @@ fn should_return_child_storage() { assert_matches!( - client.default_child_storage( + client.child_storage( child_key.clone(), + 1, key.clone(), Some(genesis_hash).into(), ).wait(), Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); assert_matches!( - client.default_child_storage_hash( + client.child_storage_hash( child_key.clone(), + 1, key.clone(), Some(genesis_hash).into(), ).wait().map(|x| x.is_some()), Ok(true) ); assert_matches!( - client.default_child_storage_size( + client.child_storage_size( child_key.clone(), + 1, key.clone(), None, ).wait(), diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 4aafbfc630fe3..7a7ef6e0a91df 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -23,7 +23,7 @@ use std::marker::PhantomData; use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildType}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, AtLeast32Bit, CheckedConversion, @@ -39,7 +39,7 @@ use sp_blockchain::{Error as ClientError, Result as ClientResult}; use crate::cht; pub use sc_client_api::{ light::{ - RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadDefaultChildRequest, + RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, Storage as BlockchainStorage, }, @@ -236,16 +236,19 @@ impl FetchChecker for LightDataChecker ).map_err(Into::into) } - fn check_read_default_child_proof( + fn check_read_child_proof( &self, - request: &RemoteReadDefaultChildRequest, + request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - let child_trie = 
ChildInfo::new_default(&request.storage_key); + let child_info = match ChildType::new(request.child_type) { + Some(ChildType::ParentKeyId) => ChildInfo::new_default(&request.storage_key[..]), + None => return Err("Invalid child type".into()), + }; read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, - &child_trie, + &child_info, request.keys.iter(), ).map_err(Into::into) } @@ -502,11 +505,12 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_default_child_proof( - &RemoteReadDefaultChildRequest::
{ + assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( + &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, storage_key: b"child1".to_vec(), + child_type: 1, keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 924fd1e67849a..b89a4c43450c5 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -258,7 +258,7 @@ pub enum ChildType { impl ChildType { /// Try to get a child type from its `u32` representation. - fn new(repr: u32) -> Option { + pub fn new(repr: u32) -> Option { Some(match repr { r if r == ChildType::ParentKeyId as u32 => ChildType::ParentKeyId, _ => return None, diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index a16a3596cadef..7685157d96a13 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -37,7 +37,7 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Numb use sc_client::{ light::fetcher::{ Fetcher, - RemoteHeaderRequest, RemoteReadRequest, RemoteReadDefaultChildRequest, + RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, RemoteCallRequest, RemoteChangesRequest, RemoteBodyRequest, }, }; @@ -313,7 +313,7 @@ impl Fetcher for LightFetcher { fn remote_read_child( &self, - _: RemoteReadDefaultChildRequest, + _: RemoteReadChildRequest, ) -> Self::RemoteReadResult { unimplemented!() } From 74aa3f8a34a72f9895ecd4e8cc559f2fdc762322 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 26 Feb 2020 15:45:49 +0100 Subject: [PATCH 058/185] bump runtime version --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 730a983a43818..aaffe520e8013 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -81,7 +81,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. 
If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 226, + spec_version: 227, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; From 4c40ea7c750245ab4b286ad5cc433853140dc71a Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 26 Feb 2020 19:15:58 +0100 Subject: [PATCH 059/185] Resolve merging change of api from #4857 --- bin/node/runtime/src/lib.rs | 5 - client/chain-spec/src/chain_spec.rs | 24 -- client/db/src/bench.rs | 30 +-- client/db/src/lib.rs | 49 +---- client/db/src/storage_cache.rs | 28 --- client/network/src/chain.rs | 8 - client/network/src/protocol.rs | 22 -- .../src/protocol/light_client_handler.rs | 34 --- client/rpc/src/state/state_full.rs | 30 --- client/rpc/src/state/tests.rs | 26 --- client/src/client.rs | 16 -- client/src/light/backend.rs | 24 -- client/src/light/fetcher.rs | 27 --- frame/contracts/src/account_db.rs | 44 +--- frame/contracts/src/exec.rs | 10 +- frame/contracts/src/lib.rs | 18 +- frame/contracts/src/rent.rs | 8 - frame/contracts/src/tests.rs | 4 +- frame/support/src/storage/child.rs | 56 ----- primitives/externalities/src/lib.rs | 44 ---- primitives/io/src/lib.rs | 63 ------ primitives/state-machine/src/backend.rs | 87 +------- primitives/state-machine/src/basic.rs | 67 +----- .../state-machine/src/changes_trie/build.rs | 32 +-- primitives/state-machine/src/ext.rs | 205 ++---------------- .../state-machine/src/in_memory_backend.rs | 86 +------- primitives/state-machine/src/lib.rs | 95 -------- .../state-machine/src/overlayed_changes.rs | 49 +---- .../state-machine/src/proving_backend.rs | 84 +------ primitives/state-machine/src/trie_backend.rs | 83 +------ .../state-machine/src/trie_backend_essence.rs | 157 +------------- primitives/storage/src/lib.rs | 130 +---------- primitives/trie/src/lib.rs | 204 +---------------- test-utils/client/src/lib.rs | 5 - test-utils/runtime/client/src/lib.rs | 4 - 35 files changed, 77 insertions(+), 1781 
deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e9cf74b528bec..aaffe520e8013 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -81,13 +81,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. -<<<<<<< HEAD - spec_version: 216, - impl_version: 3, -======= spec_version: 227, impl_version: 0, ->>>>>>> child_trie_w3_change apis: RUNTIME_API_VERSIONS, }; diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index fab0b587a75bd..e67deab30f952 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -76,16 +76,8 @@ impl BuildStorage for ChainSpec { Genesis::Runtime(gc) => gc.build_storage(), Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), -<<<<<<< HEAD - children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = ChildInfo::resolve_child_info( - child_content.child_type, - child_content.child_info.as_slice(), - ).expect("chain spec contains correct content"); -======= children_default: children_map.into_iter().map(|(storage_key, child_content)| { let child_info = ChildInfo::new_default(storage_key.0.as_slice()); ->>>>>>> child_trie_w3_change ( storage_key.0, StorageChild { @@ -281,21 +273,6 @@ impl ChainSpec { let top = storage.top.into_iter() .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(); -<<<<<<< HEAD - let children = storage.children.into_iter() - .map(|(sk, child)| { - let (info, ci_type) = child.child_info.info(); - ( - StorageKey(sk), - ChildRawStorage { - data: child.data.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - child_info: info.to_vec(), - child_type: ci_type, - }, - )}) -======= let children_default = 
storage.children_default.into_iter() .map(|(sk, child)| ( StorageKey(sk), @@ -303,7 +280,6 @@ impl ChainSpec { .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(), )) ->>>>>>> child_trie_w3_change .collect(); Genesis::Raw(RawGenesis { top, children_default }) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 372fb8e1c90f6..55561e5e50ad7 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -146,10 +146,6 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -162,10 +158,6 @@ impl StateBackend> for BenchmarkingState { fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result { @@ -178,10 +170,6 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -202,10 +190,6 @@ impl StateBackend> for BenchmarkingState { fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -216,10 +200,6 @@ impl StateBackend> for BenchmarkingState { fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -237,10 +217,6 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -259,10 +235,6 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) 
-> Vec> { @@ -283,7 +255,7 @@ impl StateBackend> for BenchmarkingState { let mut keyspace = crate::Keyspaced::new(&[]); for (info, mut updates) in transaction.into_iter() { // child info with strong unique id are using the same state-db with prefixed key - if info.child_type() != ChildType::CryptoUniqueId { + if info.child_type() != ChildType::ParentKeyId { // Unhandled child kind unimplemented!( "Data for {:?} without a backend implementation", diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index a1fbe41f89348..f6d066ca98c1f 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -152,10 +152,6 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -168,10 +164,6 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result { @@ -184,10 +176,6 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -204,10 +192,6 @@ impl StateBackend> for RefTrackingState { fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -216,10 +200,6 @@ impl StateBackend> for RefTrackingState { fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -236,10 +216,6 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) @@ -259,10 +235,6 @@ impl StateBackend> 
for RefTrackingState { fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { @@ -1149,7 +1121,7 @@ impl Backend { let mut keyspace = Keyspaced::new(&[]); for (info, mut updates) in operation.db_updates.into_iter() { // child info with strong unique id are using the same state-db with prefixed key - if info.child_type() != ChildType::CryptoUniqueId { + if info.child_type() != ChildType::ParentKeyId { // Unhandled child kind return Err(ClientError::Backend(format!( "Data for {:?} without a backend implementation", @@ -1869,8 +1841,7 @@ pub(crate) mod tests { fn set_state_data() { let db = Backend::::new_test(2, 0); - let child_info = sp_core::storage::ChildInfo::new_default(b"unique_id"); - let storage_key = b":child_storage:default:key1"; + let child_info = sp_core::storage::ChildInfo::new_default(b"key1"); let hash = { let mut op = db.begin_operation().unwrap(); @@ -1897,23 +1868,19 @@ pub(crate) mod tests { .iter() .cloned() .map(|(x, y)| (x, Some(y))), - vec![(storage_key.to_vec(), child_storage.clone(), child_info.clone())], + vec![(child_info.clone(), child_storage.clone())], false, ).0.into(); let hash = header.hash(); - let mut children = HashMap::default(); - children.insert(storage_key.to_vec(), sp_core::storage::StorageChild { + let mut children_default = HashMap::default(); + children_default.insert(child_info.storage_key().to_vec(), sp_core::storage::StorageChild { child_info: child_info.clone(), data: child_storage.iter().map(|(k, v)| (k.clone(), v.clone().unwrap())).collect(), }); op.reset_storage(Storage { top: storage.iter().cloned().collect(), -<<<<<<< HEAD - children, -======= - children_default: Default::default(), ->>>>>>> child_trie_w3_change + children_default, }).unwrap(); op.set_block_data( header.clone(), @@ -1930,7 +1897,7 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 
5]).unwrap(), None); assert_eq!( - state.child_storage(&storage_key[..], &child_info, &[2, 3, 5]).unwrap(), + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), Some(vec![4, 4, 6]), ); @@ -1972,7 +1939,7 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); assert_eq!( - state.child_storage(&storage_key[..], &child_info, &[2, 3, 5]).unwrap(), + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), Some(vec![4, 4, 6]), ); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index db4aaf40409fe..07766288541f4 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -539,10 +539,6 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -579,10 +575,6 @@ impl>, B: BlockT> StateBackend> for Ca fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result { @@ -591,10 +583,6 @@ impl>, B: BlockT> StateBackend> for Ca fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -607,10 +595,6 @@ impl>, B: BlockT> StateBackend> for Ca fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -627,10 +611,6 @@ impl>, B: BlockT> StateBackend> for Ca fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -647,10 +627,6 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change 
child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) @@ -670,10 +646,6 @@ impl>, B: BlockT> StateBackend> for Ca fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 3371b8fee4b49..442334cb4f015 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -56,10 +56,6 @@ pub trait Client: Send + Sync { fn read_child_proof( &self, block: &Block::Hash, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: &[Vec], ) -> Result; @@ -141,10 +137,6 @@ impl Client for SubstrateClient where fn read_child_proof( &self, block: &Block::Hash, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: &[Vec], ) -> Result { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 19102831f72a8..00984dcf3cbb6 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1517,27 +1517,6 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); -<<<<<<< HEAD - let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.context_data.chain.read_child_proof( - &request.block, - &request.storage_key, - &child_info, - &request.keys, - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } -======= let child_info = ChildInfo::new_default(&request.storage_key); let proof = match self.context_data.chain.read_child_proof( &request.block, @@ -1555,7 +1534,6 
@@ impl Protocol { error ); StorageProof::empty() ->>>>>>> child_trie_w3_change } }; self.send_message( diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index b88651e5c10f7..a7d3bf4dbbfe3 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -514,29 +514,10 @@ where let block = Decode::decode(&mut request.block.as_ref())?; -<<<<<<< HEAD - let proof = - if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.chain.read_child_proof(&block, &request.storage_key, &info, &request.keys) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - } - } else { -======= let child_info = ChildInfo::new_default(&request.storage_key); let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { Ok(proof) => proof, Err(error) => { ->>>>>>> child_trie_w3_change log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", request_id, peer, @@ -1150,11 +1131,6 @@ mod tests { use super::{Event, LightClientHandler, Request, OutboundProtocol, PeerStatus}; use void::Void; -<<<<<<< HEAD - const CHILD_UUID: &[u8] = b"foobarbaz"; - -======= ->>>>>>> child_trie_w3_change type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; type Handler = LightClientHandler; type Swarm = libp2p::swarm::Swarm; @@ -1645,11 +1621,6 @@ mod tests { #[test] fn receives_remote_read_child_response() { -<<<<<<< HEAD - let child_info = ChildInfo::new_default(CHILD_UUID); - let info = child_info.info(); -======= ->>>>>>> child_trie_w3_change let mut chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { 
header: dummy_header(), @@ -1751,11 +1722,6 @@ mod tests { #[test] fn send_receive_read_child() { -<<<<<<< HEAD - let child_info = ChildInfo::new_default(CHILD_UUID); - let info = child_info.info(); -======= ->>>>>>> child_trie_w3_change let chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 687667daab542..508ff8c74417d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -315,15 +315,6 @@ impl StateBackend for FullState FutureResult> { Box::new(result( self.block_or_best(block) -<<<<<<< HEAD - .and_then(|block| self.client.child_storage_keys( - &BlockId::Hash(block), - &child_storage_key, - &ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &prefix, - )) -======= .and_then(|block| { let child_info = match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), @@ -335,7 +326,6 @@ impl StateBackend for FullState>>>>>> child_trie_w3_change .map_err(client_err))) } @@ -348,15 +338,6 @@ impl StateBackend for FullState FutureResult> { Box::new(result( self.block_or_best(block) -<<<<<<< HEAD - .and_then(|block| self.client.child_storage( - &BlockId::Hash(block), - &child_storage_key, - &ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) -======= .and_then(|block| { let child_info = match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), @@ -368,7 +349,6 @@ impl StateBackend for FullState>>>>>> child_trie_w3_change .map_err(client_err))) } @@ -381,15 +361,6 @@ impl StateBackend for FullState FutureResult> { Box::new(result( self.block_or_best(block) -<<<<<<< HEAD - .and_then(|block| self.client.child_storage_hash( - &BlockId::Hash(block), - &child_storage_key, - 
&ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) -======= .and_then(|block| { let child_info = match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), @@ -401,7 +372,6 @@ impl StateBackend for FullState>>>>>> child_trie_w3_change .map_err(client_err))) } diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index f913837539171..df7c83e1dfe87 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,41 +30,24 @@ use substrate_test_runtime_client::{ runtime, }; -<<<<<<< HEAD -const CHILD_UID: &'static [u8] = b"unique_id"; -======= const STORAGE_KEY: &[u8] = b"child"; ->>>>>>> child_trie_w3_change #[test] fn should_return_storage() { const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_VALUE: &[u8] = b"hello world !"; -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(CHILD_UID); - let mut core = tokio::runtime::Runtime::new().unwrap(); - let client = TestClientBuilder::new() - .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), &child_info1, KEY.to_vec(), CHILD_VALUE.to_vec()) -======= let child_info = ChildInfo::new_default(STORAGE_KEY); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) .add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec()) ->>>>>>> child_trie_w3_change .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(STORAGE_KEY.to_vec()); -<<<<<<< HEAD - let (child_info, child_type) = child_info1.info(); - let child_info = StorageKey(child_info.to_vec()); -======= ->>>>>>> child_trie_w3_change assert_eq!( client.storage(key.clone(), 
Some(genesis_hash).into()).wait() @@ -92,19 +75,10 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(CHILD_UID); - let (child_info, child_type) = child_info1.info(); - let child_info = StorageKey(child_info.to_vec()); - let core = tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", &child_info1, vec![42_u8]) -======= let child_info = ChildInfo::new_default(STORAGE_KEY); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() .add_child_storage(&child_info, "key", vec![42_u8]) ->>>>>>> child_trie_w3_change .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); diff --git a/client/src/client.rs b/client/src/client.rs index 3fc91090376fb..1131ab78de223 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -334,10 +334,6 @@ impl Client where pub fn child_storage_keys( &self, id: &BlockId, -<<<<<<< HEAD - child_storage_key: &StorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { @@ -353,10 +349,6 @@ impl Client where pub fn child_storage( &self, id: &BlockId, -<<<<<<< HEAD - storage_key: &StorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { @@ -370,10 +362,6 @@ impl Client where pub fn child_storage_hash( &self, id: &BlockId, -<<<<<<< HEAD - storage_key: &StorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { @@ -415,10 +403,6 @@ impl Client where pub fn read_child_proof( &self, id: &BlockId, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: I, ) -> 
sp_blockchain::Result where diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 94b37916d57b9..a032f5e5e19fe 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -312,11 +312,7 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck -<<<<<<< HEAD - let mut storage: HashMap, ChildInfo)>, _> = HashMap::new(); -======= let mut storage: HashMap, _> = HashMap::new(); ->>>>>>> child_trie_w3_change storage.insert(None, input.top); // create a list of children keys to re-compute roots for @@ -390,10 +386,6 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> ClientResult>> { @@ -414,10 +406,6 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -446,10 +434,6 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, action: A, ) { @@ -462,10 +446,6 @@ impl StateBackend for GenesisOrUnavailableState fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], action: A, @@ -490,10 +470,6 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index bae2c7f7bfbce..8d6c68d6f7d37 100644 --- a/client/src/light/fetcher.rs 
+++ b/client/src/light/fetcher.rs @@ -356,11 +356,6 @@ pub mod tests { use sp_state_machine::Backend; use super::*; -<<<<<<< HEAD - const CHILD_UID_1: &'static [u8] = b"unique_id_1"; - -======= ->>>>>>> child_trie_w3_change type TestChecker = LightDataChecker< NativeExecutor, Blake2Hasher, @@ -407,7 +402,6 @@ pub mod tests { } fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - let child_info1 = ChildInfo::new_default(CHILD_UID_1); use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; let child_info = ChildInfo::new_default(b"child1"); @@ -415,12 +409,7 @@ pub mod tests { // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( -<<<<<<< HEAD - b":child_storage:default:child1".to_vec(), - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change b"key1".to_vec(), b"value1".to_vec(), ).build(); @@ -433,23 +422,13 @@ pub mod tests { // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( &remote_block_id, -<<<<<<< HEAD - &StorageKey(b":child_storage:default:child1".to_vec()), - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, -<<<<<<< HEAD - b":child_storage:default:child1", - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change &[b"key1"], ).unwrap(); @@ -527,12 +506,6 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); -<<<<<<< HEAD - - let child_info = ChildInfo::new_default(CHILD_UID_1); - let child_infos = child_info.info(); -======= ->>>>>>> child_trie_w3_change assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 92e69c44eaef0..bff5061a0b875 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -17,7 +17,7 @@ //! Auxiliaries to help with managing partial changes to accounts state. use super::{ - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, + AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieIdGenerator, }; use crate::exec::StorageKey; @@ -26,13 +26,8 @@ use sp_std::collections::btree_map::{BTreeMap, Entry}; use sp_std::prelude::*; use sp_io::hashing::blake2_256; use sp_runtime::traits::{Bounded, Zero}; -<<<<<<< HEAD -use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance, UpdateBalanceOutcome}; -use frame_support::{storage::child, StorageMap, storage::child::ChildInfo}; -======= use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance}; -use frame_support::{storage::child, StorageMap}; ->>>>>>> child_trie_w3_change +use frame_support::{storage::child, StorageMap, storage::child::ChildInfo}; use frame_system; // Note: we don't provide Option because we can't create @@ -116,7 +111,7 @@ pub trait AccountDb { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<(&TrieId, &ChildInfo)>, + trie_id: Option<&ChildInfo>, location: &StorageKey ) -> Option>; /// If account has an alive contract then return the code hash associated. 
@@ -135,14 +130,10 @@ impl AccountDb for DirectAccountDb { fn get_storage( &self, _account: &T::AccountId, - trie_id: Option<(&TrieId, &ChildInfo)>, + trie_id: Option<&ChildInfo>, location: &StorageKey ) -> Option> { -<<<<<<< HEAD - trie_id.and_then(|(id, child_info)| child::get_raw(id, child_info, &blake2_256(location))) -======= - trie_id.and_then(|id| child::get_raw(&crate::trie_unique_id(&id[..]), &blake2_256(location))) ->>>>>>> child_trie_w3_change + trie_id.and_then(|child_info| child::get_raw(child_info, &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -189,21 +180,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { -<<<<<<< HEAD - child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); -======= child::kill_storage(&info.child_trie_unique_id()); ->>>>>>> child_trie_w3_change >::remove(&address); continue; } // Existing contract is being replaced by a new one. 
(true, Some(info), Some(code_hash)) => { -<<<<<<< HEAD - child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); -======= child::kill_storage(&info.child_trie_unique_id()); ->>>>>>> child_trie_w3_change AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -243,27 +226,16 @@ impl AccountDb for DirectAccountDb { let child_info = &new_info.child_trie_unique_id(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( -<<<<<<< HEAD - &new_info.trie_id[..], child_info, -======= - &new_info.child_trie_unique_id(), ->>>>>>> child_trie_w3_change &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; -<<<<<<< HEAD - child::put_raw(&new_info.trie_id[..], child_info, &blake2_256(&k), &value[..]); - } else { - child::kill(&new_info.trie_id[..], child_info, &blake2_256(&k)); -======= - child::put_raw(&new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(child_info, &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.child_trie_unique_id(), &blake2_256(&k)); ->>>>>>> child_trie_w3_change + child::kill(child_info, &blake2_256(&k)); } } @@ -368,7 +340,7 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<(&TrieId, &ChildInfo)>, + trie_id: Option<&ChildInfo>, location: &StorageKey ) -> Option> { self.local diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 77cf6f7f7de90..905aef1957479 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -277,7 +277,7 @@ pub enum DeferredAction { pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub parent: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, - pub self_trie_info: Option<(TrieId, ChildInfo)>, + pub self_trie_info: Option, pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, @@ 
-314,7 +314,7 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_info: Option<(TrieId, ChildInfo)>) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_info: Option) -> ExecutionContext<'b, T, V, L> { ExecutionContext { @@ -531,8 +531,7 @@ where { let (output, change_set, deferred) = { let mut nested = self.nested(dest, trie_id.map(|trie_id| { - let child_info = crate::trie_unique_id(&trie_id); - (trie_id, child_info) + crate::trie_unique_id(&trie_id) })); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) @@ -684,8 +683,7 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - let trie_id = self.ctx.self_trie_info.as_ref() - .map(|info| ((&info.0, &info.1))); + let trie_id = self.ctx.self_trie_info.as_ref(); self.ctx.overlay.get_storage( &self.ctx.self_account, trie_id, diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 5c423fa5796df..b92c57b431021 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -689,7 +689,7 @@ impl Module { let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, - Some((&contract_info.trie_id, &child_info)), + Some(&child_info), &key, ); Ok(maybe_value) @@ -805,18 +805,10 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( -<<<<<<< HEAD - &origin_contract.trie_id, -======= ->>>>>>> child_trie_w3_change &origin_contract.child_trie_unique_id(), &blake2_256(key), ).map(|value| { child::kill( -<<<<<<< HEAD - &origin_contract.trie_id, -======= ->>>>>>> child_trie_w3_change &origin_contract.child_trie_unique_id(), &blake2_256(key), ); @@ -838,10 +830,6 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( -<<<<<<< HEAD - &origin_contract.trie_id, -======= ->>>>>>> child_trie_w3_change &origin_contract.child_trie_unique_id(), &blake2_256(key), &value, @@ -946,11 +934,7 @@ decl_storage! 
{ impl OnKilledAccount for Module { fn on_killed_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { -<<<<<<< HEAD - child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); -======= child::kill_storage(&info.child_trie_unique_id()); ->>>>>>> child_trie_w3_change } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 6d26519dd63fb..dfcbc997c5b22 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,10 +223,6 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( -<<<<<<< HEAD - &alive_contract_info.trie_id, -======= ->>>>>>> child_trie_w3_change &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); @@ -250,10 +246,6 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( -<<<<<<< HEAD - &alive_contract_info.trie_id, -======= ->>>>>>> child_trie_w3_change &alive_contract_info.child_trie_unique_id(), ); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 49472fd3e0053..04574351fc289 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -311,8 +311,8 @@ fn account_removal_removes_storage() { let trie_id2 = ::TrieIdGenerator::trie_id(&2); let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); - let child_info1 = Some((&trie_id1, &child_info1)); - let child_info2 = Some((&trie_id2, &child_info2)); + let child_info1 = Some(&child_info1); + let child_info2 = Some(&child_info2); let key1 = &[1; 32]; let key2 = &[2; 32]; diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index e6050d8d43500..658908d258a2f 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -26,10 +26,6 @@ pub use sp_core::storage::{ChildInfo, ChildType}; /// Return the value of the item in storage under `key`, or 
`None` if there is no explicit entry. pub fn get( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { @@ -53,10 +49,6 @@ pub fn get( /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> T { @@ -66,10 +58,6 @@ pub fn get_or_default( /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], default_value: T, @@ -80,10 +68,6 @@ pub fn get_or( /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], default_value: F, @@ -93,10 +77,6 @@ pub fn get_or_else T>( /// Put `value` in storage under `key`. pub fn put( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], value: &T, @@ -114,10 +94,6 @@ pub fn put( /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { @@ -131,10 +107,6 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. 
pub fn take_or_default( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> T { @@ -144,10 +116,6 @@ pub fn take_or_default( /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], default_value: T, @@ -158,10 +126,6 @@ pub fn take_or( /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], default_value: F, @@ -171,10 +135,6 @@ pub fn take_or_else T>( /// Check to see if `key` has an explicit entry in storage. pub fn exists( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> bool { @@ -188,10 +148,6 @@ pub fn exists( /// Remove all `storage_key` key/values pub fn kill_storage( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, ) { match child_info.child_type() { @@ -203,10 +159,6 @@ pub fn kill_storage( /// Ensure `key` has no explicit entry in storage. pub fn kill( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) { @@ -222,10 +174,6 @@ pub fn kill( /// Get a Vec of bytes from storage. pub fn get_raw( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { @@ -239,10 +187,6 @@ pub fn get_raw( /// Put a raw byte slice into storage. 
pub fn put_raw( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], value: &[u8], diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 188cb3e351eab..4d2d61998637f 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -47,10 +47,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -63,10 +59,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -83,10 +75,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -96,10 +84,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -112,10 +96,6 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). fn set_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: Vec, value: Vec, @@ -131,10 +111,6 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). 
fn clear_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) { @@ -149,10 +125,6 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> bool { @@ -165,20 +137,12 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. -<<<<<<< HEAD - fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: &ChildInfo); -======= fn kill_child_storage(&mut self, child_info: &ChildInfo); ->>>>>>> child_trie_w3_change /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -186,10 +150,6 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. fn clear_child_prefix( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ); @@ -200,10 +160,6 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. Return whether the operation succeeds. 
fn place_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: Vec, value: Option>, diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index fb997c47f0b7b..ef18e3f3dd496 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -84,33 +84,6 @@ pub trait Storage { self.storage(key).map(|s| s.to_vec()) } -<<<<<<< HEAD - /// All Child api uses : - /// - A `child_storage_key` to define the anchor point for the child proof - /// (commonly the location where the child root is stored in its parent trie). - /// - A `child_storage_types` to identify the kind of the child type and how its - /// `child definition` parameter is encoded. - /// - A `child_definition_parameter` which is the additional information required - /// to use the child trie. For instance defaults child tries requires this to - /// contain a collision free unique id. - /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. - fn child_get( - &self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, &child_info, key).map(|s| s.to_vec()) - } - -======= ->>>>>>> child_trie_w3_change /// Get `key` from storage, placing the value into `value_out` and return the number of /// bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. 
@@ -201,11 +174,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.child_storage(storage_key, &child_info, key) -======= self.child_storage(&child_info, key) ->>>>>>> child_trie_w3_change .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -228,16 +197,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.set_child_storage(storage_key, &child_info, key.to_vec(), value.to_vec()); - } - - /// Clear the storage of the given `key` and its value. - fn clear(&mut self, key: &[u8]) { - self.clear_storage(key) -======= self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. @@ -252,11 +212,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.clear_child_storage(storage_key, &child_info, key); -======= self.clear_child_storage(&child_info, key); ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. @@ -270,11 +226,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.kill_child_storage(storage_key, &child_info); -======= self.kill_child_storage(&child_info); ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. 
@@ -289,11 +241,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.exists_child_storage(storage_key, &child_info, key) -======= self.exists_child_storage(&child_info, key) ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. @@ -308,11 +256,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.clear_child_prefix(storage_key, &child_info, prefix); -======= self.clear_child_prefix(&child_info, prefix); ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. @@ -452,15 +396,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) -> Option> { -<<<<<<< HEAD - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.next_child_storage_key(storage_key, &child_info, key) -======= let child_info = ChildInfo::new_default(storage_key); self.next_child_storage_key(&child_info, key) ->>>>>>> child_trie_w3_change } } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 5507dba6fc272..d26a8baf0bf51 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,11 +20,7 @@ use log::warn; use sp_core::{Hasher, InnerHasher}; use codec::Encode; -<<<<<<< HEAD use sp_core::storage::{ChildInfo, ChildrenMap}; -======= -use sp_core::storage::ChildInfo; ->>>>>>> child_trie_w3_change use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ trie_backend::TrieBackend, @@ -57,10 +53,6 @@ pub trait Backend: std::fmt::Debug { /// Get keyed child storage or None if 
there is nothing associated. fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error>; @@ -68,10 +60,6 @@ pub trait Backend: std::fmt::Debug { /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -86,10 +74,6 @@ pub trait Backend: std::fmt::Debug { /// true if a key exists in child storage. fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result { @@ -102,10 +86,6 @@ pub trait Backend: std::fmt::Debug { /// Return the next key in child storage in lexicographic order or `None` if there is no value. fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8] ) -> Result, Self::Error>; @@ -113,10 +93,6 @@ pub trait Backend: std::fmt::Debug { /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ); @@ -136,10 +112,6 @@ pub trait Backend: std::fmt::Debug { /// call `f` for each of those keys. fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -158,10 +130,6 @@ pub trait Backend: std::fmt::Debug { /// is true if child storage root equals default storage root. 
fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -182,10 +150,6 @@ pub trait Backend: std::fmt::Debug { /// Get all keys of child storage with given prefix fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec { @@ -211,11 +175,7 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, -<<<<<<< HEAD - I2: IntoIterator, -======= I2: IntoIterator, ->>>>>>> child_trie_w3_change H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); @@ -224,30 +184,21 @@ pub trait Backend: std::fmt::Debug { // child first for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = -<<<<<<< HEAD - self.child_storage_root(&storage_key[..], &child_info, child_delta); + self.child_storage_root(&child_info, child_delta); + let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { if return_child_roots { - result_child_roots.push((storage_key.clone(), None)); + result_child_roots.push((prefixed_storage_key.clone(), None)); } - child_roots.push((storage_key, None)); + child_roots.push((prefixed_storage_key, None)); } else { if return_child_roots { - child_roots.push((storage_key.clone(), Some(child_root.encode()))); - result_child_roots.push((storage_key, Some(child_root))); + child_roots.push((prefixed_storage_key.clone(), Some(child_root.encode()))); + result_child_roots.push((prefixed_storage_key, Some(child_root))); } else { - child_roots.push((storage_key, Some(child_root.encode()))); + child_roots.push((prefixed_storage_key, Some(child_root.encode()))); } -======= - self.child_storage_root(&child_info, child_delta); - let prefixed_storage_key = child_info.prefixed_storage_key(); - txs.consolidate(child_txs); - if empty { - 
child_roots.push((prefixed_storage_key, None)); - } else { - child_roots.push((prefixed_storage_key, Some(child_root.encode()))); ->>>>>>> child_trie_w3_change } } let (root, parent_txs) = self.storage_root( @@ -287,10 +238,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -299,10 +246,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -315,10 +258,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -331,10 +270,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -352,10 +287,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -392,11 +323,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( -<<<<<<< HEAD - Option<(StorageKey, ChildInfo)>, -======= Option, ->>>>>>> child_trie_w3_change StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index a92619a449783..9499713484d80 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -129,12 +129,7 @@ impl Externalities for BasicExternalities { fn child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, - 
_child_info: &ChildInfo, -======= child_info: &ChildInfo, ->>>>>>> child_trie_w3_change key: &[u8], ) -> Option { self.inner.children_default.get(child_info.storage_key()) @@ -143,10 +138,6 @@ impl Externalities for BasicExternalities { fn child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { @@ -155,10 +146,6 @@ impl Externalities for BasicExternalities { fn original_child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { @@ -167,10 +154,6 @@ impl Externalities for BasicExternalities { fn original_child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { @@ -184,12 +167,7 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, - _child_info: &ChildInfo, -======= child_info: &ChildInfo, ->>>>>>> child_trie_w3_change key: &[u8], ) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); @@ -211,10 +189,6 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: StorageKey, value: Option, @@ -233,12 +207,7 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, - _child_info: &ChildInfo, -======= child_info: &ChildInfo, ->>>>>>> child_trie_w3_change ) { self.inner.children_default.remove(child_info.storage_key()); } @@ -265,12 +234,7 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, - _child_info: &ChildInfo, -======= child_info: &ChildInfo, ->>>>>>> child_trie_w3_change prefix: &[u8], ) { if let 
Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { @@ -317,11 +281,7 @@ impl Externalities for BasicExternalities { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() -<<<<<<< HEAD - .child_storage_root(storage_key.as_ref(), &child.child_info, delta).0 -======= .child_storage_root(&child.child_info, delta).0 ->>>>>>> child_trie_w3_change } else { empty_child_trie_root::>() }.encode() @@ -374,42 +334,18 @@ mod tests { #[test] fn children_works() { -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(b"unique_id_1"); - let child_storage = b":child_storage:default:test".to_vec(); - -======= let child_info = ChildInfo::new_default(b"storage_key"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut ext = BasicExternalities::new(Storage { top: Default::default(), children_default: map![ child_info.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], -<<<<<<< HEAD - child_info: child_info1.clone(), -======= - child_info: child_info.to_owned(), ->>>>>>> child_trie_w3_change + child_info: child_info.clone(), } ] }); -<<<<<<< HEAD - let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - - assert_eq!(ext.child_storage(child(), &child_info1, b"doe"), Some(b"reindeer".to_vec())); - - ext.set_child_storage(child(), &child_info1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), &child_info1, b"dog"), Some(b"puppy".to_vec())); - - ext.clear_child_storage(child(), &child_info1, b"dog"); - assert_eq!(ext.child_storage(child(), &child_info1, b"dog"), None); - - ext.kill_child_storage(child(), &child_info1); - assert_eq!(ext.child_storage(child(), &child_info1, b"doe"), None); -======= assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); ext.set_child_storage(child_info, b"dog".to_vec(), b"puppy".to_vec()); @@ -420,7 +356,6 @@ mod tests { ext.kill_child_storage(child_info); 
assert_eq!(ext.child_storage(child_info, b"doe"), None); ->>>>>>> child_trie_w3_change } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 6ea9edd10029b..d9b80987d368d 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -138,11 +138,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { -<<<<<<< HEAD - let child_info = changes.child_info(sk).clone(); -======= let child_info = changes.default_child_info(sk).cloned(); ->>>>>>> child_trie_w3_change ( changes.committed.children_default.get(sk).map(|c| &c.0), changes.prospective.children_default.get(sk).map(|c| &c.0), @@ -162,11 +158,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { if let Some(child_info) = child_info.as_ref() { -<<<<<<< HEAD - if !backend.exists_child_storage(sk, child_info, k) -======= if !backend.exists_child_storage(&child_info, k) ->>>>>>> child_trie_w3_change .map_err(|e| format!("{}", e))? 
{ return Ok(map); } @@ -368,14 +360,8 @@ mod test { OverlayedChanges, Configuration, ) { -<<<<<<< HEAD - - let child_info1 = ChildInfo::new_default(b"unique_id_1"); - let child_info2 = ChildInfo::new_default(b"unique_id_2"); -======= let child_info_1 = ChildInfo::new_default(b"storage_key1"); let child_info_2 = ChildInfo::new_default(b"storage_key2"); ->>>>>>> child_trie_w3_change let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -452,21 +438,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) -<<<<<<< HEAD - ].into_iter().collect(), child_info1.clone())), -======= - ].into_iter().collect(), child_info_1.to_owned())), ->>>>>>> child_trie_w3_change + ].into_iter().collect(), child_info_1.clone())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) -<<<<<<< HEAD - ].into_iter().collect(), child_info2)), -======= - ].into_iter().collect(), child_info_2.to_owned())), ->>>>>>> child_trie_w3_change + ].into_iter().collect(), child_info_2)), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -489,11 +467,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) -<<<<<<< HEAD - ].into_iter().collect(), child_info1)), -======= - ].into_iter().collect(), child_info_1.to_owned())), ->>>>>>> child_trie_w3_change + ].into_iter().collect(), child_info_1)), ].into_iter().collect(), }, collect_extrinsics: true, diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 3e44dfd6204cb..f2ce9738bb2a2 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -23,12 +23,8 @@ use crate::{ }; use sp_core::{ -<<<<<<< HEAD Hasher, - storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, -======= storage::{well_known_keys::is_child_storage_key, ChildInfo}, ->>>>>>> 
child_trie_w3_change traits::Externalities, hexdisplay::HexDisplay, }; use sp_trie::{trie_types::Layout, empty_child_trie_root}; @@ -209,19 +205,11 @@ where fn child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.storage(key); - } else { - return None; - } + return self.storage(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay @@ -244,19 +232,11 @@ where fn child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.storage_hash(key); - } else { - return None; - } + return self.storage_hash(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay @@ -279,19 +259,11 @@ where fn original_child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.original_storage(key); - } else { - return None; - } + return self.original_storage(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend @@ -310,19 +282,11 @@ where fn original_child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.original_storage_hash(key); - } else { - return None; - } + return self.original_storage_hash(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend @@ -356,19 +320,11 @@ where fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: 
ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> bool { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.exists_storage(key); - } else { - return false; - } + return self.exists_storage(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -405,19 +361,11 @@ where fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.next_storage_key(key); - } else { - return None; - } + return self.next_storage_key(key); } let next_backend_key = self.backend .next_child_storage_key(child_info, key) @@ -459,21 +407,12 @@ where fn place_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: StorageKey, value: Option, ) { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.place_storage(key, value); - } else { - trace!(target: "state-trace", "Ignoring place_child_storage on top trie"); - return; - } + return self.place_storage(key, value); } trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, @@ -489,10 +428,6 @@ where fn kill_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, ) { if child_info.is_top_trie() { @@ -532,20 +467,11 @@ where fn clear_child_prefix( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.clear_prefix(prefix); - } else { - trace!(target: "state-trace", "Ignoring clear_child_prefix on top trie"); - return; - } + return self.clear_prefix(prefix); } trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", @@ -589,18 
+515,11 @@ where let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { -<<<<<<< HEAD - let root = self.storage_transaction_cache.transaction_child_storage_root.get(storage_key.as_ref()) + let root = self.storage_transaction_cache.transaction_child_storage_root + .get(&prefixed_storage_key) .map(|root| root.encode()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()).encode() -======= - let root = self - .storage(prefixed_storage_key.as_slice()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or( - empty_child_trie_root::>() ->>>>>>> child_trie_w3_change + empty_child_trie_root::>().encode() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, @@ -610,15 +529,9 @@ where root } else { -<<<<<<< HEAD - if let Some(child_info) = self.overlay.child_info(storage_key).clone() { - let (root, _is_empty, _) = { - let delta = self.overlay.committed.children.get(storage_key) -======= if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { - let (root, is_empty, _) = { + let (root, _is_empty, _) = { let delta = self.overlay.committed.children_default.get(storage_key) ->>>>>>> child_trie_w3_change .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) .chain( @@ -627,27 +540,10 @@ where .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); -<<<<<<< HEAD - self.backend.child_storage_root(storage_key, child_info, delta) - }; - - let root = root.encode(); -======= self.backend.child_storage_root(&child_info, delta) }; let root = root.encode(); - // We store update in the overlay in order to be able to use 'self.storage_transaction' - // cache. This is brittle as it rely on Ext only querying the trie backend for - // storage root. 
- // A better design would be to manage 'child_storage_transaction' in a - // similar way as 'storage_transaction' but for each child trie. - if is_empty { - self.overlay.set_storage(prefixed_storage_key, None); - } else { - self.overlay.set_storage(prefixed_storage_key, Some(root.clone())); - } ->>>>>>> child_trie_w3_change trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", self.id, @@ -747,11 +643,6 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; -<<<<<<< HEAD - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; -======= ->>>>>>> child_trie_w3_change fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { @@ -863,23 +754,14 @@ mod tests { #[test] fn next_child_storage_key_works() { -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(CHILD_UUID_1); -======= let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); -<<<<<<< HEAD - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![30], Some(vec![31])); -======= overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); ->>>>>>> child_trie_w3_change let backend = Storage { top: map![], children_default: map![ @@ -889,11 +771,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], -<<<<<<< HEAD - child_info: child_info1.clone(), -======= - child_info: child_info.to_owned(), ->>>>>>> child_trie_w3_change + child_info: child_info.clone(), } ], }.into(); @@ -902,25 +780,6 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay -<<<<<<< HEAD - assert_eq!(ext.next_child_storage_key(child(), 
&child_info1, &[5]), Some(vec![10])); - - // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[10]), Some(vec![30])); - - // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[20]), Some(vec![30])); - - // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[30]), Some(vec![40])); - - drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - - // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[40]), Some(vec![50])); -======= assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete @@ -938,29 +797,18 @@ mod tests { // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); ->>>>>>> child_trie_w3_change } #[test] fn child_storage_works() { -<<<<<<< HEAD use sp_core::InnerHasher; - let child_info1 = ChildInfo::new_default(CHILD_UUID_1); - -======= let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); -<<<<<<< HEAD - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![30], Some(vec![31])); -======= overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); ->>>>>>> child_trie_w3_change let backend = Storage { top: map![], children_default: map![ @@ -970,37 +818,13 @@ mod tests { vec![20] => vec![20], vec![30] => vec![40] ], -<<<<<<< HEAD - 
child_info: child_info1.clone(), -======= - child_info: child_info.to_owned(), ->>>>>>> child_trie_w3_change + child_info: child_info.clone(), } ], }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); -<<<<<<< HEAD - assert_eq!(ext.child_storage(child(), &child_info1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(child(), &child_info1, &[10]), Some(vec![10])); - assert_eq!( - ext.child_storage_hash(child(), &child_info1, &[10]), - Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), - ); - - assert_eq!(ext.child_storage(child(), &child_info1, &[20]), None); - assert_eq!(ext.original_child_storage(child(), &child_info1, &[20]), Some(vec![20])); - assert_eq!( - ext.child_storage_hash(child(), &child_info1, &[20]), - None, - ); - - assert_eq!(ext.child_storage(child(), &child_info1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(child(), &child_info1, &[30]), Some(vec![40])); - assert_eq!( - ext.child_storage_hash(child(), &child_info1, &[30]), -======= assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!(ext.original_child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( @@ -1019,7 +843,6 @@ mod tests { assert_eq!(ext.original_child_storage(child_info, &[30]), Some(vec![40])); assert_eq!( ext.child_storage_hash(child_info, &[30]), ->>>>>>> child_trie_w3_change Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 101ce276acca8..4e170c3b75955 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -24,17 +24,10 @@ use crate::{ use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use sp_core::{Hasher, InnerHasher}; use sp_trie::{ -<<<<<<< HEAD - MemoryDB, default_child_trie_root, TrieConfiguration, trie_types::Layout, -}; -use codec::Codec; -use 
sp_core::storage::{ChildInfo, Storage}; -======= - MemoryDB, child_trie_root, empty_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, empty_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; use sp_core::storage::{ChildInfo, ChildType, Storage}; ->>>>>>> child_trie_w3_change /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -54,11 +47,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { -<<<<<<< HEAD - inner: HashMap, BTreeMap>, -======= inner: HashMap, BTreeMap>, ->>>>>>> child_trie_w3_change // This field is only needed for returning reference in `as_trie_backend`. trie: Option, H>>, _hasher: PhantomData, @@ -99,11 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< -<<<<<<< HEAD - T: IntoIterator, StorageCollection)> -======= T: IntoIterator, StorageCollection)> ->>>>>>> child_trie_w3_change >( &self, changes: T, @@ -122,17 +107,10 @@ impl InMemory { } } -<<<<<<< HEAD -impl From, BTreeMap>> - for InMemory -{ - fn from(inner: HashMap, BTreeMap>) -> Self { -======= impl From, BTreeMap>> for InMemory { fn from(inner: HashMap, BTreeMap>) -> Self { ->>>>>>> child_trie_w3_change InMemory { inner, trie: None, @@ -143,13 +121,8 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { -<<<<<<< HEAD - let mut inner: HashMap, BTreeMap> - = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); -======= let mut inner: HashMap, BTreeMap> = inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); ->>>>>>> child_trie_w3_change inner.insert(None, inners.top); InMemory { inner, @@ -171,21 +144,12 @@ impl From> for InMemory { } } -<<<<<<< HEAD -impl From, StorageCollection)>> - for InMemory { - fn from( 
- inner: Vec<(Option<(StorageKey, ChildInfo)>, StorageCollection)>, - ) -> Self { - let mut expanded: HashMap, BTreeMap> -======= impl From, StorageCollection)>> for InMemory { fn from( inner: Vec<(Option, StorageCollection)>, ) -> Self { let mut expanded: HashMap, BTreeMap> ->>>>>>> child_trie_w3_change = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -200,28 +164,16 @@ impl From, StorageCollection)>> } impl InMemory { -<<<<<<< HEAD - /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], &v.1)) - ) -======= /// Child storage infos iterator. pub fn child_storage_infos(&self) -> impl Iterator { self.inner.iter().filter_map(|item| item.0.as_ref()) ->>>>>>> child_trie_w3_change } } impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( -<<<<<<< HEAD - Option<(StorageKey, ChildInfo)>, -======= Option, ->>>>>>> child_trie_w3_change StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -232,10 +184,6 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -259,10 +207,6 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, mut f: F, ) { @@ -272,10 +216,6 @@ impl Backend for InMemory where H::Out: Codec { fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -307,10 +247,6 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> 
(H::Out, bool, Self::Transaction) @@ -326,11 +262,7 @@ impl Backend for InMemory where H::Out: Codec { .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); -<<<<<<< HEAD let root = Layout::::trie_root( -======= - let root = child_trie_root::, _, _, _>( ->>>>>>> child_trie_w3_change existing_pairs.chain(transaction.iter().cloned()) .collect::>() .into_iter() @@ -356,10 +288,6 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -386,10 +314,6 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec { @@ -435,12 +359,8 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); -<<<<<<< HEAD - let child_info = ChildInfo::new_default(b"unique_id_1"); -======= let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut storage = storage.update( vec![( Some(child_info.clone()), @@ -448,11 +368,7 @@ mod tests { )] ); let trie_backend = storage.as_trie_backend().unwrap(); -<<<<<<< HEAD - assert_eq!(trie_backend.child_storage(b"1", &child_info, b"2").unwrap(), -======= assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), ->>>>>>> child_trie_w3_change Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 1bdf52e36a82b..1a735943c7ea0 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -550,10 +550,6 @@ where /// Generate child storage read proof. 
pub fn prove_child_read( mut backend: B, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: I, ) -> Result> @@ -593,10 +589,6 @@ where /// Generate storage read proof on pre-created trie backend. pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: I, ) -> Result> @@ -610,11 +602,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend -<<<<<<< HEAD - .child_storage(storage_key, child_info, key.as_ref()) -======= .child_storage(child_info, key.as_ref()) ->>>>>>> child_trie_w3_change .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -689,12 +677,7 @@ where H: Hasher, H::Out: Ord + Codec, { -<<<<<<< HEAD - // Not a prefixed memory db, using empty unique id and include root resolution. - proving_backend.child_storage(storage_key, &ChildInfo::top_trie(), key) -======= proving_backend.child_storage(child_info, key) ->>>>>>> child_trie_w3_change .map_err(|e| Box::new(e) as Box) } @@ -716,11 +699,6 @@ mod tests { fallback_succeeds: bool, } -<<<<<<< HEAD - const CHILD_UID_1: &'static [u8] = b"unique_id_1"; - -======= ->>>>>>> child_trie_w3_change impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -949,13 +927,8 @@ mod tests { #[test] fn set_child_storage_works() { -<<<<<<< HEAD - - let child_info1 = ChildInfo::new_default(CHILD_UID_1); -======= let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -969,43 +942,23 @@ mod tests { ); ext.set_child_storage( -<<<<<<< HEAD - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - &child_info1, -======= child_info, ->>>>>>> 
child_trie_w3_change b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( -<<<<<<< HEAD - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( -<<<<<<< HEAD - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - &child_info1, - ); - assert_eq!( - ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - &child_info1, -======= child_info, ); assert_eq!( ext.child_storage( child_info, ->>>>>>> child_trie_w3_change b"abc" ), None @@ -1014,13 +967,8 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { -<<<<<<< HEAD - - let child_info1 = ChildInfo::new_default(CHILD_UID_1); -======= let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -1047,12 +995,7 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, -<<<<<<< HEAD - b":child_storage:default:sub1", - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( @@ -1076,42 +1019,4 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } -<<<<<<< HEAD -======= - - #[test] - fn child_storage_uuid() { - - let child_info_1 = ChildInfo::new_default(b"sub_test1"); - let child_info_2 = ChildInfo::new_default(b"sub_test2"); - - use crate::trie_backend::tests::test_trie; - let mut overlay = OverlayedChanges::default(); - - let mut transaction = { - let backend = test_trie(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - 
changes_trie::disabled_state::<_, u64>(), - None, - ); - ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); - ext.storage_root(); - cache.transaction.unwrap() - }; - let mut duplicate = false; - for (k, (value, rc)) in transaction.drain().iter() { - // look for a key inserted twice: transaction rc is 2 - if *rc == 2 { - duplicate = true; - println!("test duplicate for {:?} {:?}", k, value); - } - } - assert!(!duplicate); - } ->>>>>>> child_trie_w3_change } diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 4fa7f89d1991b..517073ea92853 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -36,6 +36,9 @@ use sp_core::Hasher; /// Storage key. pub type StorageKey = Vec; +/// Storage key. +pub type PrefixedStorageKey = Vec; + /// Storage value. pub type StorageValue = Vec; @@ -77,11 +80,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, /// Child storage changes. -<<<<<<< HEAD - pub children: HashMap, ChildInfo)>, -======= pub children_default: HashMap, ChildInfo)>, ->>>>>>> child_trie_w3_change } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -135,7 +134,7 @@ pub struct StorageTransactionCache { /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, /// The storage child roots after applying the transaction. - pub(crate) transaction_child_storage_root: BTreeMap>, + pub(crate) transaction_child_storage_root: BTreeMap>, /// Contains the changes trie transaction. pub(crate) changes_trie_transaction: Option>>, /// The storage root after applying the changes trie transaction. @@ -254,10 +253,6 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. 
pub(crate) fn set_child_storage( &mut self, -<<<<<<< HEAD - storage_key: StorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: StorageKey, val: Option, @@ -286,10 +281,6 @@ impl OverlayedChanges { /// [`discard_prospective`]: #method.discard_prospective pub(crate) fn clear_child_storage( &mut self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); @@ -364,10 +355,6 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) { @@ -450,11 +437,7 @@ impl OverlayedChanges { fn drain_committed(&mut self) -> ( impl Iterator)>, impl Iterator)>, ChildInfo))>, -<<<<<<< HEAD - ){ -======= ) { ->>>>>>> child_trie_w3_change assert!(self.prospective.is_empty()); ( std::mem::replace(&mut self.committed.top, Default::default()) @@ -572,12 +555,6 @@ impl OverlayedChanges { .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), -<<<<<<< HEAD - self.child_info(storage_key) - .expect("child info initialized in either committed or prospective") - .clone(), -======= ->>>>>>> child_trie_w3_change ) ); @@ -625,13 +602,8 @@ impl OverlayedChanges { /// Get child info for a storage key. /// Take the latest value so prospective first. 
-<<<<<<< HEAD - pub fn child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { - if let Some((_, ci)) = self.prospective.children.get(storage_key) { -======= pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { if let Some((_, ci)) = self.prospective.children_default.get(storage_key) { ->>>>>>> child_trie_w3_change return Some(&ci); } if let Some((_, ci)) = self.committed.children_default.get(storage_key) { @@ -890,21 +862,12 @@ mod tests { let child_info = &child_info; let child = child_info.storage_key(); let mut overlay = OverlayedChanges::default(); -<<<<<<< HEAD - overlay.set_child_storage(child.clone(), &child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), &child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), &child_info, vec![40], Some(vec![40])); - overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), &child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), &child_info, vec![30], None); -======= overlay.set_child_storage(child_info, vec![20], Some(vec![20])); overlay.set_child_storage(child_info, vec![30], Some(vec![30])); overlay.set_child_storage(child_info, vec![40], Some(vec![40])); overlay.commit_prospective(); overlay.set_child_storage(child_info, vec![10], Some(vec![10])); overlay.set_child_storage(child_info, vec![30], None); ->>>>>>> child_trie_w3_change // next_prospective < next_committed let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap(); @@ -926,11 +889,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); -<<<<<<< HEAD - overlay.set_child_storage(child.clone(), &child_info, vec![50], Some(vec![50])); -======= overlay.set_child_storage(child_info, vec![50], Some(vec![50])); ->>>>>>> child_trie_w3_change // next_prospective, no next_committed let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap(); 
assert_eq!(next_to_40.0.to_vec(), vec![50]); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 486463a83a094..92abe89a19f03 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -23,13 +23,8 @@ use log::debug; use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; use sp_core::{Hasher, InnerHasher}; use sp_trie::{ -<<<<<<< HEAD - MemoryDB, default_child_trie_root, read_trie_value_with, - record_all_keys, -======= - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, + MemoryDB, empty_child_trie_root, read_trie_value_with, record_all_keys ->>>>>>> child_trie_w3_change }; pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; @@ -150,14 +145,8 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Produce proof for a child key query. pub fn child_storage( &mut self, -<<<<<<< HEAD - storage_key: &[u8], child_info: &ChildInfo, key: &[u8], -======= - child_info: &ChildInfo, - key: &[u8] ->>>>>>> child_trie_w3_change ) -> Result>, String> { let storage_key = child_info.storage_key(); let root = self.storage(storage_key)? 
@@ -171,12 +160,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let map_e = |e| format!("Trie lookup error: {}", e); -<<<<<<< HEAD read_trie_value_with::, _, _>( -======= - read_child_trie_value_with::, _, _>( - child_info.keyspace(), ->>>>>>> child_trie_w3_change &eph, &root, key, @@ -299,10 +283,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -311,10 +291,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -327,10 +303,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -347,10 +319,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -368,10 +336,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { @@ -387,10 +351,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -398,12 +358,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> I: IntoIterator, Option>)>, H::Out: Ord { -<<<<<<< HEAD - let (root, is_empty, mut tx) = self.0.child_storage_root(storage_key, child_info, delta); + let (root, is_empty, mut tx) = self.0.child_storage_root(child_info, delta); 
(root, is_empty, tx.remove(child_info)) -======= - self.0.child_storage_root(child_info, delta) ->>>>>>> child_trie_w3_change } } @@ -516,19 +472,6 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(b"unique_id_1"); - let child_info2 = ChildInfo::new_default(b"unique_id_2"); - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); - let own1 = subtrie1.into_owned(); - let own2 = subtrie2.into_owned(); - let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), child_info1.clone())), - (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), child_info2.clone())), -======= let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; @@ -538,38 +481,25 @@ mod tests { (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), (Some(child_info_2.clone()), ->>>>>>> child_trie_w3_change (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), -<<<<<<< HEAD - in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())), + in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())), false, -======= - in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())) ->>>>>>> child_trie_w3_change ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), vec![i] )); (28..65).for_each(|i| assert_eq!( -<<<<<<< HEAD - in_memory.child_storage(&own1[..], &child_info1, &[i]).unwrap().unwrap(), - vec![i] - )); - (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], &child_info2, 
&[i]).unwrap().unwrap(), -======= in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), ->>>>>>> child_trie_w3_change vec![i] )); @@ -597,11 +527,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); -<<<<<<< HEAD - assert_eq!(proving.child_storage(&own1[..], &child_info1, &[64]), Ok(Some(vec![64]))); -======= assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); ->>>>>>> child_trie_w3_change let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -609,11 +535,7 @@ mod tests { proof ).unwrap(); assert_eq!( -<<<<<<< HEAD - proof_check.child_storage(&own1[..], &child_info1, &[64]).unwrap().unwrap(), -======= proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), ->>>>>>> child_trie_w3_change vec![64] ); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 2b6333bae8540..d60e8e637b39d 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -16,17 +16,10 @@ //! Trie-based state machine backend. 
use log::{warn, debug}; -<<<<<<< HEAD use sp_core::Hasher; -use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildrenMap}; -======= -use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildType}; ->>>>>>> child_trie_w3_change +use sp_core::storage::{ChildInfo, ChildType, ChildrenMap}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -91,22 +84,14 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key)? { + if let Some(essence) = self.child_essence(child_info)? { essence.storage(child_info, key) } else { Ok(None) } -======= - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.essence.child_storage(child_info, key) ->>>>>>> child_trie_w3_change } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -115,22 +100,14 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key)? { + if let Some(essence) = self.child_essence(child_info)? 
{ essence.next_storage_key(child_info, key) } else { Ok(None) } -======= - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.essence.next_child_storage_key(child_info, key) ->>>>>>> child_trie_w3_change } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -143,39 +120,23 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], child_info: &ChildInfo, f: F, ) { - if let Ok(Some(essence)) = self.child_essence(storage_key) { + if let Ok(Some(essence)) = self.child_essence(child_info) { essence.for_keys(child_info, f) } -======= - child_info: &ChildInfo, - f: F, - ) { - self.essence.for_keys_in_child_storage(child_info, f) ->>>>>>> child_trie_w3_change } fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, ) { -<<<<<<< HEAD - if let Ok(Some(essence)) = self.child_essence(storage_key) { + if let Ok(Some(essence)) = self.child_essence(child_info) { essence.for_keys_with_prefix(child_info, prefix, f) } -======= - self.essence.for_child_keys_with_prefix(child_info, prefix, f) ->>>>>>> child_trie_w3_change } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -245,10 +206,6 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -261,12 +218,8 @@ impl, H: Hasher> Backend for TrieBackend where }; let mut write_overlay = S::Overlay::default(); -<<<<<<< HEAD - let mut root: H::Out = match self.storage(storage_key) { -======= let prefixed_storage_key = child_info.prefixed_storage_key(); let mut root = match self.storage(prefixed_storage_key.as_slice()) { ->>>>>>> child_trie_w3_change Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -284,12 
+237,7 @@ impl, H: Hasher> Backend for TrieBackend where &mut write_overlay, ); -<<<<<<< HEAD match delta_trie_root::, _, _, _, _>( -======= - match child_delta_trie_root::, _, _, _, _, _>( - child_info.keyspace(), ->>>>>>> child_trie_w3_change &mut eph, root, delta @@ -316,9 +264,9 @@ impl, H: Hasher> TrieBackend where { fn child_essence<'a>( &'a self, - storage_key: &[u8], + child_info: &ChildInfo, ) -> Result>, >::Error> { - let root: Option = self.storage(storage_key)? + let root: Option = self.storage(&child_info.prefixed_storage_key()[..])? .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); Ok(if let Some(root) = root { Some(TrieBackendEssence::new(self.essence.backend_storage(), root)) @@ -336,23 +284,13 @@ pub mod tests { use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use super::*; -<<<<<<< HEAD - const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; -======= const CHILD_KEY_1: &[u8] = b"sub1"; ->>>>>>> child_trie_w3_change fn test_db() -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { -<<<<<<< HEAD -======= - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); ->>>>>>> child_trie_w3_change let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -387,14 +325,9 @@ pub mod tests { #[test] fn read_from_child_storage_returns_some() { - let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let test_trie = test_trie(); assert_eq!( -<<<<<<< HEAD - test_trie.child_storage(CHILD_KEY_1, &child_info1, b"value3").unwrap(), -======= test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), ->>>>>>> child_trie_w3_change Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs 
b/primitives/state-machine/src/trie_backend_essence.rs index f46fc96ce685e..0dc7174c205f7 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -24,13 +24,8 @@ use log::{debug, warn}; use sp_core::Hasher; use hash_db::{self, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, -<<<<<<< HEAD - read_trie_value, check_if_empty_root, - for_keys_in_trie, TrieDBIterator}; -======= - empty_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; ->>>>>>> child_trie_w3_change + check_if_empty_root, read_trie_value, + TrieDBIterator, for_keys_in_trie}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -79,67 +74,10 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. -<<<<<<< HEAD pub fn next_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { let eph = BackendStorageDBRef::new(&self.storage, child_info); let trie = TrieDB::::new(&eph, &self.root) -======= - pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { - self.next_storage_key_from_root(&self.root, None, key) - } - - /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: &ChildInfo) -> Result, String> { - self.storage(child_info.prefixed_storage_key().as_slice()) - } - - /// Return the next key in the child trie i.e. the minimum key that is strictly superior to - /// `key` in lexicographic order. - pub fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, String> { - let child_root = match self.child_root(child_info)? 
{ - Some(child_root) => child_root, - None => return Ok(None), - }; - - let mut hash = H::Out::default(); - - if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); - } - // note: child_root and hash must be same size, panics otherwise. - hash.as_mut().copy_from_slice(&child_root[..]); - - self.next_storage_key_from_root(&hash, Some(child_info), key) - } - - /// Return next key from main trie or child trie by providing corresponding root. - fn next_storage_key_from_root( - &self, - root: &H::Out, - child_info: Option<&ChildInfo>, - key: &[u8], - ) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - let dyn_eph: &dyn hash_db::HashDBRef<_, _>; - let keyspace_eph; - if let Some(child_info) = child_info.as_ref() { - keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); - dyn_eph = &keyspace_eph; - } else { - dyn_eph = &eph; - } - - let trie = TrieDB::::new(dyn_eph, root) ->>>>>>> child_trie_w3_change .map_err(|e| format!("TrieDB creation error: {}", e))?; let mut iter = trie.iter() .map_err(|e| format!("TrieDB iteration error: {}", e))?; @@ -181,53 +119,11 @@ impl, H: Hasher> TrieBackendEssence where H::O pub fn for_keys( &self, child_info: &ChildInfo, -<<<<<<< HEAD f: F, ) { let eph = BackendStorageDBRef::new(&self.storage, child_info); if let Err(e) = for_keys_in_trie::, _, BackendStorageDBRef>( -======= - key: &[u8], - ) -> Result, String> { - let root = self.child_root(child_info)? 
- .unwrap_or(empty_child_trie_root::>().encode()); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value::, _>(child_info.keyspace(), &eph, &root, key) - .map_err(map_e) - } - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( - &self, - child_info: &ChildInfo, - f: F, - ) { - let root = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( - child_info.keyspace(), ->>>>>>> child_trie_w3_change &eph, &self.root, f, @@ -237,32 +133,8 @@ impl, H: Hasher> TrieBackendEssence where H::O } /// Execute given closure for all keys starting with prefix. -<<<<<<< HEAD pub fn for_keys_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], mut f: F) { self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), child_info) -======= - pub fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - mut f: F, - ) { - let root_vec = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) - } - - /// Execute given closure for all keys starting with prefix. 
- pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) ->>>>>>> child_trie_w3_change } fn keys_values_with_prefix_inner( @@ -270,11 +142,7 @@ impl, H: Hasher> TrieBackendEssence where H::O root: &H::Out, prefix: &[u8], mut f: F, -<<<<<<< HEAD child_info: &ChildInfo, -======= - child_info: Option<&ChildInfo>, ->>>>>>> child_trie_w3_change ) { let eph = BackendStorageDBRef::new(&self.storage, child_info); @@ -615,13 +483,8 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); -<<<<<<< HEAD - // using top trie as child trie (both with same content) - trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); -======= trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); ->>>>>>> child_trie_w3_change }; let essence_1 = TrieBackend::new(mdb, root_1); @@ -636,21 +499,6 @@ mod test { let essence_2 = TrieBackend::new(mdb, root_2); assert_eq!( -<<<<<<< HEAD - essence_2.next_child_storage_key(b"MyChild", &child_info, b"2"), Ok(Some(b"3".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", &child_info, b"3"), Ok(Some(b"4".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", &child_info, b"4"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", &child_info, b"5"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", &child_info, b"6"), Ok(None) -======= essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) ); assert_eq!( @@ -664,7 +512,6 @@ mod test { ); assert_eq!( essence_2.next_child_storage_key(child_info, b"6"), Ok(None) ->>>>>>> child_trie_w3_change ); } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 95ebc33331851..12d7f124030f5 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -136,76 +136,6 @@ pub mod 
well_known_keys { } } -<<<<<<< HEAD -/// A wrapper around a child storage key. -/// -/// This wrapper ensures that the child storage key is correct and properly used. It is -/// impossible to create an instance of this struct without providing a correct `storage_key`. -pub struct ChildStorageKey<'a> { - storage_key: Cow<'a, [u8]>, -} - -impl<'a> ChildStorageKey<'a> { - /// Create new instance of `Self`. - fn new(storage_key: Cow<'a, [u8]>) -> Option { - if well_known_keys::is_child_trie_key_valid(&storage_key) { - Some(ChildStorageKey { storage_key }) - } else { - None - } - } - - /// Create a new `ChildStorageKey` from a vector. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_vec(key: Vec) -> Option { - Self::new(Cow::Owned(key)) - } - - /// Create a new `ChildStorageKey` from a slice. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_slice(key: &'a [u8]) -> Option { - Self::new(Cow::Borrowed(key)) - } - - /// Get access to the byte representation of the storage key. - /// - /// This key is guaranteed to be correct. - pub fn as_ref(&self) -> &[u8] { - &*self.storage_key - } - - /// Destruct this instance into an owned vector that represents the storage key. - /// - /// This key is guaranteed to be correct. - pub fn into_owned(self) -> Vec { - self.storage_key.into_owned() - } - - /// Return true if the variable part of the key is empty. - pub fn is_empty(&self) -> bool { - well_known_keys::is_child_trie_key_empty(&*self.storage_key) - } - -} - - -/// Information related to a child state. -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum ChildInfo { - Default(ChildTrie), -} - -impl ChildInfo { - /// Create a new child trie information for default - /// child type. 
- pub fn new_default(unique_id: &[u8]) -> Self { - ChildInfo::Default(ChildTrie { - data: unique_id.to_vec(), -======= /// Information related to a child state. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] @@ -227,7 +157,6 @@ impl ChildInfo { pub fn new_default_from_vec(storage_key: Vec) -> Self { ChildInfo::ParentKeyId(ChildTrieParentKeyId { data: storage_key, ->>>>>>> child_trie_w3_change }) } @@ -235,20 +164,7 @@ impl ChildInfo { /// are not compatible. pub fn try_update(&mut self, other: &ChildInfo) -> bool { match self { -<<<<<<< HEAD - ChildInfo::Default(child_trie) => child_trie.try_update(other), -======= ChildInfo::ParentKeyId(child_trie) => child_trie.try_update(other), ->>>>>>> child_trie_w3_change - } - } - - /// Create child info from a linear byte packed value and a given type. -<<<<<<< HEAD - pub fn resolve_child_info(child_type: u32, data: &[u8]) -> Option { - match ChildType::new(child_type) { - Some(ChildType::CryptoUniqueId) => Some(ChildInfo::new_default(data)), - None => None, } } @@ -262,8 +178,11 @@ impl ChildInfo { /// 0 length unique id. pub fn is_top_trie(&self) -> bool { match self { - ChildInfo::Default(ChildTrie { data }) => data.len() == 0 -======= + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => data.len() == 0, + } + } + + /// Create child info from a linear byte packed value and a given type. pub fn resolve_child_info(child_type: u32, info: &[u8]) -> Option { match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => { @@ -273,7 +192,6 @@ impl ChildInfo { Some(Self::new_default(info)) }, None => None, ->>>>>>> child_trie_w3_change } } @@ -316,12 +234,6 @@ impl ChildInfo { } } -<<<<<<< HEAD - /// Return type for child trie. - pub fn child_type(&self) -> ChildType { - match self { - ChildInfo::Default(..) => ChildType::CryptoUniqueId, -======= /// Return a the full location in the direct parent of /// this trie. 
pub fn prefixed_storage_key(&self) -> Vec { @@ -349,7 +261,6 @@ impl ChildInfo { pub fn child_type(&self) -> ChildType { match self { ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, ->>>>>>> child_trie_w3_change } } } @@ -367,14 +278,6 @@ pub enum ChildType { } impl ChildType { -<<<<<<< HEAD - fn new(repr: u32) -> Option { - Some(match repr { - r if r == ChildType::CryptoUniqueId as u32 => ChildType::CryptoUniqueId, - _ => return None, - }) - } -======= /// Try to get a child type from its `u32` representation. pub fn new(repr: u32) -> Option { Some(match repr { @@ -410,24 +313,9 @@ impl ChildType { &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, } } ->>>>>>> child_trie_w3_change -} -/// A child trie of default type. -<<<<<<< HEAD -/// Default is the same implementation as the top trie. -/// It share its trie node storage with any kind of key, -/// and its unique id needs to be collision free (eg strong -/// crypto hash). -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct ChildTrie { - /// Data containing unique id. - /// Unique id must but unique and free of any possible key collision - /// (depending on its storage behavior). - data: Vec, } -impl ChildTrie { -======= +/// A child trie of default type. /// It uses the same default implementation as the top trie, /// top trie being a child trie with no keyspace and no storage key. /// Its keyspace is the variable (unprefixed) part of its storage key. @@ -442,7 +330,6 @@ pub struct ChildTrieParentKeyId { } impl ChildTrieParentKeyId { ->>>>>>> child_trie_w3_change /// Try to update with another instance, return false if both instance /// are not compatible. fn try_update(&mut self, other: &ChildInfo) -> bool { @@ -452,7 +339,6 @@ impl ChildTrieParentKeyId { } } -<<<<<<< HEAD #[cfg(feature = "std")] #[derive(Clone, PartialEq, Eq, Debug)] /// Type for storing a map of child trie related information. 
@@ -542,7 +428,8 @@ impl IntoIterator for ChildrenMap { fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } -======= +} + const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; #[test] @@ -551,5 +438,4 @@ fn test_prefix_default_child_info() { let prefix = child_info.child_type().parent_prefix(); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); assert!(prefix.starts_with(DEFAULT_CHILD_TYPE_PARENT_PREFIX)); ->>>>>>> child_trie_w3_change } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index fe71fc88b7009..07c118666b846 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -209,10 +209,8 @@ pub fn read_trie_value_with< Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) } -<<<<<<< HEAD -/// Determine the default child trie root. -pub fn default_child_trie_root( - _storage_key: &[u8], +/// Determine the empty child trie root. +pub fn empty_child_trie_root( ) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } @@ -226,65 +224,6 @@ pub fn check_if_empty_root ( /// Call `f` for all keys in a child trie. pub fn for_keys_in_trie( -======= -/// Determine the empty child trie root. -pub fn empty_child_trie_root( -) -> ::Out { - L::trie_root::<_, Vec, Vec>(core::iter::empty()) -} - -/// Determine a child trie root given its ordered contents, closed form. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root( - input: I, -) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, -{ - L::trie_root(input) -} - -/// Determine a child trie root given a hash DB and delta values. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. 
-pub fn child_delta_trie_root( - keyspace: &[u8], - db: &mut DB, - root_data: RD, - delta: I, -) -> Result<::Out, Box>> - where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB - + hash_db::PlainDB, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_data.as_ref()); - - { - let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; - - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } - } - - Ok(root) -} - -/// Call `f` for all keys in a child trie. -pub fn for_keys_in_child_trie( - keyspace: &[u8], ->>>>>>> child_trie_w3_change db: &DB, root: &TrieHash, mut f: F @@ -304,7 +243,6 @@ pub fn for_keys_in_child_trie( Ok(()) } - /// Record all keys for a given root. pub fn record_all_keys( db: &DB, @@ -328,144 +266,6 @@ pub fn record_all_keys( Ok(()) } -<<<<<<< HEAD -======= -/// Read a value from the child trie. -pub fn read_child_trie_value( - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8] -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) -} - -/// Read a value from the child trie with given query. 
-pub fn read_child_trie_value_with, DB>( - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8], - query: Q -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) -} - -/// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. -pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); - -/// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. -/// -/// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. -pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); - -/// Utility function used to merge some byte data (keyspace) and `prefix` data -/// before calling key value database primitives. 
-fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { - let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; - result[..ks.len()].copy_from_slice(ks); - result[ks.len()..].copy_from_slice(prefix.0); - (result, prefix.1) -} - -impl<'a, DB, H> KeySpacedDB<'a, DB, H> where - H: Hasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { - KeySpacedDB(db, ks, PhantomData) - } -} - -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where - H: Hasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { - KeySpacedDBMut(db, ks, PhantomData) - } -} - -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, - H: Hasher, - T: From<&'static [u8]>, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.insert((&derived_prefix.0, derived_prefix.1), value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { - let 
derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { - &mut *self - } -} - ->>>>>>> child_trie_w3_change /// Constants used into trie simplification codec. mod trie_constants { pub const EMPTY_TRIE: u8 = 0; diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index e56a434c221dc..646238726d859 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -131,11 +131,6 @@ impl TestClientBuilder, -<<<<<<< HEAD - child_key: impl AsRef<[u8]>, - child_info: &ChildInfo, -======= ->>>>>>> child_trie_w3_change value: impl AsRef<[u8]>, ) -> Self { let storage_key = child_info.storage_key(); diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 4bd3b261c60b6..7685157d96a13 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -195,10 +195,6 @@ pub trait TestClientBuilderExt: Sized { /// Panics if the key is empty. fn add_extra_child_storage>, V: Into>>( mut self, -<<<<<<< HEAD - storage_key: SK, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: K, value: V, From b6b70f325d9d651d8e8641e6d8acc3d409870f6a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 15:31:46 +0100 Subject: [PATCH 060/185] revert rpc related default renaming. fix sp io deprecated. 
--- client/network/src/on_demand_layer.rs | 2 +- client/network/src/protocol.rs | 8 ++- .../src/protocol/light_client_handler.rs | 55 +++++++++++-------- client/network/src/protocol/light_dispatch.rs | 14 ++--- client/rpc/src/state/mod.rs | 4 +- primitives/io/src/lib.rs | 45 +++++++-------- primitives/storage/src/lib.rs | 11 ++-- 7 files changed, 74 insertions(+), 65 deletions(-) diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 3a20cb9548a76..d672ed0b7f569 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -106,7 +106,7 @@ impl Fetcher for OnDemand where request: RemoteReadChildRequest ) -> Self::RemoteReadResult { let (sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteReadDefaultChild(request, sender)); + let _ = self.requests_send.unbounded_send(RequestData::RemoteReadChild(request, sender)); RemoteResponse { receiver } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 00984dcf3cbb6..dcb75e2a228c7 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::listeners::ListenerId}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo}; +use sp_core::storage::{StorageKey, ChildInfo, ChildType}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1517,7 +1517,11 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let child_info = ChildInfo::new_default(&request.storage_key); + let child_info = if let Some(ChildType::ParentKeyId) = ChildType::new(request.child_type) { + 
ChildInfo::new_default(&request.storage_key) + } else { + return; + }; let proof = match self.context_data.chain.read_child_proof( &request.block, &child_info, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index a7d3bf4dbbfe3..68adb8600fcc2 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -55,7 +55,7 @@ use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, StorageKey}; +use sp_core::storage::{ChildInfo, ChildType, StorageKey}; use sp_blockchain::{Error as ClientError}; use sp_runtime::traits::{Block, Header, NumberFor, Zero}; use std::{ @@ -170,7 +170,7 @@ pub enum Request { request: fetcher::RemoteReadRequest, sender: oneshot::Sender, Option>>, ClientError>> }, - ReadDefaultChild { + ReadChild { request: fetcher::RemoteReadChildRequest, sender: oneshot::Sender, Option>>, ClientError>> }, @@ -368,7 +368,7 @@ where let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } - Request::ReadDefaultChild { request, .. } => { + Request::ReadChild { request, .. 
} => { let proof = Decode::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) @@ -514,19 +514,30 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let child_info = ChildInfo::new_default(&request.storage_key); - let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() + let proof = if let Some(child_type) = ChildType::new(request.child_type) { + let child_info = ChildInfo::new_default(&request.storage_key); + match self.chain.read_child_proof(&block, &child_info, &request.keys) { + Ok(proof) => proof, + Err(error) => { + log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", + request_id, + peer, + request.storage_key.to_hex::(), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error); + StorageProof::empty() + } } + } else { + log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", + request_id, + peer, + request.storage_key.to_hex::(), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + "Unknown child type"); + StorageProof::empty() }; let response = { @@ -889,7 +900,7 @@ fn required_block(request: &Request) -> NumberFor { match request { Request::Header { request, .. } => request.block, Request::Read { request, .. } => *request.header.number(), - Request::ReadDefaultChild { request, .. } => *request.header.number(), + Request::ReadChild { request, .. } => *request.header.number(), Request::Call { request, .. } => *request.header.number(), Request::Changes { request, .. 
} => request.max_block.0, } @@ -899,7 +910,7 @@ fn retries(request: &Request) -> usize { let rc = match request { Request::Header { request, .. } => request.retry_count, Request::Read { request, .. } => request.retry_count, - Request::ReadDefaultChild { request, .. } => request.retry_count, + Request::ReadChild { request, .. } => request.retry_count, Request::Call { request, .. } => request.retry_count, Request::Changes { request, .. } => request.retry_count, }; @@ -919,7 +930,7 @@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: }; api::v1::light::request::Request::RemoteReadRequest(r) } - Request::ReadDefaultChild { request, .. } => { + Request::ReadChild { request, .. } => { let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), @@ -967,7 +978,7 @@ fn send_reply(result: Result, ClientError>, request: Request< Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), } - Request::ReadDefaultChild { request, sender } => match result { + Request::ReadChild { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), @@ -1547,7 +1558,7 @@ mod tests { response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), } } - Request::ReadDefaultChild{..} => { + Request::ReadChild{..} => { let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; api::v1::light::Response { id: 1, @@ -1630,7 +1641,7 @@ mod tests { keys: vec![b":key".to_vec()], retry_count: None, }; - issue_request(Request::ReadDefaultChild { request, sender: chan.0 }); + issue_request(Request::ReadChild { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } @@ -1731,7 +1742,7 @@ mod tests { keys: vec![b":key".to_vec()], retry_count: None, }; 
- send_receive(Request::ReadDefaultChild { request, sender: chan.0 }); + send_receive(Request::ReadChild { request, sender: chan.0 }); assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); // ^--- from `DummyFetchChecker::check_read_child_proof` } diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index e56cffaf83817..74cc1bcd3c172 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -148,7 +148,7 @@ pub(crate) enum RequestData { RemoteReadRequest, OneShotSender, Option>>, ClientError>>, ), - RemoteReadDefaultChild( + RemoteReadChild( RemoteReadChildRequest, OneShotSender, Option>>, ClientError>> ), @@ -404,7 +404,7 @@ impl LightDispatch where RequestData::RemoteRead(request, sender) ), }}, - RequestData::RemoteReadDefaultChild(request, sender) => { + RequestData::RemoteReadChild(request, sender) => { match checker.check_read_child_proof(&request, response.proof) { Ok(response) => { // we do not bother if receiver has been dropped already @@ -413,7 +413,7 @@ impl LightDispatch where }, Err(error) => Accept::CheckFailed( error, - RequestData::RemoteReadDefaultChild(request, sender) + RequestData::RemoteReadChild(request, sender) ), }}, data => Accept::Unexpected(data), @@ -596,7 +596,7 @@ impl Request { match self.data { RequestData::RemoteHeader(ref data, _) => data.block, RequestData::RemoteRead(ref data, _) => *data.header.number(), - RequestData::RemoteReadDefaultChild(ref data, _) => *data.header.number(), + RequestData::RemoteReadChild(ref data, _) => *data.header.number(), RequestData::RemoteCall(ref data, _) => *data.header.number(), RequestData::RemoteChanges(ref data, _) => data.max_block.0, RequestData::RemoteBody(ref data, _) => *data.header.number(), @@ -618,7 +618,7 @@ impl Request { data.block, data.keys.clone(), ), - RequestData::RemoteReadDefaultChild(ref data, _) => + 
RequestData::RemoteReadChild(ref data, _) => out.send_read_child_request( peer, self.id, @@ -667,7 +667,7 @@ impl RequestData { RequestData::RemoteHeader(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteCall(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteRead(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteReadDefaultChild(_, sender) => { let _ = sender.send(Err(error)); }, + RequestData::RemoteReadChild(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteChanges(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteBody(_, sender) => { let _ = sender.send(Err(error)); }, } @@ -1042,7 +1042,7 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadDefaultChild( + light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild( RemoteReadChildRequest { header: dummy_header(), block: Default::default(), diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index a25828a869b00..58313236be06b 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -103,8 +103,8 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a defaultchild storage, - /// leave empty to get all the keys + /// Returns the keys with prefix from a child storage, + /// leave prefix empty to get all the keys. 
fn child_storage_keys( &self, block: Option, diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index ef18e3f3dd496..c99e3ce3ced8d 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -37,7 +37,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::ChildInfo, + storage::{ChildInfo, ChildType}, }; use sp_core::{ @@ -68,11 +68,11 @@ pub enum EcdsaVerifyError { BadSignature, } -/// Deprecated function, ensure that this is a default prefixed key. #[cfg(feature = "std")] -fn child_storage_key_or_panic(storage_key: &[u8]) { - if !storage_key.starts_with(&ChildInfo::new_default(&[]).prefixed_storage_key()[..]) { - panic!("child storage key is invalid") +fn deprecated_storage_key_prefix_check(storage_key: &[u8]) { + let prefix = ChildType::ParentKeyId.parent_prefix(); + if !storage_key.starts_with(prefix) { + panic!("Invalid storage key"); } } @@ -153,8 +153,7 @@ pub trait Storage { child_type: u32, key: &[u8], ) -> Option> { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.child_storage(&child_info, key).map(|s| s.to_vec()) @@ -170,8 +169,7 @@ pub trait Storage { value_out: &mut [u8], value_offset: u32, ) -> Option { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.child_storage(&child_info, key) @@ -193,8 +191,7 @@ pub trait Storage { key: &[u8], value: &[u8], ) { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + 
deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); @@ -208,8 +205,7 @@ pub trait Storage { child_type: u32, key: &[u8], ) { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.clear_child_storage(&child_info, key); @@ -222,8 +218,7 @@ pub trait Storage { child_definition: &[u8], child_type: u32, ) { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.kill_child_storage(&child_info); @@ -237,8 +232,7 @@ pub trait Storage { child_type: u32, key: &[u8], ) -> bool { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.exists_child_storage(&child_info, key) @@ -252,8 +246,7 @@ pub trait Storage { child_type: u32, prefix: &[u8], ) { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.clear_child_prefix(&child_info, prefix); @@ -264,8 +257,13 @@ pub trait Storage { &mut self, storage_key: &[u8], ) -> Vec { - child_storage_key_or_panic(storage_key); - let child_info = ChildInfo::new_default(storage_key); + let prefix = 
ChildType::ParentKeyId.parent_prefix(); + if !storage_key.starts_with(prefix) { + panic!("Invalid storage key"); + } + let storage_key = &storage_key[..prefix.len()]; + let child_info = ChildInfo::resolve_child_info(ChildType::ParentKeyId as u32, storage_key) + .expect("Invalid storage key"); self.child_storage_root(&child_info) } @@ -277,8 +275,7 @@ pub trait Storage { child_type: u32, key: &[u8], ) -> Option> { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.next_child_storage_key(&child_info, key) @@ -291,7 +288,7 @@ pub trait Storage { /// from within the runtime. #[runtime_interface] pub trait DefaultChildStorage { - /// `storage_key` is the full location of the root of the child trie in the parent trie. + /// `storage_key` is the unprefixed location of the root of the child trie in the parent trie. /// /// This function specifically returns the data for `key` in the child storage or `None` /// if the key can not be found. diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index b89a4c43450c5..7330444ff476c 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -161,14 +161,11 @@ impl ChildInfo { } } - /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, info: &[u8]) -> Option { + /// Create child info from a prefixed storage key and a given type. 
+ pub fn resolve_child_info(child_type: u32, storage_key: &[u8]) -> Option { match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => { - debug_assert!( - info.starts_with(ChildType::ParentKeyId.parent_prefix()) - ); - Some(Self::new_default(info)) + Some(Self::new_default(storage_key)) }, None => None, } @@ -287,7 +284,7 @@ impl ChildType { /// Returns the location reserved for this child trie in their parent trie if there /// is one. - fn parent_prefix(&self) -> &'static [u8] { + pub fn parent_prefix(&self) -> &'static [u8] { match self { &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, } From d563278a484d5e9ea732ad629ea84340ef6de7c6 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 15:39:53 +0100 Subject: [PATCH 061/185] fix slice indexing for child root --- primitives/io/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index c99e3ce3ced8d..62f12dfd5b5e4 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -261,7 +261,7 @@ pub trait Storage { if !storage_key.starts_with(prefix) { panic!("Invalid storage key"); } - let storage_key = &storage_key[..prefix.len()]; + let storage_key = &storage_key[prefix.len()..]; let child_info = ChildInfo::resolve_child_info(ChildType::ParentKeyId as u32, storage_key) .expect("Invalid storage key"); self.child_storage_root(&child_info) From a85bb3860e619cbabb9b4ea0e584323f23159a87 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 17:02:20 +0100 Subject: [PATCH 062/185] Switch back to prefixed storage key for rpc. 
--- client/api/src/light.rs | 2 -- client/network/src/protocol.rs | 9 +++------ .../src/protocol/light_client_handler.rs | 13 ++++++------- client/network/src/protocol/light_dispatch.rs | 7 ++----- client/network/src/protocol/message.rs | 2 -- .../network/src/protocol/schema/light.v1.proto | 3 --- client/rpc-api/src/state/mod.rs | 4 ---- client/rpc/src/state/mod.rs | 18 +++++------------- client/rpc/src/state/state_full.rs | 15 ++++++--------- client/rpc/src/state/state_light.rs | 6 +----- client/rpc/src/state/tests.rs | 10 ++++------ client/src/light/fetcher.rs | 7 +++---- primitives/storage/src/lib.rs | 18 +++++++++++++++++- 13 files changed, 47 insertions(+), 67 deletions(-) diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 67376947d3913..2911d77f18209 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -82,8 +82,6 @@ pub struct RemoteReadChildRequest { pub header: Header, /// Storage key for child. pub storage_key: Vec, - /// Child type. - pub child_type: u32, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. 
diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index dcb75e2a228c7..ea109c0c48701 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -253,7 +253,6 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { id: RequestId, block: ::Hash, storage_key: Vec, - child_type: u32, keys: Vec>, ) { let message: Message = message::generic::Message::RemoteReadChildRequest( @@ -261,7 +260,6 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { id, block, storage_key, - child_type, keys, }); @@ -1517,10 +1515,9 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let child_info = if let Some(ChildType::ParentKeyId) = ChildType::new(request.child_type) { - ChildInfo::new_default(&request.storage_key) - } else { - return; + let child_info = match ChildType::from_prefixed_key(&request.storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return, }; let proof = match self.context_data.chain.read_child_proof( &request.block, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 68adb8600fcc2..d951b58f7bca5 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -514,8 +514,10 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = if let Some(child_type) = ChildType::new(request.child_type) { - let child_info = ChildInfo::new_default(&request.storage_key); + let proof = if let Some((ChildType::ParentKeyId, storage_key)) = ChildType::from_prefixed_key( + &request.storage_key, + ) { + let child_info = ChildInfo::new_default(storage_key); match self.chain.read_child_proof(&block, &child_info, &request.keys) { Ok(proof) => proof, Err(error) => { @@ -934,7 +936,6 
@@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), - child_type: request.child_type, keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -1636,8 +1637,7 @@ mod tests { let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), - child_type: 1, + storage_key: b":child_storage:default:sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }; @@ -1737,8 +1737,7 @@ mod tests { let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b"sub".to_vec(), - child_type: 1, + storage_key: b":child_storage:default:sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }; diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 74cc1bcd3c172..8cd6ce51c49fa 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -70,7 +70,6 @@ pub trait LightDispatchNetwork { id: RequestId, block: ::Hash, storage_key: Vec, - child_type: u32, keys: Vec>, ); @@ -624,7 +623,6 @@ impl Request { self.id, data.block, data.storage_key.clone(), - data.child_type, data.keys.clone(), ), RequestData::RemoteCall(ref data, _) => @@ -820,7 +818,7 @@ pub mod tests { fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, - _: u32, _: Vec>) {} + _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, _: ::Hash, _: ::Hash, _: 
Option>, _: Vec) {} @@ -1046,8 +1044,7 @@ pub mod tests { RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b"sub".to_vec(), - child_type: 1, + storage_key: b":child_storage:default:sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }, tx)); diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 1481ec55b7ff4..0539d96234ca6 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -420,8 +420,6 @@ pub mod generic { pub block: H, /// Child Storage key. pub storage_key: Vec, - /// Child type. - pub child_type: u32, /// Storage key. pub keys: Vec>, } diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index c4aff40c9626d..fd970c79b4757 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -74,9 +74,6 @@ message RemoteReadChildRequest { // Child Storage key, this is relative // to the child type storage location. bytes storage_key = 3; - /// Child type, its required to resolve - /// child storage final location. - uint32 child_type = 5; // Storage keys. 
repeated bytes keys = 6; } diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 41690134009b8..3263b6a4cc606 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -77,7 +77,6 @@ pub trait StateApi { fn child_storage_keys( &self, child_storage_key: StorageKey, - child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; @@ -87,7 +86,6 @@ pub trait StateApi { fn child_storage( &self, child_storage_key: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -97,7 +95,6 @@ pub trait StateApi { fn child_storage_hash( &self, child_storage_key: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -107,7 +104,6 @@ pub trait StateApi { fn child_storage_size( &self, child_storage_key: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 58313236be06b..f07d06578f42e 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -109,7 +109,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, storage_key: StorageKey, - child_type: u32, prefix: StorageKey, ) -> FutureResult>; @@ -118,7 +117,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -127,7 +125,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -136,10 +133,9 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, child_type, key) + Box::new(self.child_storage(block, storage_key, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -300,41 +296,37 @@ impl StateApi for State fn child_storage( &self, 
storage_key: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, storage_key, child_type, key) + self.backend.child_storage(block, storage_key, key) } fn child_storage_keys( &self, storage_key: StorageKey, - child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, storage_key, child_type, key_prefix) + self.backend.child_storage_keys(block, storage_key, key_prefix) } fn child_storage_hash( &self, storage_key: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, storage_key, child_type, key) + self.backend.child_storage_hash(block, storage_key, key) } fn child_storage_size( &self, storage_key: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, storage_key, child_type, key) + self.backend.child_storage_size(block, storage_key, key) } fn metadata(&self, block: Option) -> FutureResult { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 508ff8c74417d..40cf3ade504b8 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -310,14 +310,13 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_type: u32, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::new(child_type) { - Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; self.client.child_storage_keys( @@ -333,14 +332,13 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { 
Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::new(child_type) { - Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; self.client.child_storage( @@ -356,14 +354,13 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::new(child_type) { - Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; self.client.child_storage_hash( diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 80d43f8ccee82..0af0b45cd8b45 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -250,7 +250,6 @@ impl StateBackend for LightState, _storage_key: StorageKey, - _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -260,7 +259,6 @@ impl StateBackend for LightState, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -271,7 +269,6 @@ impl StateBackend for LightState StateBackend for LightState, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, child_type, key) + Box::new(self.child_storage(block, storage_key, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| 
HasherFor::::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index df7c83e1dfe87..36a8f1ff0fc86 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -31,6 +31,7 @@ use substrate_test_runtime_client::{ }; const STORAGE_KEY: &[u8] = b"child"; +const PREFIXED_STORAGE_KEY: &[u8] = b":child_storage:default:child"; #[test] fn should_return_storage() { @@ -47,7 +48,7 @@ fn should_return_storage() { let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); - let storage_key = StorageKey(STORAGE_KEY.to_vec()); + let storage_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()).wait() @@ -65,7 +66,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, 1, key, Some(genesis_hash).into()) + client.child_storage(storage_key, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -82,14 +83,13 @@ fn should_return_child_storage() { .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey(STORAGE_KEY.to_vec()); + let child_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); let key = StorageKey(b"key".to_vec()); assert_matches!( client.child_storage( child_key.clone(), - 1, key.clone(), Some(genesis_hash).into(), ).wait(), @@ -98,7 +98,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage_hash( child_key.clone(), - 1, key.clone(), Some(genesis_hash).into(), ).wait().map(|x| x.is_some()), @@ -107,7 +106,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage_size( child_key.clone(), - 1, key.clone(), None, ).wait(), diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 
7a7ef6e0a91df..ce3c2719cf705 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -241,8 +241,8 @@ impl FetchChecker for LightDataChecker request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - let child_info = match ChildType::new(request.child_type) { - Some(ChildType::ParentKeyId) => ChildInfo::new_default(&request.storage_key[..]), + let child_info = match ChildType::from_prefixed_key(&request.storage_key[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; read_child_proof_check::( @@ -509,8 +509,7 @@ pub mod tests { &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, - storage_key: b"child1".to_vec(), - child_type: 1, + storage_key: b":child_storage:default:child1".to_vec(), keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 7330444ff476c..984c8e4738796 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -262,6 +262,20 @@ impl ChildType { }) } + /// Transform a prefixed key into a tuple of the child type + /// and the unprefixed representation of the key. + pub fn from_prefixed_key<'a>(storage_key: &'a [u8]) -> Option<(Self, &'a [u8])> { + let match_type = |storage_key: &'a [u8], child_type: ChildType| { + let prefix = child_type.parent_prefix(); + if storage_key.starts_with(prefix) { + Some((child_type, &storage_key[prefix.len()..])) + } else { + None + } + }; + match_type(storage_key, ChildType::ParentKeyId) + } + /// Produce a prefixed key for a given child type. fn new_prefixed_key(&self, key: &[u8]) -> Vec { let parent_prefix = self.parent_prefix(); @@ -298,10 +312,12 @@ impl ChildType { /// It shares its trie nodes backend storage with every other /// child trie, so its storage key needs to be a unique id /// that will be use only once. +/// Those unique id also required to be long enough to avoid any +/// unique id to be prefixed by an other unique id. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub struct ChildTrieParentKeyId { - /// Data is the full prefixed storage key. + /// Data is the storage key without prefix. 
data: Vec, } From 5f98fbe9d9f3a4ba35028f21c1bbe054be3e3433 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 17:25:39 +0100 Subject: [PATCH 063/185] rpc error discrepancy --- client/network/src/protocol.rs | 8 ++-- .../src/protocol/light_client_handler.rs | 40 ++++++++----------- client/rpc-api/src/state/mod.rs | 2 +- client/rpc/src/state/state_full.rs | 6 +-- 4 files changed, 24 insertions(+), 32 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index ea109c0c48701..e9135166ceebd 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1516,14 +1516,14 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); let child_info = match ChildType::from_prefixed_key(&request.storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return, + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err("Invalid child storage key".into()), }; - let proof = match self.context_data.chain.read_child_proof( + let proof = match child_info.and_then(|child_info| self.context_data.chain.read_child_proof( &request.block, &child_info, &request.keys, - ) { + )) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index d951b58f7bca5..e661bf672554d 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -514,32 +514,24 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = if let Some((ChildType::ParentKeyId, storage_key)) = ChildType::from_prefixed_key( - &request.storage_key, + let child_info = match 
ChildType::from_prefixed_key(&request.storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err("Invalid child storage key".into()), + }; + let proof = match child_info.and_then(|child_info| + self.chain.read_child_proof(&block, &child_info, &request.keys) ) { - let child_info = ChildInfo::new_default(storage_key); - match self.chain.read_child_proof(&block, &child_info, &request.keys) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } + Ok(proof) => proof, + Err(error) => { + log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", + request_id, + peer, + request.storage_key.to_hex::(), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error); + StorageProof::empty() } - } else { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - "Unknown child type"); - StorageProof::empty() }; let response = { diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 3263b6a4cc606..48d363bb8921c 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -100,7 +100,7 @@ pub trait StateApi { ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- #[rpc(name = "state_getDefaultStorageSize")] + #[rpc(name = "state_getChildStorageSize")] fn child_storage_size( &self, child_storage_key: StorageKey, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 40cf3ade504b8..8727810c83291 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -317,7 +317,7 @@ impl StateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err("Invalid child storage key".into()), }; self.client.child_storage_keys( &BlockId::Hash(block), @@ -339,7 +339,7 @@ impl StateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err("Invalid child storage key".into()), }; self.client.child_storage( &BlockId::Hash(block), @@ -361,7 +361,7 @@ impl StateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err("Invalid child storage key".into()), }; self.client.child_storage_hash( &BlockId::Hash(block), From d85dec23be5dc88cd2741cc9b234a74df6b2c716 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 19:38:34 +0100 Subject: [PATCH 064/185] Fix test --- primitives/runtime-interface/test-wasm/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 2e1ab52d67741..3ac746e55c6d4 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -233,7 +233,7 @@ wasm_export_functions! 
{ } fn test_ext_blake2_256() { - use sp_core::Hasher; + use sp_core::InnerHasher; let data = "hey, hash me please!"; let hash = sp_core::Blake2Hasher::hash(data.as_bytes()); From 9f704c9a00a599b17b6ccf2893d18fceaf538747 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 28 Feb 2020 12:51:49 +0100 Subject: [PATCH 065/185] use correct parameter in overlay and fix change trie. --- .../state-machine/src/changes_trie/build.rs | 49 +++++++++---------- primitives/state-machine/src/ext.rs | 6 +-- .../state-machine/src/overlayed_changes.rs | 6 +-- 3 files changed, 30 insertions(+), 31 deletions(-) diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index c206090fa4e18..cf1a2e3bfba1b 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -32,6 +32,7 @@ use crate::{ input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, }, }; +use sp_core::storage::{ChildInfo, ChildType}; /// Prepare input pairs for building a changes trie of given block. 
/// @@ -105,19 +106,19 @@ fn prepare_extrinsics_input<'a, B, H, Number>( Number: BlockNumber, { - let mut children_prefixed_keys = BTreeSet::::new(); + let mut children_info = BTreeSet::::new(); let mut children_result = BTreeMap::new(); for (_storage_key, (_map, child_info)) in changes.prospective.children_default.iter() .chain(changes.committed.children_default.iter()) { - children_prefixed_keys.insert(child_info.prefixed_storage_key()); + children_info.insert(child_info.clone()); } - for storage_key in children_prefixed_keys { + for child_info in children_info { let child_index = ChildIndex:: { block: block.clone(), - storage_key: storage_key.clone(), + storage_key: child_info.prefixed_storage_key(), }; - let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(storage_key))?; + let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(child_info))?; children_result.insert(child_index, iter); } @@ -130,22 +131,22 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( backend: &'a B, block: &Number, changes: &'a OverlayedChanges, - storage_key: Option, + child_info: Option, ) -> Result> + 'a, String> where B: Backend, H: Hasher, Number: BlockNumber, { - let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.default_child_info(sk).cloned(); - ( - changes.committed.children_default.get(sk).map(|c| &c.0), - changes.prospective.children_default.get(sk).map(|c| &c.0), - child_info, - ) + let (committed, prospective) = if let Some(child_info) = child_info.as_ref() { + match child_info.child_type() { + ChildType::ParentKeyId => ( + changes.committed.children_default.get(child_info.storage_key()).map(|c| &c.0), + changes.prospective.children_default.get(child_info.storage_key()).map(|c| &c.0), + ), + } } else { - (Some(&changes.committed.top), Some(&changes.prospective.top), None) + (Some(&changes.committed.top), Some(&changes.prospective.top)) }; committed.iter().flat_map(|c| c.iter()) 
.chain(prospective.iter().flat_map(|c| c.iter())) @@ -155,13 +156,11 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Entry::Vacant(entry) => { // ignore temporary values (values that have null value at the end of operation // AND are not in storage at the beginning of operation - if let Some(sk) = storage_key.as_ref() { - if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(&child_info, k) - .map_err(|e| format!("{}", e))? { - return Ok(map); - } + if let Some(child_info) = child_info.as_ref() { + if !changes.child_storage(child_info, k).map(|v| v.is_some()).unwrap_or_default() { + if !backend.exists_child_storage(&child_info, k) + .map_err(|e| format!("{}", e))? { + return Ok(map); } } } else { @@ -344,7 +343,6 @@ mod test { use codec::Encode; use sp_core::Blake2Hasher; use sp_core::storage::well_known_keys::EXTRINSIC_INDEX; - use sp_core::storage::ChildInfo; use crate::InMemoryBackend; use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; @@ -367,8 +365,9 @@ mod test { (vec![104], vec![255]), (vec![105], vec![255]), ].into_iter().collect::>().into(); - let child_trie_key1 = child_info_1.prefixed_storage_key(); - let child_trie_key2 = child_info_2.prefixed_storage_key(); + let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); + let child_trie_key1 = child_info_1.storage_key().to_vec(); + let child_trie_key2 = child_info_2.storage_key().to_vec(); let storage = InMemoryStorage::with_inputs(vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), @@ -402,7 +401,7 @@ mod test { ]), (zero + 9, Vec::new()), (zero + 10, Vec::new()), (zero + 11, Vec::new()), (zero + 12, Vec::new()), (zero + 13, Vec::new()), (zero + 14, Vec::new()), (zero + 15, Vec::new()), - ], 
vec![(child_trie_key1.clone(), vec![ + ], vec![(prefixed_child_trie_key1.clone(), vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 77ae9a0820fb7..32ddac8d2e191 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -210,7 +210,7 @@ where ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(child_info.storage_key(), key) + .child_storage(child_info, key) .map(|x| x.map(|x| x.to_vec())) .unwrap_or_else(|| self.backend.child_storage(child_info, key) @@ -234,7 +234,7 @@ where ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(child_info.storage_key(), key) + .child_storage(child_info, key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| self.backend.child_storage_hash(child_info, key) @@ -313,7 +313,7 @@ where ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = match self.overlay.child_storage(child_info.storage_key(), key) { + let result = match self.overlay.child_storage(child_info, key) { Some(x) => x.is_some(), _ => self.backend .exists_child_storage(child_info, key) diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index b9e25fc547013..2dc56bc772d3d 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -212,14 +212,14 @@ impl OverlayedChanges { /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. 
- pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children_default.get(storage_key) { + pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + if let Some(map) = self.prospective.children_default.get(child_info.storage_key()) { if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } } - if let Some(map) = self.committed.children_default.get(storage_key) { + if let Some(map) = self.committed.children_default.get(child_info.storage_key()) { if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } From 7c5d54642ed499c9ff0ab87ce79f4423fce77828 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 2 Mar 2020 14:37:11 +0100 Subject: [PATCH 066/185] bump version --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 85889a50c20b9..d05f8fa2fb444 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -82,7 +82,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 227, + spec_version: 228, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; From eff1d8c53b5f7b3655b84020c5aafab53f204242 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 12 Mar 2020 11:13:27 +0100 Subject: [PATCH 067/185] keeping inner hasher, it is needed at state-machine level. 
--- primitives/runtime/src/traits.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 690c076cd0691..0ded39fd7372e 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -25,7 +25,7 @@ use std::fmt::Display; use std::str::FromStr; #[cfg(feature = "std")] use serde::{Serialize, Deserialize, de::DeserializeOwned}; -use sp_core::{self, InnerHasher as Hasher, TypeId, RuntimeDebug}; +use sp_core::{self, InnerHasher, Hasher, TypeId, RuntimeDebug}; use crate::codec::{Codec, Encode, Decode}; use crate::transaction_validity::{ ValidTransaction, TransactionValidity, TransactionValidityError, UnknownTransaction, @@ -378,20 +378,19 @@ pub trait OffchainWorker { // Stupid bug in the Rust compiler believes derived // traits must be fulfilled by all type parameters. pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq - + PartialEq + Hasher::Output> + sp_core::Hasher { - // TODO try fuse the alt Hasher into this?? + + PartialEq + InnerHasher::Output> + Hasher { /// The hash type produced. type Output: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + Encode + Decode; /// Produce the hash of some byte-slice. fn hash(s: &[u8]) -> Self::Output { - ::hash(s) + ::hash(s) } /// Produce the hash of some codec-encodable value. fn hash_of(s: &S) -> Self::Output { - Encode::using_encoded(s, ::hash) + Encode::using_encoded(s, ::hash) } /// The ordered Patricia tree root of the given `input`. 
@@ -406,7 +405,7 @@ pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BlakeTwo256; -impl Hasher for BlakeTwo256 { +impl InnerHasher for BlakeTwo256 { type Out = sp_core::H256; type StdHasher = hash256_std_hasher::Hash256StdHasher; const LENGTH: usize = 32; @@ -428,7 +427,7 @@ impl Hash for BlakeTwo256 { } } -impl sp_core::Hasher for BlakeTwo256 { +impl Hasher for BlakeTwo256 { const EMPTY_ROOT: &'static [u8] = &[ 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, @@ -1429,7 +1428,6 @@ mod tests { #[test] fn empty_root_const() { - use sp_core::Hasher; let empty = ::hash(&[0u8]); assert_eq!(BlakeTwo256::EMPTY_ROOT, empty.as_ref()); } From 29254aa8733da0dcc7667f3483b55fcc16c9f875 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 17 Mar 2020 15:41:51 +0100 Subject: [PATCH 068/185] missing merge --- client/network/src/protocol.rs | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 5304ba71d60a3..af8a15e72e9c2 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1603,13 +1603,12 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); -<<<<<<< HEAD let child_info = match ChildType::from_prefixed_key(&request.storage_key) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; let proof = match child_info.and_then(|child_info| self.context_data.chain.read_child_proof( - &request.block, + &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), )) { @@ -1624,27 +1623,6 @@ impl Protocol { error ); StorageProof::empty() -======= - let proof = if let Some(child_info) = 
ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.context_data.chain.read_child_proof( - &BlockId::Hash(request.block), - &request.storage_key, - child_info, - &mut request.keys.iter().map(AsRef::as_ref), - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } ->>>>>>> master } }; self.send_message( From baacffacbdd4837c207adf1afc6d42dfb5cb6ba4 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 17 Mar 2020 16:36:18 +0100 Subject: [PATCH 069/185] fix bench --- client/db/src/bench.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index b70d713b437e6..54db9556ac9b6 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -88,7 +88,7 @@ impl BenchmarkingState { child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); - let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( + let (root, transaction, _): (B::Hash, _, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta, false, From 160d2ea7ec0fec4412447db234a180f9e7ac2e88 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 18 Mar 2020 19:18:37 +0100 Subject: [PATCH 070/185] Restore previous cli api (will need deprecation in a v2 of rpc), split new api into child state api. 
--- client/rpc-api/src/child_state/mod.rs | 66 ++++++++++ client/rpc-api/src/lib.rs | 1 + client/rpc-api/src/state/mod.rs | 12 ++ client/rpc/src/state/mod.rs | 170 ++++++++++++++++++++++---- client/rpc/src/state/state_full.rs | 162 +++++++++++++++++------- client/rpc/src/state/state_light.rs | 121 +++++++++++++----- client/rpc/src/state/tests.rs | 24 ++-- client/service/src/builder.rs | 12 +- 8 files changed, 448 insertions(+), 120 deletions(-) create mode 100644 client/rpc-api/src/child_state/mod.rs diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs new file mode 100644 index 0000000000000..f9027b0f15a08 --- /dev/null +++ b/client/rpc-api/src/child_state/mod.rs @@ -0,0 +1,66 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Substrate state API. 
+ +use jsonrpc_derive::rpc; +use sp_core::storage::{StorageKey, StorageData}; +use crate::state::error::FutureResult; + +pub use self::gen_client::Client as ChildStateClient; + +/// Substrate child state API +#[rpc] +pub trait ChildStateApi { + /// RPC Metadata + type Metadata; + + /// Returns the keys with prefix from a child storage, leave empty to get all the keys + #[rpc(name = "childstate_getKeys")] + fn storage_keys( + &self, + child_storage_key: StorageKey, + prefix: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns a child storage entry at a specific block's state. + #[rpc(name = "childstate_getStorage")] + fn storage( + &self, + child_storage_key: StorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns the hash of a child storage entry at a block's state. + #[rpc(name = "childstate_getStorageHash")] + fn storage_hash( + &self, + child_storage_key: StorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns the size of a child storage entry at a block's state. + #[rpc(name = "childstate_getStorageSize")] + fn storage_size( + &self, + child_storage_key: StorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; +} diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index 8ad2d94bfd271..82913f2dea672 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -32,4 +32,5 @@ pub mod author; pub mod chain; pub mod offchain; pub mod state; +pub mod child_state; pub mod system; diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 48d363bb8921c..e94df46736b48 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -73,37 +73,49 @@ pub trait StateApi { fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; /// Returns the keys with prefix from a child storage, leave empty to get all the keys + /// This method is deprecated in favor of `childstate_getChildKeys`. 
#[rpc(name = "state_getChildKeys")] fn child_storage_keys( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. + /// This method is deprecated in favor of `childstate_getChildStorage`. #[rpc(name = "state_getChildStorage")] fn child_storage( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. + /// This method is deprecated in favor of `childstate_getChildStorageHash`. #[rpc(name = "state_getChildStorageHash")] fn child_storage_hash( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. + /// This method is deprecated in favor of `childstate_getChildStorageSize`. #[rpc(name = "state_getChildStorageSize")] fn child_storage_size( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index a8a2e58b9a43d..29ad2f7538eee 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -37,6 +37,7 @@ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; use self::error::{Error, FutureResult}; pub use sc_rpc_api::state::*; +pub use sc_rpc_api::child_state::*; use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend}; use sp_blockchain::{HeaderMetadata, HeaderBackend}; @@ -103,12 +104,13 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a child storage, - /// leave prefix empty to get all the keys. 
+ /// Returns the keys with prefix from a child storage, leave empty to get all the keys fn child_storage_keys( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult>; @@ -116,7 +118,9 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -124,7 +128,9 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_hash( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -132,10 +138,12 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_size( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, key) + Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -190,7 +198,7 @@ pub trait StateBackend: Send + Sync + 'static pub fn new_full( client: Arc, subscriptions: Subscriptions, -) -> State +) -> (State, ChildState) where Block: BlockT + 'static, BE: Backend + 'static, @@ -200,9 +208,11 @@ pub fn new_full( + ProvideRuntimeApi + Send + Sync + 'static, Client::Api: Metadata, { - State { - backend: Box::new(self::state_full::FullState::new(client, subscriptions)), - } + let child_backend = Box::new( + self::state_full::FullState::new(client.clone(), subscriptions.clone()) + ); + let backend = Box::new(self::state_full::FullState::new(client, subscriptions)); + (State { backend }, ChildState { backend: child_backend }) } /// Create new state API that works on light node. 
@@ -211,7 +221,7 @@ pub fn new_light>( subscriptions: Subscriptions, remote_blockchain: Arc>, fetcher: Arc, -) -> State +) -> (State, ChildState) where Block: BlockT + 'static, BE: Backend + 'static, @@ -221,14 +231,20 @@ pub fn new_light>( + Send + Sync + 'static, F: Send + Sync + 'static, { - State { - backend: Box::new(self::state_light::LightState::new( + let child_backend = Box::new(self::state_light::LightState::new( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + fetcher.clone(), + )); + + let backend = Box::new(self::state_light::LightState::new( client, subscriptions, remote_blockchain, fetcher, - )), - } + )); + (State { backend }, ChildState { backend: child_backend }) } /// State API with subscriptions support. @@ -295,38 +311,46 @@ impl StateApi for State fn child_storage( &self, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, storage_key, key) + self.backend.child_storage(block, child_storage_key, child_info, child_type, key) } fn child_storage_keys( &self, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, storage_key, key_prefix) + self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) } fn child_storage_hash( &self, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, storage_key, key) + self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) } fn child_storage_size( &self, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> 
FutureResult> { - self.backend.child_storage_size(block, storage_key, key) + self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) } fn metadata(&self, block: Option) -> FutureResult { @@ -372,6 +396,104 @@ impl StateApi for State } } +/// Child state backend API. +pub trait ChildStateBackend: Send + Sync + 'static + where + Block: BlockT + 'static, + Client: Send + Sync + 'static, +{ + /// Returns the keys with prefix from a child storage, + /// leave prefix empty to get all the keys. + fn storage_keys( + &self, + block: Option, + storage_key: StorageKey, + prefix: StorageKey, + ) -> FutureResult>; + + /// Returns a child storage entry at a specific block's state. + fn storage( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the hash of a child storage entry at a block's state. + fn storage_hash( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the size of a child storage entry at a block's state. + fn storage_size( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(self.storage(block, storage_key, key) + .map(|x| x.map(|x| x.0.len() as u64))) + } +} + +/// Child state API with subscriptions support. 
+pub struct ChildState { + backend: Box>, +} + +impl ChildStateApi for ChildState + where + Block: BlockT + 'static, + Client: Send + Sync + 'static, +{ + type Metadata = crate::metadata::Metadata; + + fn storage( + &self, + storage_key: StorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage(block, storage_key, key) + } + + fn storage_keys( + &self, + storage_key: StorageKey, + key_prefix: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_keys(block, storage_key, key_prefix) + } + + fn storage_hash( + &self, + storage_key: StorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_hash(block, storage_key, key) + } + + fn storage_size( + &self, + storage_key: StorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_size(block, storage_key, key) + } +} + +const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; + +fn child_resolution_error() -> Error { + client_err(sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string())) +} + fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index ae00bcec72aa3..d27086a02c176 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -39,7 +39,8 @@ use sp_runtime::{ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; -use super::{StateBackend, error::{FutureResult, Error, Result}, client_err}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, + client_err, child_resolution_error}; use std::marker::PhantomData; use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider}; @@ -309,67 +310,58 @@ impl StateBackend for FullState, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult> { - Box::new(result( - 
self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage_keys( - &BlockId::Hash(block), - &child_info, - &prefix, - ) - }) - .map_err(client_err))) + if child_type != 1 { + return Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage_keys( + self, + block, + child_storage_key, + prefix, + ) } fn child_storage( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage( - &BlockId::Hash(block), - &child_info, - &key, - ) - }) - .map_err(client_err))) + if child_type != 1 { + return Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage( + self, + block, + child_storage_key, + key, + ) } fn child_storage_hash( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage_hash( - &BlockId::Hash(block), - &child_info, - &key, - ) - }) - .map_err(client_err))) + if child_type != 1 { + return 
Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage_hash( + self, + block, + child_storage_key, + key, + ) } fn metadata(&self, block: Option) -> FutureResult { @@ -488,7 +480,7 @@ impl StateBackend for FullState StateBackend for FullState ChildStateBackend for FullState where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + StorageProvider + HeaderBackend + + HeaderMetadata + BlockchainEvents + + CallApiAt + ProvideRuntimeApi + + Send + Sync + 'static, + Client::Api: Metadata, +{ + fn storage_keys( + &self, + block: Option, + storage_key: StorageKey, + prefix: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage key".into()), + }; + self.client.child_storage_keys( + &BlockId::Hash(block), + &child_info, + &prefix, + ) + }) + .map_err(client_err))) + } + + fn storage( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage key".into()), + }; + self.client.child_storage( + &BlockId::Hash(block), + &child_info, + &key, + ) + }) + .map_err(client_err))) + } + + fn storage_hash( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage 
key".into()), + }; + self.client.child_storage_hash( + &BlockId::Hash(block), + &child_info, + &key, + ) + }) + .map_err(client_err))) + } +} + /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 23ae3c4aede70..6e1aa6bc1f07d 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -53,12 +53,14 @@ use sp_core::{ use sp_version::RuntimeVersion; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; -use super::{StateBackend, error::{FutureResult, Error}, client_err}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err, + child_resolution_error}; /// Storage data map of storage keys => (optional) storage value. type StorageMap = HashMap>; /// State API backend for light nodes. +#[derive(Clone)] pub struct LightState, Client> { client: Arc, subscriptions: Subscriptions, @@ -233,8 +235,7 @@ impl StateBackend for LightState, key: StorageKey, ) -> FutureResult> { - Box::new(self - .storage(block, key) + Box::new(StateBackend::storage(self, block, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) ) @@ -244,7 +245,9 @@ impl StateBackend for LightState, - _storage_key: StorageKey, + _child_storage_key: StorageKey, + _child_info: StorageKey, + _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -253,43 +256,38 @@ impl StateBackend for LightState, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - let block = self.block_or_best(block); - let fetcher = self.fetcher.clone(); - let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) - 
.then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { - block, - header, - storage_key: storage_key.0, - keys: vec![key.0.clone()], - retry_count: Default::default(), - }).then(move |result| ready(result - .map(|mut data| data - .remove(&key.0) - .expect("successful result has entry for all keys; qed") - .map(StorageData) - ) - .map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), - }); - - Box::new(child_storage.boxed().compat()) + if child_type != 1 { + return Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage( + self, + block, + child_storage_key, + key, + ) } fn child_storage_hash( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, key) - .and_then(|maybe_storage| - result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) + if child_type != 1 { + return Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage_hash( + self, + block, + child_storage_key, + key, ) } @@ -501,6 +499,65 @@ impl StateBackend for LightState ChildStateBackend for LightState + where + Block: BlockT, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + 'static +{ + fn storage_keys( + &self, + _block: Option, + _storage_key: StorageKey, + _prefix: StorageKey, + ) -> FutureResult> { + Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + } + + fn storage( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + let block = self.block_or_best(block); + let fetcher = self.fetcher.clone(); + let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) + .then(move |result| match result { + Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest 
{ + block, + header, + storage_key: storage_key.0, + keys: vec![key.0.clone()], + retry_count: Default::default(), + }).then(move |result| ready(result + .map(|mut data| data + .remove(&key.0) + .expect("successful result has entry for all keys; qed") + .map(StorageData) + ) + .map_err(client_err) + ))), + Err(error) => Either::Right(ready(Err(error))), + }); + + Box::new(child_storage.boxed().compat()) + } + + fn storage_hash( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(ChildStateBackend::storage(self, block, storage_key, key) + .and_then(|maybe_storage| + result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) + ) + ) + } +} + /// Resolve header by hash. fn resolve_header>( remote_blockchain: &dyn RemoteBlockchain, diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 8d351b00c71b5..24ea59dc484ec 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -47,7 +47,7 @@ fn should_return_storage() { .add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); - let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); + let (client, child) = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); @@ -67,7 +67,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, key, Some(genesis_hash).into()) + child.storage(storage_key, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -83,13 +83,13 @@ fn should_return_child_storage() { .add_child_storage(&child_info, "key", vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); - let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); + let (_client, child) 
= new_full(client, Subscriptions::new(Arc::new(core.executor()))); let child_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); let key = StorageKey(b"key".to_vec()); assert_matches!( - client.child_storage( + child.storage( child_key.clone(), key.clone(), Some(genesis_hash).into(), @@ -97,7 +97,7 @@ fn should_return_child_storage() { Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); assert_matches!( - client.child_storage_hash( + child.storage_hash( child_key.clone(), key.clone(), Some(genesis_hash).into(), @@ -105,7 +105,7 @@ fn should_return_child_storage() { Ok(true) ); assert_matches!( - client.child_storage_size( + child.storage_size( child_key.clone(), key.clone(), None, @@ -119,7 +119,7 @@ fn should_call_contract() { let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); - let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); + let (client, _child) = new_full(client, Subscriptions::new(Arc::new(core.executor()))); assert_matches!( client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()).wait(), @@ -135,7 +135,7 @@ fn should_notify_about_storage_changes() { { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); api.subscribe_storage(Default::default(), subscriber, None.into()); @@ -168,7 +168,7 @@ fn should_send_initial_storage_changes_and_notifications() { { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); @@ -204,7 +204,7 @@ fn 
should_send_initial_storage_changes_and_notifications() { fn should_query_storage() { fn run_tests(mut client: Arc) { let core = tokio::runtime::Runtime::new().unwrap(); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); let mut add_block = |nonce| { let mut builder = client.new_block(Default::default()).unwrap(); @@ -389,7 +389,7 @@ fn should_return_runtime_version() { let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",2],\ @@ -412,7 +412,7 @@ fn should_notify_on_runtime_version_initially() { { let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); api.subscribe_runtime_version(Default::default(), subscriber); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 4363e204c07a8..480a7a5d7d00f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1138,7 +1138,7 @@ ServiceBuilder< let subscriptions = sc_rpc::Subscriptions::new(Arc::new(tasks_builder.spawn_handle())); - let (chain, state) = if let (Some(remote_backend), Some(on_demand)) = + let (chain, state, child_state) = if let (Some(remote_backend), Some(on_demand)) = (remote_backend.as_ref(), on_demand.as_ref()) { // Light clients let chain = sc_rpc::chain::new_light( @@ -1147,19 +1147,19 @@ ServiceBuilder< remote_backend.clone(), on_demand.clone() ); - 
let state = sc_rpc::state::new_light( + let (state, child_state) = sc_rpc::state::new_light( client.clone(), subscriptions.clone(), remote_backend.clone(), on_demand.clone() ); - (chain, state) + (chain, state, child_state) } else { // Full nodes let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); - let state = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); - (chain, state) + let (state, child_state) = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); + (chain, state, child_state) }; let author = sc_rpc::author::Author::new( @@ -1175,6 +1175,7 @@ ServiceBuilder< let offchain = sc_rpc::offchain::Offchain::new(storage); sc_rpc_server::rpc_handler(( state::StateApi::to_delegate(state), + state::ChildStateApi::to_delegate(child_state), chain::ChainApi::to_delegate(chain), offchain::OffchainApi::to_delegate(offchain), author::AuthorApi::to_delegate(author), @@ -1184,6 +1185,7 @@ ServiceBuilder< }, None => sc_rpc_server::rpc_handler(( state::StateApi::to_delegate(state), + state::ChildStateApi::to_delegate(child_state), chain::ChainApi::to_delegate(chain), author::AuthorApi::to_delegate(author), system::SystemApi::to_delegate(system), From 63cf6bd01d20e06009c6c96814d9ef1984418cb1 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 19 Mar 2020 20:06:23 +0100 Subject: [PATCH 071/185] remove old rpc --- client/rpc-api/src/state/mod.rs | 48 --------------- client/rpc/src/state/mod.rs | 93 ----------------------------- client/rpc/src/state/state_full.rs | 60 +------------------ client/rpc/src/state/state_light.rs | 52 +--------------- 4 files changed, 2 insertions(+), 251 deletions(-) diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index e94df46736b48..fd709788e5e42 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -72,54 +72,6 @@ pub trait StateApi { #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))] fn storage_size(&self, key: StorageKey, 
hash: Option) -> FutureResult>; - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - /// This method is deprecated in favor of `childstate_getChildKeys`. - #[rpc(name = "state_getChildKeys")] - fn child_storage_keys( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns a child storage entry at a specific block's state. - /// This method is deprecated in favor of `childstate_getChildStorage`. - #[rpc(name = "state_getChildStorage")] - fn child_storage( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the hash of a child storage entry at a block's state. - /// This method is deprecated in favor of `childstate_getChildStorageHash`. - #[rpc(name = "state_getChildStorageHash")] - fn child_storage_hash( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the size of a child storage entry at a block's state. - /// This method is deprecated in favor of `childstate_getChildStorageSize`. - #[rpc(name = "state_getChildStorageSize")] - fn child_storage_size( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - /// Returns the runtime metadata as an opaque blob. 
#[rpc(name = "state_getMetadata")] fn metadata(&self, hash: Option) -> FutureResult; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 29ad2f7538eee..1805ac5351991 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -104,49 +104,6 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - fn child_storage_keys( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - ) -> FutureResult>; - - /// Returns a child storage entry at a specific block's state. - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the hash of a child storage entry at a block's state. - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the size of a child storage entry at a block's state. - fn child_storage_size( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) - .map(|x| x.map(|x| x.0.len() as u64))) - } - /// Returns the runtime metadata as an opaque blob. 
fn metadata(&self, block: Option) -> FutureResult; @@ -309,50 +266,6 @@ impl StateApi for State self.backend.storage_size(block, key) } - fn child_storage( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, child_info, child_type, key) - } - - fn child_storage_keys( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key_prefix: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) - } - - fn child_storage_hash( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) - } - - fn child_storage_size( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) - } - fn metadata(&self, block: Option) -> FutureResult { self.backend.metadata(block) } @@ -488,12 +401,6 @@ impl ChildStateApi for ChildState } } -const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; - -fn child_resolution_error() -> Error { - client_err(sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string())) -} - fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index d27086a02c176..599b8af349759 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -39,8 +39,7 @@ use sp_runtime::{ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, 
Result}, - client_err, child_resolution_error}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err}; use std::marker::PhantomData; use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider}; @@ -307,63 +306,6 @@ impl StateBackend for FullState, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage_keys( - self, - block, - child_storage_key, - prefix, - ) - } - - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage( - self, - block, - child_storage_key, - key, - ) - } - - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage_hash( - self, - block, - child_storage_key, - key, - ) - } - fn metadata(&self, block: Option) -> FutureResult { Box::new(result( self.block_or_best(block) diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 6e1aa6bc1f07d..22bee62950d62 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -53,8 +53,7 @@ use sp_core::{ use sp_version::RuntimeVersion; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err, - child_resolution_error}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err}; /// Storage data map of storage keys => (optional) storage value. 
type StorageMap = HashMap>; @@ -242,55 +241,6 @@ impl StateBackend for LightState, - _child_storage_key: StorageKey, - _child_info: StorageKey, - _child_type: u32, - _prefix: StorageKey, - ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage( - self, - block, - child_storage_key, - key, - ) - } - - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage_hash( - self, - block, - child_storage_key, - key, - ) - } - fn metadata(&self, block: Option) -> FutureResult { let metadata = self.call(block, "Metadata_metadata".into(), Bytes(Vec::new())) .and_then(|metadata| OpaqueMetadata::decode(&mut &metadata.0[..]) From f21606f0b0a949cefcb95922a81d3cc747c111f0 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 20 Mar 2020 14:39:14 +0100 Subject: [PATCH 072/185] review change. 
--- .../network/src/protocol/light_client_handler.rs | 2 +- frame/contracts/src/account_db.rs | 12 ++++++------ frame/contracts/src/lib.rs | 16 ++++++++-------- frame/contracts/src/rent.rs | 6 +++--- .../state-machine/src/overlayed_changes.rs | 2 +- test-utils/client/src/lib.rs | 2 ++ 6 files changed, 21 insertions(+), 19 deletions(-) diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index f6588ea19836b..ecbd62e431de4 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -514,7 +514,7 @@ where Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; - let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( + let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref) diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index f65ecb0d8cda1..fb1ec52b3d060 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -128,7 +128,7 @@ impl AccountDb for DirectAccountDb { trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(&crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(&crate::child_trie_info(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -175,13 +175,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. 
(true, Some(info), None) => { - child::kill_storage(&info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_info()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. (true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_info()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -220,16 +220,16 @@ impl AccountDb for DirectAccountDb { for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - &new_info.child_trie_unique_id(), + &new_info.child_trie_info(), &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(&new_info.child_trie_info(), &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(&new_info.child_trie_info(), &blake2_256(&k)); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 8da776ae6b962..a2d194714d35c 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -225,13 +225,13 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_unique_id(&self) -> ChildInfo { - trie_unique_id(&self.trie_id[..]) + pub fn child_trie_info(&self) -> ChildInfo { + child_trie_info(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn trie_unique_id(trie_id: &[u8]) -> ChildInfo { +pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { ChildInfo::new_default(trie_id) } @@ -804,11 +804,11 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( - &origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), ).map(|value| { child::kill( - &origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), ); @@ -821,7 +821,7 @@ impl Module { // This operation is cheap enough because last_write (delta not included) // is not this block as it has been checked earlier. &child::root( - &origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), )[..], code_hash, ); @@ -829,7 +829,7 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( - &origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), &value, ); @@ -933,7 +933,7 @@ decl_storage! { impl OnKilledAccount for Module { fn on_killed_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_info()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index dfcbc997c5b22..1aa52fff31435 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,7 +223,7 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( - &alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_info(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -235,7 +235,7 @@ fn enact_verdict( // Note: this operation is heavy. 
let child_storage_root = child::root( - &alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_info(), ); let tombstone = >::new( @@ -246,7 +246,7 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( - &alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_info(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 2dc56bc772d3d..c72cfc5c1cf58 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -76,7 +76,7 @@ pub struct OverlayedValue { pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, - /// Child storage changes. + /// Child storage changes. The map key is the child storage key without the common prefix. pub children_default: HashMap, ChildInfo)>, } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index fad08c8238669..4880b296c7048 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -66,6 +66,8 @@ impl GenesisInit for () { pub struct TestClientBuilder { execution_strategies: ExecutionStrategies, genesis_init: G, + /// The key is an unprefixed storage key, this only contains + /// default child trie content. child_storage_extension: HashMap, StorageChild>, backend: Arc, _executor: std::marker::PhantomData, From 1b2a30cd9938ee776ebe441279374d88e8e81a27 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 20 Mar 2020 20:47:40 +0100 Subject: [PATCH 073/185] Using `PrefixedStorageKey` type for child storage key that are prefixed. 
--- Cargo.lock | 21 +++++ client/api/src/backend.rs | 4 +- client/api/src/light.rs | 6 +- client/api/src/proof_provider.rs | 4 +- client/db/src/changes_tries_storage.rs | 3 +- client/network/src/protocol.rs | 42 +++++---- .../src/protocol/light_client_handler.rs | 36 ++++--- client/network/src/protocol/light_dispatch.rs | 13 ++- client/rpc-api/src/child_state/mod.rs | 10 +- client/rpc/src/state/mod.rs | 18 ++-- client/rpc/src/state/state_full.rs | 14 +-- client/rpc/src/state/state_light.rs | 11 ++- client/rpc/src/state/tests.rs | 11 ++- client/src/client.rs | 16 ++-- client/src/light/fetcher.rs | 7 +- primitives/state-machine/src/backend.rs | 4 +- primitives/state-machine/src/basic.rs | 2 +- .../state-machine/src/changes_trie/build.rs | 6 +- .../src/changes_trie/build_cache.rs | 15 +-- .../src/changes_trie/changes_iterator.rs | 26 ++++-- .../state-machine/src/changes_trie/input.rs | 18 +++- .../state-machine/src/changes_trie/mod.rs | 3 +- .../state-machine/src/changes_trie/prune.rs | 3 +- .../state-machine/src/changes_trie/storage.rs | 5 +- primitives/state-machine/src/ext.rs | 4 +- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/storage/Cargo.toml | 1 + primitives/storage/src/lib.rs | 93 ++++++++++++++----- test-utils/runtime/client/src/lib.rs | 2 +- 29 files changed, 256 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 189f85013c0c1..1ff40208ebc79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5341,6 +5341,26 @@ dependencies = [ "rust-argon2", ] +[[package]] +name = "ref-cast" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" +dependencies = [ + "proc-macro2", + "quote", + 
"syn", +] + [[package]] name = "regex" version = "1.3.4" @@ -7420,6 +7440,7 @@ name = "sp-storage" version = "2.0.0-alpha.4" dependencies = [ "impl-serde 0.2.3", + "ref-cast", "serde", "sp-debug-derive", "sp-std", diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 68dd61b233ae0..33a370c7cb2c5 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -26,7 +26,7 @@ use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; -use sp_storage::{StorageData, StorageKey, ChildInfo}; +use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; use crate::{ blockchain::{ Backend as BlockchainBackend, well_known_cache_keys @@ -349,7 +349,7 @@ pub trait StorageProvider> { &self, first: NumberFor, last: BlockId, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>>; } diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 2911d77f18209..30e6d14d557f1 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, generic::BlockId }; -use sp_core::ChangesTrieConfigurationRange; +use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey}; use sp_state_machine::StorageProof; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, @@ -81,7 +81,7 @@ pub struct RemoteReadChildRequest { /// Header of block at which read is performed. pub header: Header, /// Storage key for child. - pub storage_key: Vec, + pub storage_key: PrefixedStorageKey, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. @@ -105,7 +105,7 @@ pub struct RemoteChangesRequest { /// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node. 
pub tries_roots: (Header::Number, Header::Hash, Vec), /// Optional Child Storage key to read. - pub storage_key: Option>, + pub storage_key: Option, /// Storage key to read. pub key: Vec, /// Number of times to retry request. None means that default RETRY_COUNT is used. diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index a805baf42b8f6..93160855eaebe 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -19,7 +19,7 @@ use sp_runtime::{ traits::{Block as BlockT}, }; use crate::{StorageProof, ChangesProof}; -use sp_storage::{ChildInfo, StorageKey}; +use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; /// Interface for providing block proving utilities. pub trait ProofProvider { @@ -64,7 +64,7 @@ pub trait ProofProvider { last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result>; } diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index a28cd604fe363..55e740f43462a 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -27,6 +27,7 @@ use sp_trie::MemoryDB; use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache}; use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; +use sp_core::storage::PrefixedStorageKey; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub, }; @@ -481,7 +482,7 @@ where fn with_cached_changed_keys( &self, root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>>), ) -> bool { self.build_cache.read().with_changed_keys(root, functor) } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 1e6158a59ecc3..2635a24f8dd3f 100644 --- 
a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::listeners::ListenerId}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo, ChildType}; +use sp_core::storage::{StorageKey, PrefixedStorageKey, ChildInfo, ChildType}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -312,14 +312,14 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { who: &PeerId, id: RequestId, block: ::Hash, - storage_key: Vec, + storage_key: PrefixedStorageKey, keys: Vec>, ) { let message: Message = message::generic::Message::RemoteReadChildRequest( message::RemoteReadChildRequest { id, block, - storage_key, + storage_key: storage_key.key(), keys, }); @@ -352,7 +352,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { last: ::Hash, min: ::Hash, max: ::Hash, - storage_key: Option>, + storage_key: Option, key: Vec, ) { let message: Message = message::generic::Message::RemoteChangesRequest(message::RemoteChangesRequest { @@ -361,7 +361,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { last, min, max, - storage_key, + storage_key: storage_key.map(|p| p.key()), key, }); @@ -1608,7 +1608,8 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let child_info = match ChildType::from_prefixed_key(&request.storage_key) { + let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match prefixed_key.and_then(|key| ChildType::from_prefixed_key(key)) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; @@ -1708,23 +1709,32 @@ impl Protocol { 
request.first, request.last ); - let storage_key = request.storage_key.map(|sk| StorageKey(sk)); let key = StorageKey(request.key); - let proof = match self.context_data.chain.key_changes_proof( - request.first, - request.last, - request.min, - request.max, - storage_key.as_ref(), + let prefixed_key = if let Some(storage_key) = request.storage_key.as_ref() { + if let Some(storage_key) = PrefixedStorageKey::new_ref(storage_key) { + Ok(Some(storage_key)) + } else { + Err("Invalid prefixed storage key".into()) + } + } else { + Ok(None) + }; + let (first, last, min, max) = (request.first, request.last, request.min, request.max); + let proof = match prefixed_key.and_then(|p_key| self.context_data.chain.key_changes_proof( + first, + last, + min, + max, + p_key, &key, - ) { + )) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", request.id, who, - if let Some(sk) = storage_key { - format!("{} : {}", sk.0.to_hex::(), key.0.to_hex::()) + if let Some(sk) = request.storage_key.as_ref() { + format!("{} : {}", sk.to_hex::(), key.0.to_hex::()) } else { key.0.to_hex::() }, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index ecbd62e431de4..085bd06e4cca5 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -55,7 +55,7 @@ use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, ChildType, StorageKey}; +use sp_core::storage::{ChildInfo, ChildType, StorageKey, PrefixedStorageKey}; use sp_blockchain::{Error as ClientError}; use sp_runtime::{ traits::{Block, Header, NumberFor, Zero}, @@ -510,7 +510,8 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let child_info = match ChildType::from_prefixed_key(&request.storage_key) { + let 
prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match prefixed_key.and_then(|key| ChildType::from_prefixed_key(key)) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; @@ -588,20 +589,25 @@ where let min = Decode::decode(&mut request.min.as_ref())?; let max = Decode::decode(&mut request.max.as_ref())?; let key = StorageKey(request.key.clone()); - let storage_key = - if request.storage_key.is_empty() { - None + let storage_key = if request.storage_key.is_empty() { + Ok(None) + } else { + if let Some(storage_key) = PrefixedStorageKey::new_ref(&request.storage_key) { + Ok(Some(storage_key)) } else { - Some(StorageKey(request.storage_key.clone())) - }; + Err("Invalid prefix for storage key.".into()) + } + }; - let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key.as_ref(), &key) { + let proof = match storage_key.and_then(|storage_key| { + self.chain.key_changes_proof(first, last, min, max, storage_key, &key) + }) { Ok(proof) => proof, Err(error) => { log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", peer, - if let Some(sk) = storage_key { - format!("{} : {}", sk.0.to_hex::(), key.0.to_hex::()) + if !request.storage_key.is_empty() { + format!("{} : {}", request.storage_key.to_hex::(), key.0.to_hex::()) } else { key.0.to_hex::() }, @@ -918,7 +924,7 @@ fn serialize_request(request: &Request) -> api::v1::light::Request Request::ReadChild { request, .. 
} => { let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), - storage_key: request.storage_key.clone(), + storage_key: request.storage_key.clone().key(), keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -937,7 +943,7 @@ fn serialize_request(request: &Request) -> api::v1::light::Request last: request.last_block.1.encode(), min: request.tries_roots.1.encode(), max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().unwrap_or_default(), + storage_key: request.storage_key.clone().map(|s| s.key()).unwrap_or_default(), key: request.key.clone(), }; api::v1::light::request::Request::RemoteChangesRequest(r) @@ -1562,10 +1568,11 @@ mod tests { #[test] fn receives_remote_read_child_response() { let mut chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:default:sub".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b":key".to_vec()], retry_count: None, }; @@ -1662,10 +1669,11 @@ mod tests { #[test] fn send_receive_read_child() { let chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:default:sub".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b":key".to_vec()], retry_count: None, }; diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 94d2e35a1278d..d35855d9c45e0 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -35,6 +35,7 @@ use libp2p::PeerId; use crate::config::Roles; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use 
sc_peerset::ReputationChange; +use sp_core::storage::PrefixedStorageKey; /// Remote request timeout. const REQUEST_TIMEOUT: Duration = Duration::from_secs(15); @@ -69,7 +70,7 @@ pub trait LightDispatchNetwork { who: &PeerId, id: RequestId, block: ::Hash, - storage_key: Vec, + storage_key: PrefixedStorageKey, keys: Vec>, ); @@ -92,7 +93,7 @@ pub trait LightDispatchNetwork { last: ::Hash, min: ::Hash, max: ::Hash, - storage_key: Option>, + storage_key: Option, key: Vec, ); @@ -678,6 +679,7 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use futures::channel::oneshot; + use sp_core::storage::PrefixedStorageKey; use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, @@ -821,11 +823,11 @@ pub mod tests { } fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} - fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, + fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: PrefixedStorageKey, _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, - _: ::Hash, _: ::Hash, _: Option>, _: Vec) {} + _: ::Hash, _: ::Hash, _: Option, _: Vec) {} fn send_body_request(&mut self, _: &PeerId, _: RequestId, _: BlockAttributes, _: FromBlock<::Hash, <::Header as HeaderT>::Number>, _: Option, _: Direction, _: Option) {} } @@ -1043,12 +1045,13 @@ pub mod tests { let peer0 = PeerId::random(); light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); + let child_info = sp_core::storage::ChildInfo::new_default(&b":child_storage:default:sub"[..]); let (tx, response) = oneshot::channel(); 
light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild( RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:default:sub".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b":key".to_vec()], retry_count: None, }, tx)); diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs index f9027b0f15a08..3c530b64dec30 100644 --- a/client/rpc-api/src/child_state/mod.rs +++ b/client/rpc-api/src/child_state/mod.rs @@ -17,7 +17,7 @@ //! Substrate state API. use jsonrpc_derive::rpc; -use sp_core::storage::{StorageKey, StorageData}; +use sp_core::storage::{StorageKey, PrefixedStorageKey, StorageData}; use crate::state::error::FutureResult; pub use self::gen_client::Client as ChildStateClient; @@ -32,7 +32,7 @@ pub trait ChildStateApi { #[rpc(name = "childstate_getKeys")] fn storage_keys( &self, - child_storage_key: StorageKey, + child_storage_key: PrefixedStorageKey, prefix: StorageKey, hash: Option ) -> FutureResult>; @@ -41,7 +41,7 @@ pub trait ChildStateApi { #[rpc(name = "childstate_getStorage")] fn storage( &self, - child_storage_key: StorageKey, + child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option ) -> FutureResult>; @@ -50,7 +50,7 @@ pub trait ChildStateApi { #[rpc(name = "childstate_getStorageHash")] fn storage_hash( &self, - child_storage_key: StorageKey, + child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option ) -> FutureResult>; @@ -59,7 +59,7 @@ pub trait ChildStateApi { #[rpc(name = "childstate_getStorageSize")] fn storage_size( &self, - child_storage_key: StorageKey, + child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 1805ac5351991..d61cd43773328 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -28,7 +28,7 @@ use rpc::{Result as RpcResult, futures::{Future, 
future::result}}; use sc_rpc_api::Subscriptions; use sc_client::{light::{blockchain::RemoteBlockchain, fetcher::Fetcher}}; -use sp_core::{Bytes, storage::{StorageKey, StorageData, StorageChangeSet}}; +use sp_core::{Bytes, storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}}; use sp_version::RuntimeVersion; use sp_runtime::traits::Block as BlockT; @@ -320,7 +320,7 @@ pub trait ChildStateBackend: Send + Sync + 'static fn storage_keys( &self, block: Option, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, prefix: StorageKey, ) -> FutureResult>; @@ -328,7 +328,7 @@ pub trait ChildStateBackend: Send + Sync + 'static fn storage( &self, block: Option, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult>; @@ -336,7 +336,7 @@ pub trait ChildStateBackend: Send + Sync + 'static fn storage_hash( &self, block: Option, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult>; @@ -344,7 +344,7 @@ pub trait ChildStateBackend: Send + Sync + 'static fn storage_size( &self, block: Option, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { Box::new(self.storage(block, storage_key, key) @@ -366,7 +366,7 @@ impl ChildStateApi for ChildState fn storage( &self, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, block: Option ) -> FutureResult> { @@ -375,7 +375,7 @@ impl ChildStateApi for ChildState fn storage_keys( &self, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key_prefix: StorageKey, block: Option ) -> FutureResult> { @@ -384,7 +384,7 @@ impl ChildStateApi for ChildState fn storage_hash( &self, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, block: Option ) -> FutureResult> { @@ -393,7 +393,7 @@ impl ChildStateApi for ChildState fn storage_size( &self, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, block: 
Option ) -> FutureResult> { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 599b8af349759..273d421dc9da5 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -30,7 +30,7 @@ use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata use sc_client::BlockchainEvents; use sp_core::{ Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, - ChildInfo, ChildType}, + ChildInfo, ChildType, PrefixedStorageKey}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -471,13 +471,13 @@ impl ChildStateBackend for FullState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child storage key".into()), }; @@ -493,13 +493,13 @@ impl ChildStateBackend for FullState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child storage key".into()), }; @@ -515,13 +515,13 @@ impl ChildStateBackend for FullState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + let child_info = match ChildType::from_prefixed_key(&storage_key) { 
Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child storage key".into()), }; diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 22bee62950d62..ae33bd44cc02c 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -48,7 +48,8 @@ use sc_client::{ }, }; use sp_core::{ - Bytes, OpaqueMetadata, storage::{StorageKey, StorageData, StorageChangeSet}, + Bytes, OpaqueMetadata, + storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}, }; use sp_version::RuntimeVersion; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; @@ -458,7 +459,7 @@ impl ChildStateBackend for LightState, - _storage_key: StorageKey, + _storage_key: PrefixedStorageKey, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -467,7 +468,7 @@ impl ChildStateBackend for LightState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -477,7 +478,7 @@ impl ChildStateBackend for LightState Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { block, header, - storage_key: storage_key.0, + storage_key, keys: vec![key.0.clone()], retry_count: Default::default(), }).then(move |result| ready(result @@ -497,7 +498,7 @@ impl ChildStateBackend for LightState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { Box::new(ChildStateBackend::storage(self, block, storage_key, key) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 24ea59dc484ec..74455c99f61f8 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -32,7 +32,11 @@ use substrate_test_runtime_client::{ }; const STORAGE_KEY: &[u8] = b"child"; -const PREFIXED_STORAGE_KEY: &[u8] = b":child_storage:default:child"; + +fn 
prefixed_storage_key() -> PrefixedStorageKey { + let child_info = ChildInfo::new_default(&b":child_storage:default:child"[..]); + child_info.prefixed_storage_key() +} #[test] fn should_return_storage() { @@ -49,7 +53,6 @@ fn should_return_storage() { let genesis_hash = client.genesis_hash(); let (client, child) = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); - let storage_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()).wait() @@ -67,7 +70,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - child.storage(storage_key, key, Some(genesis_hash).into()) + child.storage(prefixed_storage_key(), key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -84,7 +87,7 @@ fn should_return_child_storage() { .build()); let genesis_hash = client.genesis_hash(); let (_client, child) = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); + let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); diff --git a/client/src/client.rs b/client/src/client.rs index 8ec045b7f57ac..f273cae650bd5 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -26,8 +26,8 @@ use parking_lot::{Mutex, RwLock}; use codec::{Encode, Decode}; use hash_db::Prefix; use sp_core::{ - ChangesTrieConfiguration, convert_hash, traits::CodeExecutor, - NativeOrEncoded, storage::{StorageKey, StorageData, well_known_keys, ChildInfo}, + ChangesTrieConfiguration, convert_hash, traits::CodeExecutor, NativeOrEncoded, + storage::{StorageKey, PrefixedStorageKey, StorageData, well_known_keys, ChildInfo}, }; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::{ @@ -344,7 +344,7 @@ impl Client where last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: 
Option<&PrefixedStorageKey>, key: &StorageKey, cht_size: NumberFor, ) -> sp_blockchain::Result> { @@ -393,7 +393,7 @@ impl Client where fn with_cached_changed_keys( &self, root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>>), ) -> bool { self.storage.with_cached_changed_keys(root, functor) } @@ -438,7 +438,7 @@ impl Client where number: last_number, }, max_number, - storage_key.as_ref().map(|x| &x.0[..]), + storage_key, &key.0, ) .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; @@ -1146,7 +1146,7 @@ impl ProofProvider for Client where last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result> { self.key_changes_proof_with_cht_size( @@ -1345,7 +1345,7 @@ impl StorageProvider for Client wher &self, first: NumberFor, last: BlockId, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>> { let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; @@ -1376,7 +1376,7 @@ impl StorageProvider for Client wher range_first, &range_anchor, best_number, - storage_key.as_ref().map(|x| &x.0[..]), + storage_key, &key.0) .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index ce2434d6c6859..ef6a062cf3c07 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -136,7 +136,7 @@ impl> LightDataChecker { number: request.last_block.0, }, remote_max_block, - request.storage_key.as_ref().map(Vec::as_slice), + request.storage_key.as_ref(), &request.key) .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; result.extend(result_range); @@ -243,7 +243,7 @@ impl FetchChecker for LightDataChecker request: 
&RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - let child_info = match ChildType::from_prefixed_key(&request.storage_key[..]) { + let child_info = match ChildType::from_prefixed_key(&request.storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; @@ -512,6 +512,7 @@ pub mod tests { #[test] fn storage_child_read_proof_is_generated_and_checked() { + let child_info = ChildInfo::new_default(&b"child1"[..]); let ( local_checker, remote_block_header, @@ -522,7 +523,7 @@ pub mod tests { &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, - storage_key: b":child_storage:default:child1".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 5334a3b8c427f..c3b2146a73ae1 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -187,9 +187,9 @@ pub trait Backend: std::fmt::Debug { let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { - child_roots.push((prefixed_storage_key, None)); + child_roots.push((prefixed_storage_key.key(), None)); } else { - child_roots.push((prefixed_storage_key, Some(child_root.encode()))); + child_roots.push((prefixed_storage_key.key(), Some(child_root.encode()))); } } let (root, parent_txs) = self.storage_root( diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 54c21dfc2057c..b8b3210a87c9c 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -242,7 +242,7 @@ impl Externalities for BasicExternalities { if &empty_hash[..] == &child_root[..] { top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(prefixed_storage_key, child_root); + top.insert(prefixed_storage_key.key(), child_root); } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index cf1a2e3bfba1b..0f60c8e317f70 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -32,7 +32,7 @@ use crate::{ input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, }, }; -use sp_core::storage::{ChildInfo, ChildType}; +use sp_core::storage::{ChildInfo, ChildType, PrefixedStorageKey}; /// Prepare input pairs for building a changes trie of given block. 
/// @@ -280,7 +280,7 @@ fn prepare_digest_input<'a, H, Number>( return Ok((map, child_map)); } - let mut children_roots = BTreeMap::::new(); + let mut children_roots = BTreeMap::::new(); { let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), @@ -774,7 +774,7 @@ mod test { ], ); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.to_vec() }).unwrap(), + child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }).unwrap(), &vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]), diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs index 9d0dbb4c1f310..aebebf3a17f59 100644 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ b/primitives/state-machine/src/changes_trie/build_cache.rs @@ -19,6 +19,7 @@ use std::collections::{HashMap, HashSet}; use crate::StorageKey; +use sp_core::storage::PrefixedStorageKey; /// Changes trie build cache. /// @@ -38,7 +39,7 @@ pub struct BuildCache { /// The `Option>` in inner `HashMap` stands for the child storage key. /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key. - changed_keys: HashMap, HashSet>>, + changed_keys: HashMap, HashSet>>, } /// The action to perform when block-with-changes-trie is imported. @@ -56,7 +57,7 @@ pub struct CachedBuildData { block: N, trie_root: H, digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, + changed_keys: HashMap, HashSet>, } /// The action to perform when block-with-changes-trie is imported. 
@@ -72,7 +73,7 @@ pub(crate) enum IncompleteCacheAction { #[derive(Debug, PartialEq)] pub(crate) struct IncompleteCachedBuildData { digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, + changed_keys: HashMap, HashSet>, } impl BuildCache @@ -89,7 +90,7 @@ impl BuildCache } /// Get cached changed keys for changes trie with given root. - pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { + pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { self.changed_keys.get(&root) } @@ -98,7 +99,7 @@ impl BuildCache pub fn with_changed_keys( &self, root: &H, - functor: &mut dyn FnMut(&HashMap, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool { match self.changed_keys.get(&root) { Some(changed_keys) => { @@ -164,7 +165,7 @@ impl IncompleteCacheAction { /// Insert changed keys of given storage into cached data. pub(crate) fn insert( self, - storage_key: Option, + storage_key: Option, changed_keys: HashSet, ) -> Self { match self { @@ -200,7 +201,7 @@ impl IncompleteCachedBuildData { fn insert( mut self, - storage_key: Option, + storage_key: Option, changed_keys: HashSet, ) -> Self { self.changed_keys.insert(storage_key, changed_keys); diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 685786218c75f..f5a936069ba40 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -22,6 +22,7 @@ use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; use hash_db::Hasher; use num_traits::Zero; +use sp_core::storage::PrefixedStorageKey; use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; @@ -40,7 +41,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &'a 
AnchorBlockId, max: Number, - storage_key: Option<&'a [u8]>, + storage_key: Option<&'a PrefixedStorageKey>, key: &'a [u8], ) -> Result, String> { // we can't query any roots before root @@ -79,7 +80,7 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8], ) -> Result>, String> where H::Out: Codec { // we can't query any roots before root @@ -127,7 +128,7 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8] ) -> Result, String> where H::Out: Encode { key_changes_proof_check_with_db( @@ -150,7 +151,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8] ) -> Result, String> where H::Out: Encode { // we can't query any roots before root @@ -188,7 +189,7 @@ pub struct DrilldownIteratorEssence<'a, H, Number> Number: BlockNumber, H::Out: 'a, { - storage_key: Option<&'a [u8]>, + storage_key: Option<&'a PrefixedStorageKey>, key: &'a [u8], roots_storage: &'a dyn RootsStorage, storage: &'a dyn Storage, @@ -238,7 +239,7 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> let trie_root = if let Some(storage_key) = self.storage_key { let child_key = ChildIndex { block: block.clone(), - storage_key: storage_key.to_vec(), + storage_key: storage_key.clone(), }.encode(); if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? 
.and_then(|v| >::decode(&mut &v[..]).ok()) @@ -382,6 +383,11 @@ mod tests { use sp_runtime::traits::BlakeTwo256; use super::*; + fn child_key() -> PrefixedStorageKey { + let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); + child_info.prefixed_storage_key() + } + fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { let config = Configuration { digest_interval: 4, digest_levels: 2 }; let backend = InMemoryStorage::with_inputs(vec![ @@ -418,7 +424,7 @@ mod tests { (16, vec![ InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![4, 8]), ]), - ], vec![(b"1".to_vec(), vec![ + ], vec![(child_key(), vec![ (1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![42] }, vec![0]), ]), @@ -535,7 +541,7 @@ mod tests { 1, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, - Some(&b"1"[..]), + Some(&child_key()), &[42], ).and_then(|i| i.collect::, _>>()).is_err()); } @@ -577,7 +583,7 @@ mod tests { let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof_child = key_changes_proof::( configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]).unwrap(); + &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]).unwrap(); // happens on local light node: @@ -592,7 +598,7 @@ mod tests { local_storage.clear_storage(); let local_result_child = key_changes_proof_check::( configuration_range(&local_config, 0), &local_storage, remote_proof_child, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]); + &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]); // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); diff --git a/primitives/state-machine/src/changes_trie/input.rs 
b/primitives/state-machine/src/changes_trie/input.rs index 4a1420f8486f9..4007620f92ca8 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -21,6 +21,7 @@ use crate::{ StorageKey, StorageValue, changes_trie::BlockNumber }; +use sp_core::storage::PrefixedStorageKey; /// Key of { changed key => set of extrinsic indices } mapping. #[derive(Clone, Debug, PartialEq, Eq)] @@ -49,7 +50,7 @@ pub struct ChildIndex { /// Block at which this key has been inserted in the trie. pub block: Number, /// Storage key this node is responsible for. - pub storage_key: StorageKey, + pub storage_key: PrefixedStorageKey, } /// Value of { changed key => block/digest block numbers } mapping. @@ -176,10 +177,17 @@ impl Decode for InputKey { block: Decode::decode(input)?, key: Decode::decode(input)?, })), - 3 => Ok(InputKey::ChildIndex(ChildIndex { - block: Decode::decode(input)?, - storage_key: Decode::decode(input)?, - })), + 3 => { + let block = Decode::decode(input)?; + if let Some(storage_key) = PrefixedStorageKey::new(Decode::decode(input)?) 
{ + Ok(InputKey::ChildIndex(ChildIndex { + block, + storage_key, + })) + } else { + Err("Invalid prefixed key in change trie".into()) + } + }, _ => Err("Invalid input key variant".into()), } } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index d614992df3033..ee6c6778e0aad 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -71,6 +71,7 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; +use sp_core::storage::PrefixedStorageKey; use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -156,7 +157,7 @@ pub trait Storage: RootsStorage { fn with_cached_changed_keys( &self, root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 87923dc2f593c..05555df305b7c 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -137,7 +137,8 @@ mod tests { #[test] fn prune_works() { fn prepare_storage() -> InMemoryStorage { - let child_key = ChildIndex { block: 67u64, storage_key: b"1".to_vec() }.encode(); + let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); + let child_key = ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() }.encode(); let mut mdb1 = MemoryDB::::default(); let root1 = insert_into_memory_db::( &mut mdb1, vec![(vec![10], vec![20])]).unwrap(); diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 7fb418672872b..81651dd2e719b 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ 
b/primitives/state-machine/src/changes_trie/storage.rs @@ -18,6 +18,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use sp_core::storage::PrefixedStorageKey; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; @@ -96,7 +97,7 @@ impl InMemoryStorage { #[cfg(test)] pub fn with_inputs( mut top_inputs: Vec<(Number, Vec>)>, - children_inputs: Vec<(StorageKey, Vec<(Number, Vec>)>)>, + children_inputs: Vec<(PrefixedStorageKey, Vec<(Number, Vec>)>)>, ) -> Self { let mut mdb = MemoryDB::default(); let mut roots = BTreeMap::new(); @@ -182,7 +183,7 @@ impl Storage for InMemoryStorage, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool { self.cache.with_changed_keys(root, functor) } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 12d40873e074f..33f502a75bdb2 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -458,9 +458,9 @@ where // A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. 
if is_empty { - self.overlay.set_storage(prefixed_storage_key, None); + self.overlay.set_storage(prefixed_storage_key.key(), None); } else { - self.overlay.set_storage(prefixed_storage_key, Some(root.clone())); + self.overlay.set_storage(prefixed_storage_key.key(), Some(root.clone())); } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 9245b53a0493b..b0048d90f4103 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -331,7 +331,7 @@ impl Backend for InMemory where H::Out: Codec { if let Some(child_info) = child_info.as_ref() { let prefix_storage_key = child_info.prefixed_storage_key(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((prefix_storage_key, ch.as_ref().into())); + new_child_roots.push((prefix_storage_key.key(), ch.as_ref().into())); } else { root_map = Some(map); } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 9e90b6ecc6c6e..0df854170abaa 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -14,6 +14,7 @@ sp-std = { version = "2.0.0-alpha.4", default-features = false, path = "../std" serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0-alpha.4", path = "../debug-derive" } +ref-cast = "1.0.0" [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 984c8e4738796..de2a0d7e01856 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -23,6 +23,8 @@ use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; use sp_std::vec::Vec; +use sp_std::ops::{Deref, DerefMut}; +use ref_cast::RefCast; /// Storage key. 
#[derive(PartialEq, Eq, RuntimeDebug)] @@ -32,6 +34,67 @@ pub struct StorageKey( pub Vec, ); +/// Storage key of a child trie, it contains the prefix to the key. +#[derive(PartialEq, Eq, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[repr(transparent)] +#[derive(RefCast)] +pub struct PrefixedStorageKey( + #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] + Vec, +); + +impl Deref for PrefixedStorageKey { + type Target = Vec; + + fn deref(&self) -> &Vec { + &self.0 + } +} + +impl DerefMut for PrefixedStorageKey { + fn deref_mut(&mut self) -> &mut Vec { + &mut self.0 + } +} + +impl PrefixedStorageKey { + /// Create a prefixed storage key from its byte array + /// representation. + /// Returns `None` on unknown prefix. + pub fn new(inner: Vec) -> Option { + let result = PrefixedStorageKey(inner); + // currently only support for child trie key + // note that this function should not be use in a runtime + // as it will change its behavior with future child types. + if ChildType::from_prefixed_key(&result).is_some() { + Some(result) + } else { + None + } + } + + pub fn new_ref(inner: &Vec) -> Option<&Self> { + let result = PrefixedStorageKey::ref_cast(inner); + // currently only support for child trie key + // note that this function should not be use in a runtime + // as it will change its behavior with future child types. + if ChildType::from_prefixed_key(&result).is_some() { + Some(result) + } else { + None + } + } + + /// Get inner key, this should + /// only be needed when writing + /// into parent trie to avoid an + /// allocation. + pub fn key(self) -> Vec { + self.0 + } +} + /// Storage data associated to a [`StorageKey`]. #[derive(PartialEq, Eq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] @@ -109,24 +172,6 @@ pub mod well_known_keys { // Other code might depend on this, so be careful changing this. 
key.starts_with(CHILD_STORAGE_KEY_PREFIX) } - - /// Determine whether a child trie key is valid. - /// - /// For now, the only valid child trie keys are those starting with `:child_storage:default:`. - /// - /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. - pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { - let has_right_prefix = storage_key.starts_with(super::DEFAULT_CHILD_TYPE_PARENT_PREFIX); - if has_right_prefix { - // This is an attempt to catch a change of `is_child_storage_key`, which - // just checks if the key has prefix `:child_storage:` at the moment of writing. - debug_assert!( - is_child_storage_key(&storage_key), - "`is_child_trie_key_valid` is a subset of `is_child_storage_key`", - ); - } - has_right_prefix - } } /// Information related to a child state. @@ -212,7 +257,7 @@ impl ChildInfo { /// Return a the full location in the direct parent of /// this trie. - pub fn prefixed_storage_key(&self) -> Vec { + pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, @@ -222,13 +267,13 @@ impl ChildInfo { /// Returns a the full location in the direct parent of /// this trie. - pub fn into_prefixed_storage_key(self) -> Vec { + pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey { match self { ChildInfo::ParentKeyId(ChildTrieParentKeyId { mut data, }) => { ChildType::ParentKeyId.do_prefix_key(&mut data); - data + PrefixedStorageKey(data) }, } } @@ -264,7 +309,7 @@ impl ChildType { /// Transform a prefixed key into a tuple of the child type /// and the unprefixed representation of the key. 
- pub fn from_prefixed_key<'a>(storage_key: &'a [u8]) -> Option<(Self, &'a [u8])> { + pub fn from_prefixed_key<'a>(storage_key: &'a PrefixedStorageKey) -> Option<(Self, &'a [u8])> { let match_type = |storage_key: &'a [u8], child_type: ChildType| { let prefix = child_type.parent_prefix(); if storage_key.starts_with(prefix) { @@ -277,12 +322,12 @@ impl ChildType { } /// Produce a prefixed key for a given child type. - fn new_prefixed_key(&self, key: &[u8]) -> Vec { + fn new_prefixed_key(&self, key: &[u8]) -> PrefixedStorageKey { let parent_prefix = self.parent_prefix(); let mut result = Vec::with_capacity(parent_prefix.len() + key.len()); result.extend_from_slice(parent_prefix); result.extend_from_slice(key); - result + PrefixedStorageKey(result) } /// Prefixes a vec with the prefix for this child type. diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 3c56fbdcdcc2e..e4849dee99aec 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -128,7 +128,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { child_content.data.clone().into_iter().collect() ); let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); - (prefixed_storage_key, state_root.encode()) + (prefixed_storage_key.key(), state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() From 750cdd8af4e916cf74633825b1f7998a1a415eb1 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 23 Mar 2020 10:00:50 +0100 Subject: [PATCH 074/185] Fix rpc test. 
--- client/rpc/src/state/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 74455c99f61f8..57c91c13540ff 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -34,7 +34,7 @@ use substrate_test_runtime_client::{ const STORAGE_KEY: &[u8] = b"child"; fn prefixed_storage_key() -> PrefixedStorageKey { - let child_info = ChildInfo::new_default(&b":child_storage:default:child"[..]); + let child_info = ChildInfo::new_default(&STORAGE_KEY[..]); child_info.prefixed_storage_key() } From cc65f4483528b27012c446a590555869bb77b20a Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 23 Mar 2020 10:08:20 +0100 Subject: [PATCH 075/185] bump spec version --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index ccd2eba78f329..ba219a1890e6b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -83,7 +83,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. 
- spec_version: 239, + spec_version: 240, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; From f1b23dfa420f80457b60ace224808bf843b0bbe1 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 26 Mar 2020 20:06:54 +0100 Subject: [PATCH 076/185] Apply review suggestion --- client/network/src/protocol.rs | 23 ++++++--------- .../src/protocol/light_client_handler.rs | 19 +++++-------- client/rpc-api/src/child_state/mod.rs | 3 ++ primitives/state-machine/src/backend.rs | 4 +-- primitives/state-machine/src/basic.rs | 2 +- .../state-machine/src/changes_trie/input.rs | 15 +++------- primitives/state-machine/src/ext.rs | 4 +-- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/storage/src/lib.rs | 28 ++++--------------- test-utils/runtime/client/src/lib.rs | 2 +- 10 files changed, 35 insertions(+), 67 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 788c88b620f9c..d8c8f8dc9cdd1 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -323,7 +323,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { message::RemoteReadChildRequest { id, block, - storage_key: storage_key.key(), + storage_key: storage_key.into_inner(), keys, }); @@ -365,7 +365,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { last, min, max, - storage_key: storage_key.map(|p| p.key()), + storage_key: storage_key.map(|p| p.into_inner()), key, }); @@ -1626,7 +1626,7 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, HexDisplay::from(&request.storage_key), keys_str(), request.block); let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); - let child_info = match prefixed_key.and_then(|key| ChildType::from_prefixed_key(key)) { + let child_info = match ChildType::from_prefixed_key(prefixed_key) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage 
key".into()), }; @@ -1727,24 +1727,17 @@ impl Protocol { request.last ); let key = StorageKey(request.key); - let prefixed_key = if let Some(storage_key) = request.storage_key.as_ref() { - if let Some(storage_key) = PrefixedStorageKey::new_ref(storage_key) { - Ok(Some(storage_key)) - } else { - Err("Invalid prefixed storage key".into()) - } - } else { - Ok(None) - }; + let prefixed_key = request.storage_key.as_ref() + .map(|storage_key| PrefixedStorageKey::new_ref(storage_key)); let (first, last, min, max) = (request.first, request.last, request.min, request.max); - let proof = match prefixed_key.and_then(|p_key| self.context_data.chain.key_changes_proof( + let proof = match self.context_data.chain.key_changes_proof( first, last, min, max, - p_key, + prefixed_key, &key, - )) { + ) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index dc6ad3fc34048..4f20c9ced0526 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -513,7 +513,7 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); - let child_info = match prefixed_key.and_then(|key| ChildType::from_prefixed_key(key)) { + let child_info = match ChildType::from_prefixed_key(prefixed_key) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; @@ -592,18 +592,12 @@ where let max = Decode::decode(&mut request.max.as_ref())?; let key = StorageKey(request.key.clone()); let storage_key = if request.storage_key.is_empty() { - Ok(None) + None } else { - if let Some(storage_key) = PrefixedStorageKey::new_ref(&request.storage_key) { - Ok(Some(storage_key)) - } else { - Err("Invalid 
prefix for storage key.".into()) - } + Some(PrefixedStorageKey::new_ref(&request.storage_key)) }; - let proof = match storage_key.and_then(|storage_key| { - self.chain.key_changes_proof(first, last, min, max, storage_key, &key) - }) { + let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key, &key) { Ok(proof) => proof, Err(error) => { log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", @@ -922,7 +916,7 @@ fn serialize_request(request: &Request) -> api::v1::light::Request Request::ReadChild { request, .. } => { let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), - storage_key: request.storage_key.clone().key(), + storage_key: request.storage_key.clone().into_inner(), keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -941,7 +935,8 @@ fn serialize_request(request: &Request) -> api::v1::light::Request last: request.last_block.1.encode(), min: request.tries_roots.1.encode(), max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().map(|s| s.key()).unwrap_or_default(), + storage_key: request.storage_key.clone().map(|s| s.into_inner()) + .unwrap_or_default(), key: request.key.clone(), }; api::v1::light::request::Request::RemoteChangesRequest(r) diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs index 3c530b64dec30..a46269cad6c0c 100644 --- a/client/rpc-api/src/child_state/mod.rs +++ b/client/rpc-api/src/child_state/mod.rs @@ -23,6 +23,9 @@ use crate::state::error::FutureResult; pub use self::gen_client::Client as ChildStateClient; /// Substrate child state API +/// +/// Note that all `PrefixedStorageKey` are desierialized +/// from json and not guaranted valid. 
#[rpc] pub trait ChildStateApi { /// RPC Metadata diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index c3b2146a73ae1..0d4134b6ad618 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -187,9 +187,9 @@ pub trait Backend: std::fmt::Debug { let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { - child_roots.push((prefixed_storage_key.key(), None)); + child_roots.push((prefixed_storage_key.into_inner(), None)); } else { - child_roots.push((prefixed_storage_key.key(), Some(child_root.encode()))); + child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } let (root, parent_txs) = self.storage_root( diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index b8b3210a87c9c..f03d5c1659ba8 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -242,7 +242,7 @@ impl Externalities for BasicExternalities { if &empty_hash[..] == &child_root[..] { top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(prefixed_storage_key.key(), child_root); + top.insert(prefixed_storage_key.into_inner(), child_root); } } diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs index 4007620f92ca8..4f0f3da40c52b 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -177,17 +177,10 @@ impl Decode for InputKey { block: Decode::decode(input)?, key: Decode::decode(input)?, })), - 3 => { - let block = Decode::decode(input)?; - if let Some(storage_key) = PrefixedStorageKey::new(Decode::decode(input)?) 
{ - Ok(InputKey::ChildIndex(ChildIndex { - block, - storage_key, - })) - } else { - Err("Invalid prefixed key in change trie".into()) - } - }, + 3 => Ok(InputKey::ChildIndex(ChildIndex { + block: Decode::decode(input)?, + storage_key: PrefixedStorageKey::new(Decode::decode(input)?), + })), _ => Err("Invalid input key variant".into()), } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 33f502a75bdb2..fa0e24d2ec1b9 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -458,9 +458,9 @@ where // A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. if is_empty { - self.overlay.set_storage(prefixed_storage_key.key(), None); + self.overlay.set_storage(prefixed_storage_key.into_inner(), None); } else { - self.overlay.set_storage(prefixed_storage_key.key(), Some(root.clone())); + self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone())); } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index b0048d90f4103..58787597534e2 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -331,7 +331,7 @@ impl Backend for InMemory where H::Out: Codec { if let Some(child_info) = child_info.as_ref() { let prefix_storage_key = child_info.prefixed_storage_key(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((prefix_storage_key.key(), ch.as_ref().into())); + new_child_roots.push((prefix_storage_key.into_inner(), ch.as_ref().into())); } else { root_map = Some(map); } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index de2a0d7e01856..eeb57d66770dd 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -61,36 +61,20 @@ 
impl DerefMut for PrefixedStorageKey { impl PrefixedStorageKey { /// Create a prefixed storage key from its byte array /// representation. - /// Returns `None` on unknown prefix. - pub fn new(inner: Vec) -> Option { - let result = PrefixedStorageKey(inner); - // currently only support for child trie key - // note that this function should not be use in a runtime - // as it will change its behavior with future child types. - if ChildType::from_prefixed_key(&result).is_some() { - Some(result) - } else { - None - } + pub fn new(inner: Vec) -> Self { + PrefixedStorageKey(inner) } - pub fn new_ref(inner: &Vec) -> Option<&Self> { - let result = PrefixedStorageKey::ref_cast(inner); - // currently only support for child trie key - // note that this function should not be use in a runtime - // as it will change its behavior with future child types. - if ChildType::from_prefixed_key(&result).is_some() { - Some(result) - } else { - None - } + /// Create a prefixed storage key reference. + pub fn new_ref(inner: &Vec) -> &Self { + PrefixedStorageKey::ref_cast(inner) } /// Get inner key, this should /// only be needed when writing /// into parent trie to avoid an /// allocation. 
- pub fn key(self) -> Vec { + pub fn into_inner(self) -> Vec { self.0 } } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index e4849dee99aec..10360c8076338 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -128,7 +128,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { child_content.data.clone().into_iter().collect() ); let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); - (prefixed_storage_key.key(), state_root.encode()) + (prefixed_storage_key.into_inner(), state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() From c631a462833c1f560432d1cba8fad26184a25eff Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 30 Mar 2020 18:15:59 +0200 Subject: [PATCH 077/185] Fix unrelated warning for CI --- client/network/src/protocol/generic_proto/handler/group.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 69a519134a6ff..6b23263b14c5f 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -64,7 +64,6 @@ use libp2p::swarm::{ NegotiatedSubstream, }; use log::{debug, error}; -use sp_runtime::ConsensusEngineId; use std::{borrow::Cow, error, io, str, task::{Context, Poll}}; /// Implements the `IntoProtocolsHandler` trait of libp2p. 
From 7c8f3935b9a952ba2811dd6d0ca4f16e85f63e32 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 15:49:08 +0200 Subject: [PATCH 078/185] companion fix --- .maintain/gitlab/check_polkadot_companion_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 04524a736acca..65e3af5c7ab3e 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -57,7 +57,7 @@ then # get the last reference to a pr in polkadot pr_data="$(curl -sSL -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME})" pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" - pr_body="$(echo $pr_data | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" + pr_body="$(echo "$pr_data" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" pr_companion="$(echo "${pr_body}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ From b2240f16f4bd4d2224f88566209fc17a6385eaea Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 16:28:39 +0200 Subject: [PATCH 079/185] Update .maintain/gitlab/check_polkadot_companion_build.sh --- .maintain/gitlab/check_polkadot_companion_build.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 65e3af5c7ab3e..e6ea27679aab5 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -57,7 +57,7 @@ then # get the last reference to a pr in polkadot pr_data="$(curl -sSL -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME})" pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" - pr_body="$(echo "$pr_data" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" + pr_body="$(echo 
"${pr_data}" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" pr_companion="$(echo "${pr_body}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ @@ -102,4 +102,3 @@ cargo update # Test Polkadot pr or master branch with this Substrate commit. time cargo test --all --release --verbose - From f3b9f234d88bdfb63020b772736cb45c47f1d3e3 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 17:00:05 +0200 Subject: [PATCH 080/185] Fix compilation errors. --- client/db/src/bench.rs | 16 ++++++++++++++-- client/db/src/lib.rs | 2 +- frame/contracts/src/account_db.rs | 2 +- frame/contracts/src/exec.rs | 2 +- frame/contracts/src/lib.rs | 2 +- frame/contracts/src/tests.rs | 4 ++-- primitives/state-machine/src/backend.rs | 2 +- .../state-machine/src/changes_trie/storage.rs | 3 ++- .../state-machine/src/overlayed_changes.rs | 5 +---- .../state-machine/src/trie_backend_essence.rs | 1 - 10 files changed, 24 insertions(+), 15 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index c626942084c5b..8f55f00766f21 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -98,7 +98,19 @@ impl BenchmarkingState { child_delta, false, ); - state.genesis = transaction.clone().drain(); + let mut keyspace = crate::Keyspaced::new(&[]); + for (info, mut updates) in transaction.clone().into_iter() { + keyspace.change_keyspace(info.keyspace()); + for (key, rc_val) in updates.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; + + state.genesis.insert(key, rc_val); + } + } state.genesis_root = root.clone(); state.commit(root, transaction)?; Ok(state) @@ -287,8 +299,8 @@ impl StateBackend> for BenchmarkingState { } else if rc < 0 { db_transaction.delete(0, &key); } + keys.push(key); } - keys.push(key); } self.record.set(keys); db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; diff --git a/client/db/src/lib.rs 
b/client/db/src/lib.rs index 6e8fd4eebad3b..f5cc98ad8289c 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1169,7 +1169,7 @@ impl Backend { let mut ops: u64 = 0; let mut bytes: u64 = 0; for (key, value) in operation.storage_updates.iter() - .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) { + .chain(operation.child_storage_updates.iter().flat_map(|(_, s, _)| s.iter())) { ops += 1; bytes += key.len() as u64; if let Some(v) = value.as_ref() { diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 524fb376e6d71..14c9ead7e6c22 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -215,7 +215,7 @@ impl AccountDb for DirectAccountDb { new_info.last_write = Some(>::block_number()); } - let child_info = &new_info.child_trie_unique_id(); + let child_info = &new_info.child_trie_info(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( child_info, diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 847bfc2cc4ec9..d8b42b2f9ecae 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -577,7 +577,7 @@ where { let (output, change_set, deferred) = { let mut nested = self.nested(dest, trie_id.map(|trie_id| { - crate::trie_unique_id(&trie_id) + crate::child_trie_info(&trie_id) })); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 52a397995fe50..5a439ed3163f0 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -690,7 +690,7 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; - let child_info = trie_unique_id(&contract_info.trie_id); + let child_info = child_trie_info(&contract_info.trie_id); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs 
index 0c358b92ef2d6..0839aa7ea6312 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -311,8 +311,8 @@ fn account_removal_does_not_remove_storage() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = ::TrieIdGenerator::trie_id(&1); let trie_id2 = ::TrieIdGenerator::trie_id(&2); - let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); - let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); + let child_info1 = crate::child_trie_info(trie_id1.as_ref()); + let child_info2 = crate::child_trie_info(trie_id2.as_ref()); let child_info1 = Some(&child_info1); let child_info2 = Some(&child_info2); let key1 = &[1; 32]; diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 32e7d41c68aaa..0f3af4466c69e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -21,7 +21,7 @@ use sp_core::{Hasher, InnerHasher}; use codec::{Decode, Encode}; use sp_core::{traits::RuntimeCode, - storage::{ChildInfo, ChildrenMap, well_known_keys}}; + storage::{ChildInfo, ChildrenMap, well_known_keys, PrefixedStorageKey}}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ trie_backend::TrieBackend, diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 4fc32ed82ffd7..df731b699eb0f 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -17,7 +17,8 @@ //! Changes trie storage utilities. 
use std::collections::{BTreeMap, HashSet, HashMap}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use hash_db::{Prefix, EMPTY_PREFIX}; +use sp_core::Hasher; use sp_core::storage::PrefixedStorageKey; use sp_core::storage::ChildInfo; use sp_trie::DBValue; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index fbdf3a5c1c857..c9b6a6f6defc2 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -29,7 +29,7 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, PrefixedStorageKey}; use std::{mem, ops}; use sp_core::Hasher; @@ -37,9 +37,6 @@ use sp_core::Hasher; /// Storage key. pub type StorageKey = Vec; -/// Storage key. -pub type PrefixedStorageKey = Vec; - /// Storage value. pub type StorageValue = Vec; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0dc7174c205f7..4c8cde131c440 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -383,7 +383,6 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> } } - /// Key-value pairs storage that is used by trie backend essence. pub trait TrieBackendStorageRef { /// Type of in-memory overlay. 
From 63943ee00a222a1a910e7261b33f3c9d2bb92605 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 17:28:17 +0200 Subject: [PATCH 081/185] last attempt --- .maintain/gitlab/check_polkadot_companion_build.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index e6ea27679aab5..49d247da4e520 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -56,19 +56,24 @@ then # get the last reference to a pr in polkadot pr_data="$(curl -sSL -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME})" + boldprint "pr_dta: #${pr_data}" pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" + boldprint "pr_ref: #${pr_ref}" pr_body="$(echo "${pr_data}" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" + boldprint "pr_body: #${pr_body}" pr_companion="$(echo "${pr_body}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" + boldprint "pr_comp: #${pr_companion}" if [ -z "${pr_companion}" ] then pr_companion="$(echo "${pr_body}" | sed -n -r \ 's;^.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" fi + boldprint "pr_com2: #${pr_companion}" if [ "${pr_companion}" ] then From 784cb2211dfa33f8a73f5fd011295a963f87239a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 19:13:50 +0200 Subject: [PATCH 082/185] Forcing companion pr. 
--- .maintain/gitlab/check_polkadot_companion_build.sh | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 49d247da4e520..e98316eb3f6a4 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -56,24 +56,19 @@ then # get the last reference to a pr in polkadot pr_data="$(curl -sSL -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME})" - boldprint "pr_dta: #${pr_data}" pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" - boldprint "pr_ref: #${pr_ref}" - pr_body="$(echo "${pr_data}" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" - boldprint "pr_body: #${pr_body}" + pr_body="$(echo $pr_data | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" - pr_companion="$(echo "${pr_body}" | sed -n -r \ + pr_companion="$(echo "${pr_data}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" - boldprint "pr_comp: #${pr_companion}" if [ -z "${pr_companion}" ] then pr_companion="$(echo "${pr_body}" | sed -n -r \ 's;^.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" fi - boldprint "pr_com2: #${pr_companion}" if [ "${pr_companion}" ] then @@ -107,3 +102,4 @@ cargo update # Test Polkadot pr or master branch with this Substrate commit. time cargo test --all --release --verbose + From 4bbb24dedda0d5eef20ea2d88d15c90ffb8befba Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 19:54:48 +0200 Subject: [PATCH 083/185] revert ci changes. 
--- .maintain/gitlab/check_polkadot_companion_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index e98316eb3f6a4..04524a736acca 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -59,7 +59,7 @@ then pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" pr_body="$(echo $pr_data | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" - pr_companion="$(echo "${pr_data}" | sed -n -r \ + pr_companion="$(echo "${pr_body}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" From ee679c147d91e5559ef6e4b8bc0c867de2f98036 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 3 Apr 2020 16:56:24 +0200 Subject: [PATCH 084/185] lock update --- Cargo.lock | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 77852437f2122..c281a456db826 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5358,6 +5358,26 @@ dependencies = [ "rust-argon2", ] +[[package]] +name = "ref-cast" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a214c7875e1b63fc1618db7c80efc0954f6156c9ff07699fd9039e255accdd1" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "602eb59cda66fcb9aec25841fb76bc01d2b34282dcdd705028da297db6f3eec8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "regex" version = "1.3.6" @@ -7480,6 +7500,8 @@ name = "sp-storage" version = "2.0.0-alpha.5" dependencies = [ "impl-serde 0.2.3", + "parity-scale-codec", + "ref-cast", "serde", "sp-debug-derive", "sp-std", From 
5d8ae4e10c1d8438f9405d671df41c144a7ca8c2 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 3 Apr 2020 18:50:12 +0200 Subject: [PATCH 085/185] fix compilation errors --- client/src/client.rs | 19 ++++++++++-------- client/src/light/call_executor.rs | 3 ++- primitives/state-machine/src/lib.rs | 20 ++++++++++++++----- .../state-machine/src/proving_backend.rs | 18 ++++++++--------- 4 files changed, 37 insertions(+), 23 deletions(-) diff --git a/client/src/client.rs b/client/src/client.rs index 19a1c26b65dbb..f7b4b6ca38510 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -41,8 +41,8 @@ use sp_runtime::{ use sp_state_machine::{ DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, - ChangesTrieConfigurationRange, key_changes, key_changes_proof, StorageProofKind, - merge_storage_proofs, + ChangesTrieConfigurationRange, key_changes, key_changes_proof, StorageProof, + StorageProofKind, merge_storage_proofs, }; use sc_executor::{RuntimeVersion, RuntimeInfo}; use sp_consensus::{ @@ -55,7 +55,6 @@ use sp_blockchain::{self as blockchain, well_known_cache_keys::Id as CacheKeyId, HeaderMetadata, CachedHeaderMetadata, }; -use sp_trie::StorageProof; use sp_api::{ CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, @@ -479,7 +478,7 @@ impl Client where Ok(()) }, ())?; - Ok(merge_storage_proofs::, _>(proofs)?) + Ok(merge_storage_proofs::, _>(proofs)?) } /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). @@ -1097,8 +1096,10 @@ impl ProofProvider for Client where id: &BlockId, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { + // TODO keep flatten proof here?? or move choice to caller? 
+ // TODO EMCH this should be parametereized fo client self.state_at(id) - .and_then(|state| prove_read(state, keys) + .and_then(|state| prove_read(state, keys, StorageProofKind::Flatten) .map_err(Into::into)) } @@ -1108,8 +1109,9 @@ impl ProofProvider for Client where child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { + // TODO EMCH this should be parametereized fo client self.state_at(id) - .and_then(|state| prove_child_read(state, child_info, keys) + .and_then(|state| prove_child_read(state, child_info, keys, StorageProofKind::Flatten) .map_err(Into::into)) } @@ -1136,8 +1138,9 @@ impl ProofProvider for Client where &self.executor, method, call_data, - ).map(|(r, p)| { - (r, StorageProof::merge(vec![p, code_proof])) + ).and_then(|(r, p)| { + // TODO EMCH using flatten?? + Ok((r, merge_storage_proofs::, _>(vec![p, code_proof])?)) }) } diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index fdbc0af98889b..02b8b58bce0ec 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -29,6 +29,7 @@ use sp_externalities::Extensions; use sp_state_machine::{ self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, + merge_storage_proofs, }; use sp_core::Hasher; @@ -205,7 +206,7 @@ pub fn prove_execution( method, call_data, )?; - let total_proof = merge_storage_proofs::, _>(vec![init_proof, exec_proof])?; + let total_proof = merge_storage_proofs::, _>(vec![init_proof, exec_proof])?; Ok((result, total_proof)) } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 35ca01663317a..5035995220b15 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -42,7 +42,7 @@ mod trie_backend; mod trie_backend_essence; mod stats; -pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, 
TrieMut, DBValue, MemoryDB}; +pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; @@ -581,6 +581,7 @@ where } /// Check execution proof on proving backend, generated by `prove_execution` call. +/// TODO EMCH this is exact copy of non flat version: makes trie_backend a parameter! pub fn execution_flat_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, @@ -619,8 +620,10 @@ pub fn execution_proof_check_on_trie_backend( trie_backend: &TrieBackend>, H>, overlay: &mut OverlayedChanges, exec: &Exec, + spawn_handle: Box, method: &str, call_data: &[u8], + runtime_code: &RuntimeCode, ) -> Result, Box> where H: Hasher, @@ -629,7 +632,15 @@ where N: crate::changes_trie::BlockNumber, { let mut sm = StateMachine::<_, H, N, Exec>::new( - trie_backend, None, overlay, exec, method, call_data, Extensions::default(), + trie_backend, + None, + overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, ); sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( @@ -853,15 +864,14 @@ where /// Check child storage read proof on pre-created flat proving backend. pub fn read_child_proof_check_on_flat_proving_backend( proving_backend: &TrieBackend, H>, - storage_key: &[u8], + child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> where H: Hasher, H::Out: Ord + Codec, { - // Not a prefixed memory db, using empty unique id and include root resolution. 
- proving_backend.child_storage(storage_key, &ChildInfo::top_trie(), key) + proving_backend.child_storage(child_info, key) .map_err(|e| Box::new(e) as Box) } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 2e4e728a3f2d0..39a86ce2cf605 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -18,13 +18,13 @@ use std::sync::Arc; use parking_lot::RwLock; -use codec::{Decode, Codec}; +use codec::{Encode, Decode, Codec}; use log::debug; use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; use sp_core::{Hasher, InnerHasher}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, - record_all_keys, StorageProof, + record_all_keys, }; pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; @@ -32,7 +32,7 @@ use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{BackendStorageDBRef, TrieBackendEssence, TrieBackendStorage, TrieBackendStorageRef}; use crate::{Error, ExecutionError, Backend}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use crate::DBValue; use sp_core::storage::{ChildInfo, ChildType, ChildrenMap}; @@ -168,7 +168,7 @@ impl StorageProof { }; for (child_info, (compact_scheme, proof)) in children { match child_info.child_type() { - ChildType::CryptoUniqueId => { + ChildType::ParentKeyId => { match compact_scheme { CompactScheme::TrieSkipHashes => { // Note that we could check the proof from the unpacking. 
@@ -197,7 +197,7 @@ impl StorageProof { let mut result = ChildrenMap::default(); for (child_info, proof) in children { match child_info.child_type() { - ChildType::CryptoUniqueId => { + ChildType::ParentKeyId => { let root = roots.get(&child_info) .and_then(|r| Decode::decode(&mut &r[..]).ok()) .ok_or_else(|| "Missing root for packing".to_string())?; @@ -222,7 +222,7 @@ impl StorageProof { let mut result = Vec::new(); children.into_iter().for_each(|(child_info, proof)| { match child_info.child_type() { - ChildType::CryptoUniqueId => { + ChildType::ParentKeyId => { // this can get merged with top, since it is proof we do not use prefix result.extend(proof); } @@ -787,7 +787,7 @@ mod tests { use sp_runtime::traits::BlakeTwo256; fn test_proving<'a>( - trie_backend: &'a TrieBackend,Blake2Hasher>, + trie_backend: &'a TrieBackend, BlakeTwo256>, flat: bool, ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { ProvingBackend::new(trie_backend, flat) @@ -934,7 +934,7 @@ mod tests { ).unwrap(); assert_eq!( - proof_check.child_storage(&child_info1, &[64]).unwrap().unwrap(), + proof_check.child_storage(&child_info_1, &[64]).unwrap().unwrap(), vec![64] ); } else { @@ -944,7 +944,7 @@ mod tests { ).unwrap(); assert_eq!( - proof_check.child_storage(&child_info1, &[64]).unwrap().unwrap(), + proof_check.child_storage(&child_info_1, &[64]).unwrap().unwrap(), vec![64] ); } From 226f7cd879a95744662a8978d9693c54170eccc5 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 14 Apr 2020 10:25:20 +0200 Subject: [PATCH 086/185] name of children in chain spec change. 
--- bin/node/cli/res/flaming-fir.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/cli/res/flaming-fir.json b/bin/node/cli/res/flaming-fir.json index 7ed98239b54b6..3612d7284faba 100644 --- a/bin/node/cli/res/flaming-fir.json +++ b/bin/node/cli/res/flaming-fir.json @@ -134,7 +134,7 @@ "0x5f3e4907f716ac89b6347d15ececedca0b6a45321efae92aea15e0740ec7afe7": "0x00000000", "0x5f3e4907f716ac89b6347d15ececedca9220e172bed316605f73f1ff7b4ade98e54094c2d5af8ae10b91e1288f4f59f2946d7738f2c509b7effd909e5e9ba0ad": "0x00" }, - "children": {} + "childrenDefault": {} } } } From 5938d8623c51c4fd33bd0ae5c7eb860d10ef76d4 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 10:23:08 +0200 Subject: [PATCH 087/185] remove terminal space --- primitives/storage/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index c7cbda520b6da..49f24a93cb192 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -382,10 +382,10 @@ impl ChildTrieParentKeyId { /// A few utilities methods are defined. pub struct ChildrenMap(pub BTreeMap); -/// Type alias for storage of children related content. +/// Type alias for storage of children related content. pub type ChildrenVec = Vec<(ChildInfo, T)>; -/// Type alias for storage of children related content. +/// Type alias for storage of children related content. pub type ChildrenSlice<'a, T> = &'a [(ChildInfo, T)]; #[cfg(feature = "std")] From bc2a198dc0da64899fd6d72cb3af28779b267026 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 14:25:27 +0200 Subject: [PATCH 088/185] sp-io documentation changes. 
--- primitives/io/src/lib.rs | 68 +++++++++++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 18 deletions(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index c8004057a78c9..5178fb7169d55 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -145,8 +145,9 @@ pub trait Storage { self.next_storage_key(&key) } - - /// Deprecated, please use dedicated runtime apis. + /// Read child key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::get`). fn child_get( &self, storage_key: &[u8], @@ -160,7 +161,9 @@ pub trait Storage { self.child_storage(&child_info, key).map(|s| s.to_vec()) } - /// Deprecated, please use dedicated runtime apis. + /// Read child key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::read`). fn child_read( &self, storage_key: &[u8], @@ -183,7 +186,9 @@ pub trait Storage { }) } - /// Deprecated, please use dedicated runtime apis. + /// Set a child storage value. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::set`). fn child_set( &mut self, storage_key: &[u8], @@ -198,7 +203,9 @@ pub trait Storage { self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } - /// Deprecated, please use dedicated runtime apis. + /// Remove child key value. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear`). fn child_clear( &mut self, storage_key: &[u8], @@ -212,7 +219,9 @@ pub trait Storage { self.clear_child_storage(&child_info, key); } - /// Deprecated, please use dedicated runtime apis. + /// Remove all child storage values. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::storage_kill`). fn child_storage_kill( &mut self, storage_key: &[u8], @@ -225,7 +234,9 @@ pub trait Storage { self.kill_child_storage(&child_info); } - /// Deprecated, please use dedicated runtime apis. 
+ /// Check a child storage key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::exists`). fn child_exists( &self, storage_key: &[u8], @@ -239,7 +250,9 @@ pub trait Storage { self.exists_child_storage(&child_info, key) } - /// Deprecated, please use dedicated runtime apis. + /// Clear child key by prefix. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear_prefix`). fn child_clear_prefix( &mut self, storage_key: &[u8], @@ -253,7 +266,9 @@ pub trait Storage { self.clear_child_prefix(&child_info, prefix); } - /// Deprecated, please use dedicated runtime apis. + /// Child trie root calcualation. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear_root`). fn child_root( &mut self, storage_key: &[u8], @@ -268,7 +283,9 @@ pub trait Storage { self.child_storage_root(&child_info) } - /// Deprecated, please use dedicated runtime apis. + /// Child storage key iteration. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::next_key`). fn child_next_key( &mut self, storage_key: &[u8], @@ -281,18 +298,17 @@ pub trait Storage { .expect("Invalid child definition"); self.next_child_storage_key(&child_info, key) } - } - /// Interface for accessing the child storage for default child trie, /// from within the runtime. #[runtime_interface] pub trait DefaultChildStorage { - /// `storage_key` is the unprefixed location of the root of the child trie in the parent trie. + + /// Get a default child storage value for a given key. /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. + /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the parent trie. + /// Result is `None` if the value for `key` in the child storage can not be found. 
fn get( &self, storage_key: &[u8], @@ -302,6 +318,8 @@ pub trait DefaultChildStorage { self.child_storage(&child_info, key).map(|s| s.to_vec()) } + /// Allocation efficient variant of `get`. + /// /// Get `key` from child storage, placing the value into `value_out` and return the number /// of bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. @@ -325,6 +343,8 @@ pub trait DefaultChildStorage { }) } + /// Set a child storage value. + /// /// Set `key` to `value` in the child storage denoted by `storage_key`. fn set( &mut self, @@ -336,7 +356,9 @@ pub trait DefaultChildStorage { self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } - /// Clear the given child storage of the given `key` and its value. + /// Clear a child storage key. + /// + /// For the default child storage at `storage_key`, clear value at `key`. fn clear ( &mut self, storage_key: &[u8], @@ -347,6 +369,9 @@ pub trait DefaultChildStorage { } /// Clear an entire child storage. + /// + /// If it exists, the child storage for `storage_key` + /// is removed. fn storage_kill( &mut self, storage_key: &[u8], @@ -355,7 +380,9 @@ pub trait DefaultChildStorage { self.kill_child_storage(&child_info); } - /// Check whether the given `key` exists in storage. + /// Check a child storage key. + /// + /// Check whether the given `key` exists in default child defined at `storage_key`. fn exists( &self, storage_key: &[u8], @@ -365,6 +392,8 @@ pub trait DefaultChildStorage { self.exists_child_storage(&child_info, key) } + /// Clear child default key by prefix. + /// /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. fn clear_prefix( &mut self, @@ -375,8 +404,9 @@ pub trait DefaultChildStorage { self.clear_child_prefix(&child_info, prefix); } - /// "Commit" all existing operations and compute the resulting child storage root. + /// Default child root calculation. 
/// + /// "Commit" all existing operations and compute the resulting child storage root. /// The hashing algorithm is defined by the `Block`. /// /// Returns the SCALE encoded hash. @@ -388,6 +418,8 @@ pub trait DefaultChildStorage { self.child_storage_root(&child_info) } + /// Child storage key iteration. + /// /// Get the next key in storage after the given one in lexicographic order in child storage. fn next_key( &mut self, From 619b454c3ccf7b5d35bfaf8eea3df00084ee1272 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 16:56:26 +0200 Subject: [PATCH 089/185] Retain compatibility with network protocol. --- client/network/src/protocol.rs | 28 +++++-- client/network/src/protocol/message.rs | 106 +++++++++++++++++++++++++ 2 files changed, 126 insertions(+), 8 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 84b913b284c62..9b55cc8d6a8d2 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -38,7 +38,7 @@ use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub }; use sp_arithmetic::traits::SaturatedConversion; -use message::{BlockAnnounce, Message}; +use message::{BlockAnnounce, Message, MessageV6}; use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; use prometheus_endpoint::{Registry, Gauge, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; use sync::{ChainSync, SyncState}; @@ -91,7 +91,7 @@ const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead const MAX_KNOWN_EXTRINSICS: usize = 4096; // ~128kb per peer + overhead /// Current protocol version. 
-pub(crate) const CURRENT_VERSION: u32 = 6; +pub(crate) const CURRENT_VERSION: u32 = 7; /// Lowest version we support pub(crate) const MIN_VERSION: u32 = 3; @@ -524,12 +524,24 @@ impl Protocol { data: BytesMut, ) -> CustomMessageOutcome { - let message = match as Decode>::decode(&mut &data[..]) { - Ok(message) => message, - Err(err) => { - debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); - self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); - return CustomMessageOutcome::None; + let input = &mut &data[..]; + let decoded_result = as Decode>::decode(input); + let all_read = input.is_empty(); + let message = match (all_read, decoded_result) { + (true, Ok(message)) => message, + (false, _) | (_, Err(_)) => match as Decode>::decode(&mut &data[..]) { + Ok(message) => if let Some(message) = message.into_latest() { + message + } else { + debug!(target: "sync", "Couldn't call packet sent by {}: {:?}: {}", who, data, "Invalid input."); + self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); + return CustomMessageOutcome::None; + }, + Err(err) => { + debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); + self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); + return CustomMessageOutcome::None; + } } }; diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 8638e9afc59b9..bc9d0f79facb7 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -25,6 +25,7 @@ pub use self::generic::{ RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, + RemoteReadChildRequestV6, }; use sc_client_api::StorageProof; @@ -39,6 +40,17 @@ pub type Message = generic::Message< ::Extrinsic, >; +/// Type alias for using the message type using block type parameters. 
+/// +/// This could be removed as soon as MIN_VERSION switch to 7. +pub type MessageV6 = generic::MessageV6< + ::Header, + ::Hash, + <::Header as HeaderT>::Number, + ::Extrinsic, +>; + + /// Type alias for using the status type using block type parameters. pub type Status = generic::Status< ::Hash, @@ -237,6 +249,49 @@ pub mod generic { Number(Number), } + /// A protocol V6 network message, this is only for backward compatibility. + /// It should only be use when we fail to decode a message + /// with the latest encoding. + #[derive(Decode)] + pub enum MessageV6 { + /// Status packet. + Status(Status), + /// Block request. + BlockRequest(BlockRequest), + /// Block response. + BlockResponse(BlockResponse), + /// Block announce. + BlockAnnounce(BlockAnnounce
), + /// Transactions. + Transactions(Transactions), + /// Consensus protocol message. + Consensus(ConsensusMessage), + /// Remote method call request. + RemoteCallRequest(RemoteCallRequest), + /// Remote method call response. + RemoteCallResponse(RemoteCallResponse), + /// Remote storage read request. + RemoteReadRequest(RemoteReadRequest), + /// Remote storage read response. + RemoteReadResponse(RemoteReadResponse), + /// Remote header request. + RemoteHeaderRequest(RemoteHeaderRequest), + /// Remote header response. + RemoteHeaderResponse(RemoteHeaderResponse
), + /// Remote changes request. + RemoteChangesRequest(RemoteChangesRequest), + /// Remote changes response. + RemoteChangesResponse(RemoteChangesResponse), + /// Remote child storage read request. + RemoteReadChildRequest(RemoteReadChildRequestV6), + /// Finality proof request. + FinalityProofRequest(FinalityProofRequest), + /// Finality proof response. + FinalityProofResponse(FinalityProofResponse), + /// Batch of consensus protocol messages. + ConsensusBatch(Vec), + } + /// A network message. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub enum Message { @@ -278,6 +333,39 @@ pub mod generic { ConsensusBatch(Vec), } + impl MessageV6 { + /// Get matching latest protocol message for a protocol V6 message. + /// + /// Note that this function expect that V6 message are only created + /// after a failed latest message decoding, so we do only convert for diverging + /// decoding path. + pub fn into_latest(self) -> Option> { + match self { + MessageV6::RemoteReadChildRequest(RemoteReadChildRequestV6 { + id, + block, + storage_key, + child_info: _, + child_type, + keys, + }) => { + // V6 protocol only got implementation for child type 1. + if child_type != 1 { + None + } else { + Some(Message::RemoteReadChildRequest(RemoteReadChildRequest { + id, + block, + storage_key, + keys, + })) + } + }, + _ => None, + } + } + } + impl Message { /// Message id useful for logging. pub fn id(&self) -> &'static str { @@ -468,6 +556,24 @@ pub mod generic { pub keys: Vec>, } + #[derive(Decode)] + /// Backward compatibility remote storage read child request. + pub struct RemoteReadChildRequestV6 { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Child Storage key. + pub storage_key: Vec, + /// Child trie source information. + pub child_info: Vec, + /// Child type, its required to resolve `child_info` + /// content and choose child implementation. + pub child_type: u32, + /// Storage key. 
+ pub keys: Vec>, + } + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote storage read child request. pub struct RemoteReadChildRequest { From fa52a8c43105e1052e039368ad66e291ca850c84 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 17:16:29 +0200 Subject: [PATCH 090/185] Revert "Retain compatibility with network protocol." This reverts commit 619b454c3ccf7b5d35bfaf8eea3df00084ee1272. --- client/network/src/protocol.rs | 28 ++----- client/network/src/protocol/message.rs | 106 ------------------------- 2 files changed, 8 insertions(+), 126 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9b55cc8d6a8d2..84b913b284c62 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -38,7 +38,7 @@ use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub }; use sp_arithmetic::traits::SaturatedConversion; -use message::{BlockAnnounce, Message, MessageV6}; +use message::{BlockAnnounce, Message}; use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; use prometheus_endpoint::{Registry, Gauge, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; use sync::{ChainSync, SyncState}; @@ -91,7 +91,7 @@ const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead const MAX_KNOWN_EXTRINSICS: usize = 4096; // ~128kb per peer + overhead /// Current protocol version. 
-pub(crate) const CURRENT_VERSION: u32 = 7; +pub(crate) const CURRENT_VERSION: u32 = 6; /// Lowest version we support pub(crate) const MIN_VERSION: u32 = 3; @@ -524,24 +524,12 @@ impl Protocol { data: BytesMut, ) -> CustomMessageOutcome { - let input = &mut &data[..]; - let decoded_result = as Decode>::decode(input); - let all_read = input.is_empty(); - let message = match (all_read, decoded_result) { - (true, Ok(message)) => message, - (false, _) | (_, Err(_)) => match as Decode>::decode(&mut &data[..]) { - Ok(message) => if let Some(message) = message.into_latest() { - message - } else { - debug!(target: "sync", "Couldn't call packet sent by {}: {:?}: {}", who, data, "Invalid input."); - self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); - return CustomMessageOutcome::None; - }, - Err(err) => { - debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); - self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); - return CustomMessageOutcome::None; - } + let message = match as Decode>::decode(&mut &data[..]) { + Ok(message) => message, + Err(err) => { + debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); + self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); + return CustomMessageOutcome::None; } }; diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index bc9d0f79facb7..8638e9afc59b9 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -25,7 +25,6 @@ pub use self::generic::{ RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, - RemoteReadChildRequestV6, }; use sc_client_api::StorageProof; @@ -40,17 +39,6 @@ pub type Message = generic::Message< ::Extrinsic, >; -/// Type alias for using the message type using block type parameters. 
-/// -/// This could be removed as soon as MIN_VERSION switch to 7. -pub type MessageV6 = generic::MessageV6< - ::Header, - ::Hash, - <::Header as HeaderT>::Number, - ::Extrinsic, ->; - - /// Type alias for using the status type using block type parameters. pub type Status = generic::Status< ::Hash, @@ -249,49 +237,6 @@ pub mod generic { Number(Number), } - /// A protocol V6 network message, this is only for backward compatibility. - /// It should only be use when we fail to decode a message - /// with the latest encoding. - #[derive(Decode)] - pub enum MessageV6 { - /// Status packet. - Status(Status), - /// Block request. - BlockRequest(BlockRequest), - /// Block response. - BlockResponse(BlockResponse), - /// Block announce. - BlockAnnounce(BlockAnnounce
), - /// Transactions. - Transactions(Transactions), - /// Consensus protocol message. - Consensus(ConsensusMessage), - /// Remote method call request. - RemoteCallRequest(RemoteCallRequest), - /// Remote method call response. - RemoteCallResponse(RemoteCallResponse), - /// Remote storage read request. - RemoteReadRequest(RemoteReadRequest), - /// Remote storage read response. - RemoteReadResponse(RemoteReadResponse), - /// Remote header request. - RemoteHeaderRequest(RemoteHeaderRequest), - /// Remote header response. - RemoteHeaderResponse(RemoteHeaderResponse
), - /// Remote changes request. - RemoteChangesRequest(RemoteChangesRequest), - /// Remote changes response. - RemoteChangesResponse(RemoteChangesResponse), - /// Remote child storage read request. - RemoteReadChildRequest(RemoteReadChildRequestV6), - /// Finality proof request. - FinalityProofRequest(FinalityProofRequest), - /// Finality proof response. - FinalityProofResponse(FinalityProofResponse), - /// Batch of consensus protocol messages. - ConsensusBatch(Vec), - } - /// A network message. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub enum Message { @@ -333,39 +278,6 @@ pub mod generic { ConsensusBatch(Vec), } - impl MessageV6 { - /// Get matching latest protocol message for a protocol V6 message. - /// - /// Note that this function expect that V6 message are only created - /// after a failed latest message decoding, so we do only convert for diverging - /// decoding path. - pub fn into_latest(self) -> Option> { - match self { - MessageV6::RemoteReadChildRequest(RemoteReadChildRequestV6 { - id, - block, - storage_key, - child_info: _, - child_type, - keys, - }) => { - // V6 protocol only got implementation for child type 1. - if child_type != 1 { - None - } else { - Some(Message::RemoteReadChildRequest(RemoteReadChildRequest { - id, - block, - storage_key, - keys, - })) - } - }, - _ => None, - } - } - } - impl Message { /// Message id useful for logging. pub fn id(&self) -> &'static str { @@ -556,24 +468,6 @@ pub mod generic { pub keys: Vec>, } - #[derive(Decode)] - /// Backward compatibility remote storage read child request. - pub struct RemoteReadChildRequestV6 { - /// Unique request id. - pub id: RequestId, - /// Block at which to perform call. - pub block: H, - /// Child Storage key. - pub storage_key: Vec, - /// Child trie source information. - pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, - /// Storage key. 
- pub keys: Vec>, - } - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote storage read child request. pub struct RemoteReadChildRequest { From 6969c74b8053dbdce81cd18ec2ca17f618e832ac Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 17:33:40 +0200 Subject: [PATCH 091/185] Add ChildenProofMap and ChildrenProofInfo to be able to rebase on child_trie_w3_change easilly. Those struct can also allow more compact proof encoding in the future. --- primitives/io/src/lib.rs | 68 +++++++++++---- primitives/state-machine/src/lib.rs | 8 +- .../state-machine/src/proving_backend.rs | 48 ++++++----- primitives/state-machine/src/trie_backend.rs | 10 +-- .../state-machine/src/trie_backend_essence.rs | 18 +++- primitives/storage/Cargo.toml | 3 - primitives/storage/src/lib.rs | 85 +++++++++++++++++++ 7 files changed, 188 insertions(+), 52 deletions(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index c8004057a78c9..5178fb7169d55 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -145,8 +145,9 @@ pub trait Storage { self.next_storage_key(&key) } - - /// Deprecated, please use dedicated runtime apis. + /// Read child key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::get`). fn child_get( &self, storage_key: &[u8], @@ -160,7 +161,9 @@ pub trait Storage { self.child_storage(&child_info, key).map(|s| s.to_vec()) } - /// Deprecated, please use dedicated runtime apis. + /// Read child key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::read`). fn child_read( &self, storage_key: &[u8], @@ -183,7 +186,9 @@ pub trait Storage { }) } - /// Deprecated, please use dedicated runtime apis. + /// Set a child storage value. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::set`). 
fn child_set( &mut self, storage_key: &[u8], @@ -198,7 +203,9 @@ pub trait Storage { self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } - /// Deprecated, please use dedicated runtime apis. + /// Remove child key value. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear`). fn child_clear( &mut self, storage_key: &[u8], @@ -212,7 +219,9 @@ pub trait Storage { self.clear_child_storage(&child_info, key); } - /// Deprecated, please use dedicated runtime apis. + /// Remove all child storage values. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::storage_kill`). fn child_storage_kill( &mut self, storage_key: &[u8], @@ -225,7 +234,9 @@ pub trait Storage { self.kill_child_storage(&child_info); } - /// Deprecated, please use dedicated runtime apis. + /// Check a child storage key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::exists`). fn child_exists( &self, storage_key: &[u8], @@ -239,7 +250,9 @@ pub trait Storage { self.exists_child_storage(&child_info, key) } - /// Deprecated, please use dedicated runtime apis. + /// Clear child key by prefix. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear_prefix`). fn child_clear_prefix( &mut self, storage_key: &[u8], @@ -253,7 +266,9 @@ pub trait Storage { self.clear_child_prefix(&child_info, prefix); } - /// Deprecated, please use dedicated runtime apis. + /// Child trie root calcualation. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear_root`). fn child_root( &mut self, storage_key: &[u8], @@ -268,7 +283,9 @@ pub trait Storage { self.child_storage_root(&child_info) } - /// Deprecated, please use dedicated runtime apis. + /// Child storage key iteration. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::next_key`). 
fn child_next_key( &mut self, storage_key: &[u8], @@ -281,18 +298,17 @@ pub trait Storage { .expect("Invalid child definition"); self.next_child_storage_key(&child_info, key) } - } - /// Interface for accessing the child storage for default child trie, /// from within the runtime. #[runtime_interface] pub trait DefaultChildStorage { - /// `storage_key` is the unprefixed location of the root of the child trie in the parent trie. + + /// Get a default child storage value for a given key. /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. + /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the parent trie. + /// Result is `None` if the value for `key` in the child storage can not be found. fn get( &self, storage_key: &[u8], @@ -302,6 +318,8 @@ pub trait DefaultChildStorage { self.child_storage(&child_info, key).map(|s| s.to_vec()) } + /// Allocation efficient variant of `get`. + /// /// Get `key` from child storage, placing the value into `value_out` and return the number /// of bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. @@ -325,6 +343,8 @@ pub trait DefaultChildStorage { }) } + /// Set a child storage value. + /// /// Set `key` to `value` in the child storage denoted by `storage_key`. fn set( &mut self, @@ -336,7 +356,9 @@ pub trait DefaultChildStorage { self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } - /// Clear the given child storage of the given `key` and its value. + /// Clear a child storage key. + /// + /// For the default child storage at `storage_key`, clear value at `key`. fn clear ( &mut self, storage_key: &[u8], @@ -347,6 +369,9 @@ pub trait DefaultChildStorage { } /// Clear an entire child storage. + /// + /// If it exists, the child storage for `storage_key` + /// is removed. 
fn storage_kill( &mut self, storage_key: &[u8], @@ -355,7 +380,9 @@ pub trait DefaultChildStorage { self.kill_child_storage(&child_info); } - /// Check whether the given `key` exists in storage. + /// Check a child storage key. + /// + /// Check whether the given `key` exists in default child defined at `storage_key`. fn exists( &self, storage_key: &[u8], @@ -365,6 +392,8 @@ pub trait DefaultChildStorage { self.exists_child_storage(&child_info, key) } + /// Clear child default key by prefix. + /// /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. fn clear_prefix( &mut self, @@ -375,8 +404,9 @@ pub trait DefaultChildStorage { self.clear_child_prefix(&child_info, prefix); } - /// "Commit" all existing operations and compute the resulting child storage root. + /// Default child root calculation. /// + /// "Commit" all existing operations and compute the resulting child storage root. /// The hashing algorithm is defined by the `Block`. /// /// Returns the SCALE encoded hash. @@ -388,6 +418,8 @@ pub trait DefaultChildStorage { self.child_storage_root(&child_info) } + /// Child storage key iteration. + /// /// Get the next key in storage after the given one in lexicographic order in child storage. 
fn next_key( &mut self, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 5035995220b15..28dbe2154f384 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -23,7 +23,7 @@ use log::{warn, trace}; pub use sp_core::{Hasher, InnerHasher}; use codec::{Decode, Encode, Codec}; use sp_core::{ - storage::{ChildInfo, ChildrenMap}, NativeOrEncoded, NeverNativeValue, + storage::{ChildInfo, ChildrenProofMap}, NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, CallInWasmExt, RuntimeCode}, hexdisplay::HexDisplay, }; use overlayed_changes::OverlayedChangeSet; @@ -617,7 +617,7 @@ where /// Check execution proof on proving backend, generated by `prove_execution` call. pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend>, H>, + trie_backend: &TrieBackend>, H>, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Box, @@ -851,7 +851,7 @@ where /// Check storage read proof on pre-created proving backend. pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend>, H>, + proving_backend: &TrieBackend>, H>, key: &[u8], ) -> Result>, Box> where @@ -877,7 +877,7 @@ where /// Check child storage read proof on pre-created proving backend. 
pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend>, H>, + proving_backend: &TrieBackend>, H>, child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 39a86ce2cf605..535d869474fa9 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -34,7 +34,7 @@ use crate::trie_backend_essence::{BackendStorageDBRef, TrieBackendEssence, use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; -use sp_core::storage::{ChildInfo, ChildType, ChildrenMap}; +use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap, ChildrenProofMap}; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -112,9 +112,9 @@ pub enum StorageProof { /// of nodes. TopTrieCompact(ProofCompacted),*/ /// Fully descriped proof, it includes the child trie individual descriptions. - Full(ChildrenMap), + Full(ChildrenProofMap), /// Fully descriped proof, compact encoded. - FullCompact(ChildrenMap), + FullCompact(ChildrenProofMap), } impl StorageProof { @@ -132,8 +132,8 @@ impl StorageProof { pub fn empty_for(kind: StorageProofKind) -> Self { match kind { StorageProofKind::Flatten => StorageProof::Flatten(Default::default()), - StorageProofKind::Full => StorageProof::Full(ChildrenMap::default()), - StorageProofKind::FullCompact => StorageProof::FullCompact(ChildrenMap::default()), + StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), + StorageProofKind::FullCompact => StorageProof::FullCompact(ChildrenProofMap::default()), } } @@ -155,14 +155,19 @@ impl StorageProof { } /// This unpacks `FullCompact` to `Full` or do nothing. 
- pub fn unpack(self, with_roots: bool) -> Result<(Self, Option>>), String> + /// TODO EMCH document and use case for with_roots to true?? (probably unpack -> merge -> pack + /// but no code for it here) + pub fn unpack( + self, + with_roots: bool, + ) -> Result<(Self, Option>>), String> where H::Out: Codec, { let map_e = |e| format!("Trie unpack error: {}", e); if let StorageProof::FullCompact(children) = self { - let mut result = ChildrenMap::default(); + let mut result = ChildrenProofMap::default(); let mut roots = if with_roots { - Some(ChildrenMap::default()) + Some(ChildrenProofMap::default()) } else { None }; @@ -188,13 +193,13 @@ impl StorageProof { } /// This packs `Full` to `FullCompact`, using needed roots. - pub fn pack(self, roots: &ChildrenMap>) -> Result + pub fn pack(self, roots: &ChildrenProofMap>) -> Result where H::Out: Codec, { let map_e = |e| format!("Trie pack error: {}", e); if let StorageProof::Full(children) = self { - let mut result = ChildrenMap::default(); + let mut result = ChildrenProofMap::default(); for (child_info, proof) in children { match child_info.child_type() { ChildType::ParentKeyId => { @@ -276,11 +281,12 @@ pub fn merge_storage_proofs(proofs: I) -> Result H::Out: Codec, { let mut do_flatten = false; - let mut child_sets = ChildrenMap::>>::default(); - let mut unique_set = HashSet::>::default(); + let mut child_sets = ChildrenProofMap::>>::default(); + let mut unique_set = HashSet::>::default(); // lookup for best encoding for mut proof in proofs { if let &StorageProof::FullCompact(..) = &proof { + // TODO EMCH pack back so set to true. 
proof = proof.unpack::(false)?.0; } let proof = proof; @@ -310,7 +316,7 @@ pub fn merge_storage_proofs(proofs: I) -> Result Ok(if do_flatten { StorageProof::Flatten(unique_set.into_iter().collect()) } else { - let mut result = ChildrenMap::default(); + let mut result = ChildrenProofMap::default(); for (child_info, set) in child_sets.into_iter() { result.insert(child_info, set.into_iter().collect()); } @@ -324,7 +330,7 @@ pub fn merge_flatten_storage_proofs(proofs: I) -> Option where I: IntoIterator, { - let mut unique_set = HashSet::>::default(); + let mut unique_set = HashSet::>::default(); // lookup for best encoding for proof in proofs { if let StorageProof::Flatten(set) = proof { @@ -490,13 +496,13 @@ impl ProofRecorder { StorageProof::Flatten(trie_nodes) }, ProofRecorder::Full(rec) => { - let mut children = ChildrenMap::default(); + let mut children = ChildrenProofMap::default(); for (child_info, set) in rec.read().iter() { let trie_nodes: Vec> = set .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) .collect(); - children.insert(child_info.clone(), trie_nodes); + children.insert(child_info.proof_info(), trie_nodes); } StorageProof::Full(children) }, @@ -672,7 +678,7 @@ where pub fn create_proof_check_backend( root: H::Out, proof: StorageProof, -) -> Result>, H>, Box> +) -> Result>, H>, Box> where H: Hasher, H::Out: Codec, @@ -680,7 +686,7 @@ where use std::ops::Deref; let db = create_proof_check_backend_storage(proof) .map_err(|e| Box::new(e) as Box)?; - if db.deref().get(&ChildInfo::top_trie()) + if db.deref().get(&ChildInfoProof::top_trie()) .map(|db| db.contains(&root, EMPTY_PREFIX)) .unwrap_or(false) { Ok(TrieBackend::new_with_roots(db, root)) @@ -698,19 +704,19 @@ where /// somehow). 
pub fn create_proof_check_backend_storage( proof: StorageProof, -) -> Result>, String> +) -> Result>, String> where H: Hasher, { let map_e = |e| format!("Trie unpack error: {}", e); - let mut result = ChildrenMap::default(); + let mut result = ChildrenProofMap::default(); match proof { s@StorageProof::Flatten(..) => { let mut db = MemoryDB::default(); for item in s.iter_nodes_flatten() { db.insert(EMPTY_PREFIX, &item); } - result.insert(ChildInfo::top_trie(), db); + result.insert(ChildInfoProof::top_trie(), db); }, StorageProof::Full(children) => { for (child_info, proof) in children.into_iter() { diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index bca68423c4c60..cce84b13b841b 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,7 +19,7 @@ use log::{warn, debug}; use sp_core::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildType, ChildrenMap}; +use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap, ChildrenProofMap}; use codec::{Codec, Decode, Encode}; use crate::{ StorageKey, StorageValue, Backend, @@ -62,13 +62,13 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } /// Get registered roots - pub fn extract_registered_roots(&self) -> Option>> { + pub fn extract_registered_roots(&self) -> Option>> { if let Some(register_roots) = self.register_roots.as_ref() { - let mut dest = ChildrenMap::default(); - dest.insert(ChildInfo::top_trie(), self.essence.root().encode()); + let mut dest = ChildrenProofMap::default(); + dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); let read_lock = register_roots.read(); for (child_info, root) in read_lock.iter() { - dest.insert(child_info.clone(), root.encode()); + dest.insert(child_info.proof_info(), root.encode()); } Some(dest) } else { diff --git 
a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index aab55ed0a7c20..27e1ca0117cd6 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -28,7 +28,7 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, TrieDBIterator, for_keys_in_trie}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::{ChildInfo, ChildrenMap}; +use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenProofMap}; use codec::Encode; /// Patricia trie-based storage trait. @@ -458,6 +458,7 @@ impl TrieBackendStorageRef for MemoryDB { } } +// TOOD EMCH try remove impl TrieBackendStorageRef for ChildrenMap> { type Overlay = MemoryDB; @@ -473,6 +474,21 @@ impl TrieBackendStorageRef for ChildrenMap> { } } +impl TrieBackendStorageRef for ChildrenProofMap> { + type Overlay = MemoryDB; + + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + let child_info_proof = child_info.proof_info(); + Ok(self.deref().get(&child_info_proof).and_then(|s| + hash_db::HashDB::get(s, key, prefix) + )) + } +} #[cfg(test)] mod test { diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 5d6cf94cc8acb..b943f11942da3 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -9,9 +9,6 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-storage/" -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - [dependencies] codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-alpha.6", default-features = false, path = "../std" } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 25941cdbf2d00..eff6ff590200a 
100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -161,6 +161,22 @@ pub mod well_known_keys { } } +/// Child information needed for proof construction. +/// +/// It is similar to standard child information but can +/// be a bit more lightweight as long term storage is not +/// needed in proof. +/// +/// One can also use this information to use different compaction +/// strategy in a same proof. +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +pub enum ChildInfoProof { + /// A child using the default trie layout, identified by its + /// unprefixed location in the first level trie. + /// Empty location is reserved for the top level trie of the proof. + Default(ChildTrieParentKeyId), +} + /// Information related to a child state. #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] pub enum ChildInfo { @@ -284,6 +300,36 @@ impl ChildInfo { ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, } } + + /// Get corresponding info for proof definition. + pub fn proof_info(&self) -> ChildInfoProof { + match self { + ChildInfo::ParentKeyId(parent) => ChildInfoProof::Default(parent.clone()), + } + } +} + +impl ChildInfoProof { + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. + pub fn top_trie() -> Self { + ChildInfoProof::Default(ChildTrieParentKeyId { data: Vec::new() }) + } + + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. + pub fn is_top_trie(&self) -> bool { + match self { + ChildInfoProof::Default(ChildTrieParentKeyId { data }) => data.len() == 0, + } + } + + /// Returns the type for this child info. + pub fn child_type(&self) -> ChildType { + match self { + ChildInfoProof::Default(..) => ChildType::ParentKeyId, + } + } } /// Type of child. @@ -381,6 +427,12 @@ impl ChildTrieParentKeyId { /// A few utilities methods are defined. 
pub struct ChildrenMap(pub BTreeMap); +#[cfg(feature = "std")] +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +/// Type for storing a map of child trie proof related information. +/// A few utilities methods are defined. +pub struct ChildrenProofMap(pub BTreeMap); + /// Type alias for storage of children related content. pub type ChildrenVec = Vec<(ChildInfo, T)>; @@ -466,6 +518,39 @@ impl IntoIterator for ChildrenMap { } } +#[cfg(feature = "std")] +impl sp_std::ops::Deref for ChildrenProofMap { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(feature = "std")] +impl sp_std::ops::DerefMut for ChildrenProofMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "std")] +impl sp_std::default::Default for ChildrenProofMap { + fn default() -> Self { + ChildrenProofMap(BTreeMap::new()) + } +} + +#[cfg(feature = "std")] +impl IntoIterator for ChildrenProofMap { + type Item = (ChildInfoProof, T); + type IntoIter = sp_std::collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; #[test] From 48dfe7d938d5b98bdf95d6e634c75cdd54d18b69 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 19:17:09 +0200 Subject: [PATCH 092/185] Failure to run this on non split child payload: see non implemented state machine function: we need to pass child_info as parameter to split thing in the trie backend that register. 
--- client/api/src/backend.rs | 2 +- client/db/src/bench.rs | 65 +-- client/db/src/changes_tries_storage.rs | 11 +- client/db/src/lib.rs | 182 ++----- client/db/src/storage_cache.rs | 5 +- client/src/cht.rs | 11 +- client/src/client.rs | 8 +- client/src/in_mem.rs | 5 +- client/src/light/backend.rs | 4 +- client/src/light/call_executor.rs | 2 +- client/src/light/fetcher.rs | 3 +- frame/contracts/src/account_db.rs | 24 +- frame/contracts/src/exec.rs | 20 +- frame/contracts/src/lib.rs | 3 +- frame/contracts/src/tests.rs | 12 +- primitives/api/src/lib.rs | 2 +- primitives/core/src/lib.rs | 33 +- primitives/runtime/src/traits.rs | 26 +- primitives/state-machine/src/backend.rs | 44 +- primitives/state-machine/src/basic.rs | 2 +- .../state-machine/src/changes_trie/build.rs | 23 +- .../src/changes_trie/changes_iterator.rs | 14 +- .../state-machine/src/changes_trie/mod.rs | 20 +- .../state-machine/src/changes_trie/prune.rs | 9 +- .../state-machine/src/changes_trie/storage.rs | 24 +- primitives/state-machine/src/ext.rs | 54 +-- .../state-machine/src/in_memory_backend.rs | 8 +- primitives/state-machine/src/lib.rs | 47 +- .../state-machine/src/overlayed_changes.rs | 15 +- .../state-machine/src/proving_backend.rs | 114 ++--- primitives/state-machine/src/testing.rs | 2 +- primitives/state-machine/src/trie_backend.rs | 110 ++--- .../state-machine/src/trie_backend_essence.rs | 457 ++++++++---------- primitives/storage/src/lib.rs | 72 +-- primitives/trie/src/lib.rs | 221 ++++++++- primitives/trie/src/storage_proof.rs | 1 + 36 files changed, 691 insertions(+), 964 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 33e0dd1ac67ea..33a370c7cb2c5 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -24,7 +24,7 @@ use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, HashFor}; use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, 
ChangesTrieTransaction, - ChildStorageCollection, StorageCollection, + StorageCollection, ChildStorageCollection, }; use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; use crate::{ diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 56c2484001baa..9d6f595498bd0 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -21,8 +21,8 @@ use std::cell::{Cell, RefCell}; use std::collections::HashMap; use hash_db::{Prefix, Hasher}; -use sp_trie::MemoryDB; -use sp_core::storage::{ChildInfo, ChildType}; +use sp_trie::{MemoryDB, prefixed_key}; +use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; use sp_state_machine::{DBValue, backend::Backend as StateBackend}; @@ -41,13 +41,8 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get( - &self, - child_info: &ChildInfo, - key: &Block::Hash, - prefix: Prefix, - ) -> Result, String> { - let key = crate::keyspace_and_prefixed_key(key.as_ref(), child_info.keyspace(), prefix); + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + let key = prefixed_key::>(key, prefix); self.db.get(0, &key) .map_err(|e| format!("Database backend error: {:?}", e)) } @@ -86,24 +81,11 @@ impl BenchmarkingState { child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); - let (root, transaction, _): (B::Hash, _, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( + let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta, - false, ); - let mut keyspace = crate::Keyspaced::new(&[]); - for (info, mut updates) in transaction.clone().into_iter() { - keyspace.change_keyspace(info.keyspace()); - for (key, rc_val) in updates.drain() { - let key = if info.is_top_trie() { - key - } else { - keyspace.prefix_key(key.as_slice()).to_vec() - }; - - 
state.genesis.insert(key, rc_val); - } - } + state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); state.commit(root, transaction)?; state.record.take(); @@ -247,37 +229,20 @@ impl StateBackend> for BenchmarkingState { None } - fn commit(&self, storage_root: as Hasher>::Out, transaction: Self::Transaction) + fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) -> Result<(), Self::Error> { if let Some(db) = self.db.take() { let mut db_transaction = DBTransaction::new(); - let mut keys = Vec::new(); - let mut keyspace = crate::Keyspaced::new(&[]); - for (info, mut updates) in transaction.into_iter() { - // child info with strong unique id are using the same state-db with prefixed key - if info.child_type() != ChildType::ParentKeyId { - // Unhandled child kind - unimplemented!( - "Data for {:?} without a backend implementation", - info.child_type(), - ); - } - keyspace.change_keyspace(info.keyspace()); - for (key, (val, rc)) in updates.drain() { - let key = if info.is_top_trie() { - key - } else { - keyspace.prefix_key(key.as_slice()).to_vec() - }; - - if rc > 0 { - db_transaction.put(0, &key, &val); - } else if rc < 0 { - db_transaction.delete(0, &key); - } - keys.push(key); + let changes = transaction.drain(); + let mut keys = Vec::with_capacity(changes.len()); + for (key, (val, rc)) in changes { + if rc > 0 { + db_transaction.put(0, &key, &val); + } else if rc < 0 { + db_transaction.delete(0, &key); } + keys.push(key); } self.record.set(keys); db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 677fefdaa25d6..55e740f43462a 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -487,11 +487,7 @@ where self.build_cache.read().with_changed_keys(root, functor) } - fn get( - &self, - key: &Block::Hash, - _prefix: Prefix, - ) -> Result, 
String> { + fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result, String> { self.db.get(self.changes_tries_column, key.as_ref()) .map_err(|err| format!("{}", err)) } @@ -705,10 +701,7 @@ mod tests { .log(DigestItem::as_changes_trie_root) .cloned(); match trie_root { - Some(trie_root) => backend.changes_tries_storage.get( - &trie_root, - EMPTY_PREFIX, - ).unwrap().is_none(), + Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), None => true, } }; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index bc6186a865452..afe932e251beb 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -58,10 +58,10 @@ use sp_blockchain::{ use codec::{Decode, Encode}; use hash_db::Prefix; use kvdb::{KeyValueDB, DBTransaction}; -use sp_trie::{MemoryDB, PrefixedMemoryDB}; +use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use parking_lot::RwLock; use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; -use sp_core::storage::{well_known_keys, ChildInfo, ChildrenMap, ChildType}; +use sp_core::storage::{well_known_keys, ChildInfo}; use sp_runtime::{ generic::BlockId, Justification, Storage, BuildStorage, @@ -526,7 +526,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: SyncingCachingState, Block>, - db_updates: ChildrenMap>>, + db_updates: PrefixedMemoryDB>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -581,10 +581,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc // Currently cache isn't implemented on full nodes. 
} - fn update_db_storage( - &mut self, - update: ChildrenMap>>, - ) -> ClientResult<()> { + fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { self.db_updates = update; Ok(()) } @@ -604,7 +601,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc )); let mut changes_trie_config: Option = None; - let (root, transaction, _) = self.old_state.full_storage_root( + let (root, transaction) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| { if k == well_known_keys::CHANGES_TRIE_CONFIG { changes_trie_config = Some( @@ -614,8 +611,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc } (k, Some(v)) }), - child_delta, - false, + child_delta ); self.db_updates = transaction; @@ -672,15 +668,8 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get( - &self, - child_info: &ChildInfo, - key: &Block::Hash, - prefix: Prefix, - ) -> Result, String> { - // Default child trie (those with strong unique id) are put - // directly into the same address space at state_db level. 
- let key = keyspace_and_prefixed_key(key.as_ref(), child_info.keyspace(), prefix); + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + let key = prefixed_key::>(key, prefix); self.state_db.get(&key, self) .map_err(|e| format!("Database backend error: {:?}", e)) } @@ -691,11 +680,7 @@ impl sc_state_db::NodeDb for StorageDb { type Key = [u8]; fn get(&self, key: &[u8]) -> Result>, Self::Error> { - // note this implementation should ONLY be call from state_db, - // as it rely on the fact that we address a key that is already - // prefixed with keyspace - self.db.get(columns::STATE, key) - .map(|r| r.map(|v| v.to_vec())) + self.db.get(columns::STATE, key).map(|r| r.map(|v| v.to_vec())) } } @@ -711,12 +696,7 @@ impl DbGenesisStorage { } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get( - &self, - _trie: &ChildInfo, - _key: &Block::Hash, - _prefix: Prefix, - ) -> Result, String> { + fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { Ok(None) } } @@ -1129,39 +1109,22 @@ impl Backend { } let finalized = if operation.commit_state { - let mut state_db_changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + let mut changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); let mut ops: u64 = 0; - let mut bytes = 0; + let mut bytes: u64 = 0; let mut removal: u64 = 0; let mut bytes_removal: u64 = 0; - let mut keyspace = Keyspaced::new(&[]); - for (info, mut updates) in operation.db_updates.into_iter() { - // child info with strong unique id are using the same state-db with prefixed key - if info.child_type() != ChildType::ParentKeyId { - // Unhandled child kind - return Err(ClientError::Backend(format!( - "Data for {:?} without a backend implementation", - info.child_type(), - ))); - } - keyspace.change_keyspace(info.keyspace()); - for (key, (val, rc)) in updates.drain() { - let key = if info.is_top_trie() { - key - } else { - keyspace.prefix_key(key.as_slice()).to_vec() - }; - if rc > 0 { 
- ops += 1; - bytes += key.len() as u64 + val.len() as u64; - - state_db_changeset.inserted.push((key, val.to_vec())); - } else if rc < 0 { - removal += 1; - bytes_removal += key.len() as u64; - - state_db_changeset.deleted.push(key); - } + for (key, (val, rc)) in operation.db_updates.drain() { + if rc > 0 { + ops += 1; + bytes += key.len() as u64 + val.len() as u64; + + changeset.inserted.push((key, val.to_vec())); + } else if rc < 0 { + removal += 1; + bytes_removal += key.len() as u64; + + changeset.deleted.push(key); } } self.state_usage.tally_writes_nodes(ops, bytes); @@ -1170,7 +1133,7 @@ impl Backend { let mut ops: u64 = 0; let mut bytes: u64 = 0; for (key, value) in operation.storage_updates.iter() - .chain(operation.child_storage_updates.iter().flat_map(|(_, s, _)| s.iter())) { + .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) { ops += 1; bytes += key.len() as u64; if let Some(v) = value.as_ref() { @@ -1183,7 +1146,7 @@ impl Backend { &hash, number_u64, &pending_block.header.parent_hash(), - state_db_changeset, + changeset, ).map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)) )?; @@ -1382,7 +1345,6 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - // state_db commit set is only for column STATE for (key, val) in commit.data.inserted.into_iter() { transaction.put(columns::STATE, &key[..], &val); } @@ -1434,7 +1396,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(BlockImportOperation { pending_block: None, old_state, - db_updates: Default::default(), + db_updates: PrefixedMemoryDB::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), changes_trie_config_update: None, @@ -1712,7 +1674,6 @@ impl sc_client_api::backend::Backend for Backend { Ok(Some(header)) => { sp_state_machine::Storage::get( self.storage.as_ref(), - &ChildInfo::top_trie(), &header.state_root(), (&[], None), 
).unwrap_or(None).is_some() @@ -1731,47 +1692,6 @@ impl sc_client_api::backend::Backend for Backend { impl sc_client_api::backend::LocalBackend for Backend {} -/// Rules for storing a default child trie with unique id. -struct Keyspaced { - keyspace_len: usize, - buffer: Vec, -} - -impl Keyspaced { - fn new(keyspace: &[u8]) -> Self { - Keyspaced { - keyspace_len: keyspace.len(), - buffer: keyspace.to_vec(), - } - } - - fn change_keyspace(&mut self, new_keyspace: &[u8]) { - self.keyspace_len = new_keyspace.len(); - self.buffer.resize(new_keyspace.len(), 0); - self.buffer[..new_keyspace.len()].copy_from_slice(new_keyspace); - } - - fn prefix_key(&mut self, key: &[u8]) -> &[u8] { - self.buffer.resize(self.keyspace_len + key.len(), 0); - self.buffer[self.keyspace_len..].copy_from_slice(key); - self.buffer.as_slice() - } -} - -// Prefix key and add keyspace with a single vec alloc -// Warning if memory_db `sp_trie::prefixed_key` implementation change, this function -// will need change too. -fn keyspace_and_prefixed_key(key: &[u8], keyspace: &[u8], prefix: Prefix) -> Vec { - let mut prefixed_key = Vec::with_capacity(key.len() + keyspace.len() + prefix.0.len() + 1); - prefixed_key.extend_from_slice(keyspace); - prefixed_key.extend_from_slice(prefix.0); - if let Some(last) = prefix.1 { - prefixed_key.push(last); - } - prefixed_key.extend_from_slice(key); - prefixed_key -} - #[cfg(test)] pub(crate) mod tests { use hash_db::{HashDB, EMPTY_PREFIX}; @@ -1900,9 +1820,6 @@ pub(crate) mod tests { #[test] fn set_state_data() { let db = Backend::::new_test(2, 0); - - let child_info = sp_core::storage::ChildInfo::new_default(b"key1"); - let hash = { let mut op = db.begin_operation().unwrap(); db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); @@ -1919,28 +1836,16 @@ pub(crate) mod tests { (vec![1, 2, 3], vec![9, 9, 9]), ]; - let child_storage = vec![ - (vec![2, 3, 5], Some(vec![4, 4, 6])), - (vec![2, 2, 3], Some(vec![7, 9, 9])), - ]; - - 
header.state_root = op.old_state.full_storage_root(storage + header.state_root = op.old_state.storage_root(storage .iter() .cloned() - .map(|(x, y)| (x, Some(y))), - vec![(child_info.clone(), child_storage.clone())], - false, + .map(|(x, y)| (x, Some(y))) ).0.into(); let hash = header.hash(); - let mut children_default = HashMap::default(); - children_default.insert(child_info.storage_key().to_vec(), sp_core::storage::StorageChild { - child_info: child_info.clone(), - data: child_storage.iter().map(|(k, v)| (k.clone(), v.clone().unwrap())).collect(), - }); op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children_default, + children_default: Default::default(), }).unwrap(); op.set_block_data( header.clone(), @@ -1956,10 +1861,6 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); - assert_eq!( - state.child_storage(&child_info, &[2, 3, 5]).unwrap(), - Some(vec![4, 4, 6]), - ); hash }; @@ -1999,12 +1900,6 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); - assert_eq!( - state.child_storage(&child_info, &[2, 3, 5]).unwrap(), - Some(vec![4, 4, 6]), - ); - - } } @@ -2039,9 +1934,7 @@ pub(crate) mod tests { children_default: Default::default(), }).unwrap(); - key = op.db_updates.entry(ChildInfo::top_trie()) - .or_insert_with(Default::default) - .insert(EMPTY_PREFIX, b"hello"); + key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); op.set_block_data( header, Some(vec![]), @@ -2077,14 +1970,8 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates - .entry(ChildInfo::top_trie()) - .or_insert_with(Default::default) - .insert(EMPTY_PREFIX, b"hello"); - op.db_updates - .entry(ChildInfo::top_trie()) - 
.or_insert_with(Default::default) - .remove(&key, EMPTY_PREFIX); + op.db_updates.insert(EMPTY_PREFIX, b"hello"); + op.db_updates.remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), @@ -2120,10 +2007,7 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates - .entry(ChildInfo::top_trie()) - .or_insert_with(Default::default) - .remove(&key, EMPTY_PREFIX); + op.db_updates.remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 77fba3d1c78d8..66ac74afa4f2a 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -381,7 +381,7 @@ impl CacheChanges { } let mut modifications = HashSet::new(); let mut child_modifications = HashSet::new(); - child_changes.into_iter().for_each(|(sk, changes, _ci)| + child_changes.into_iter().for_each(|(sk, changes)| for (k, v) in changes.into_iter() { let k = (sk.clone(), k); if is_best { @@ -1174,7 +1174,6 @@ mod tests { #[test] fn should_track_used_size_correctly() { - let child_info1 = ChildInfo::new_default(b"unique_id_1"); let root_parent = H256::random(); let shared = new_shared_cache::(109, ((109-36), 109)); let h0 = H256::random(); @@ -1202,7 +1201,7 @@ mod tests { &[], &[], vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], child_info1)], + vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])], Some(h0), Some(0), true, diff --git a/client/src/cht.rs b/client/src/cht.rs index b9e92e9654827..111070200782d 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -23,6 +23,7 @@ //! root has. A correct proof implies that the claimed block is identical to the one //! we discarded. 
+use hash_db; use codec::Encode; use sp_trie; @@ -86,7 +87,7 @@ pub fn compute_root( ) -> ClientResult where Header: HeaderT, - Hasher: sp_core::Hasher, + Hasher: hash_db::Hasher, Hasher::Out: Ord, I: IntoIterator>>, { @@ -105,7 +106,7 @@ pub fn build_proof( ) -> ClientResult where Header: HeaderT, - Hasher: sp_core::Hasher, + Hasher: hash_db::Hasher, Hasher::Out: Ord + codec::Codec, BlocksI: IntoIterator, HashesI: IntoIterator>>, @@ -134,7 +135,7 @@ pub fn check_proof( ) -> ClientResult<()> where Header: HeaderT, - Hasher: sp_core::Hasher, + Hasher: hash_db::Hasher, Hasher::Out: Ord + codec::Codec, { do_check_proof::( @@ -163,7 +164,7 @@ pub fn check_proof_on_proving_backend( ) -> ClientResult<()> where Header: HeaderT, - Hasher: sp_core::Hasher, + Hasher: hash_db::Hasher, Hasher::Out: Ord + codec::Codec, { do_check_proof::( @@ -187,7 +188,7 @@ fn do_check_proof( ) -> ClientResult<()> where Header: HeaderT, - Hasher: sp_core::Hasher, + Hasher: hash_db::Hasher, Hasher::Out: Ord, F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, { diff --git a/client/src/client.rs b/client/src/client.rs index 156bcae263406..3aad8700015e7 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -398,11 +398,7 @@ impl Client where self.storage.with_cached_changed_keys(root, functor) } - fn get( - &self, - key: &Block::Hash, - prefix: Prefix, - ) -> Result, String> { + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { self.storage.get(key, prefix) } } @@ -957,7 +953,7 @@ impl Client where .trigger( ¬ify_import.hash, storage_changes.0.into_iter(), - storage_changes.1.into_iter().map(|(sk, v, _ci)| (sk, v.into_iter())), + storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), ); } diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index c9c65b9aeda08..20b227e790f6e 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -520,10 +520,9 @@ impl backend::BlockImportOperation for BlockImportOperatio .map(|(_storage_key, child_content)| 
(child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))))); - let (root, transaction, _) = self.old_state.full_storage_root( + let (root, transaction) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| (k, Some(v))), - child_delta, - false, + child_delta ); self.new_state = Some(InMemoryBackend::from(transaction)); diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index fa7aec5338222..01e9854864062 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -46,7 +46,7 @@ use sc_client_api::{ UsageInfo, }; use crate::light::blockchain::Blockchain; -use sp_core::Hasher; +use hash_db::Hasher; const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; @@ -326,7 +326,7 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let (storage_root, _, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, false); + let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); self.storage_update = Some(storage_update); Ok(storage_root) diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 02b8b58bce0ec..61aeabfc4d016 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -31,7 +31,7 @@ use sp_state_machine::{ execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, merge_storage_proofs, }; -use sp_core::Hasher; +use hash_db::Hasher; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index a20a35bc3bce6..5fa88b3b46ded 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -20,8 +20,7 @@ use std::sync::Arc; use std::collections::{BTreeMap, HashMap}; use std::marker::PhantomData; -use hash_db::{HashDB, EMPTY_PREFIX}; -use sp_core::Hasher; 
+use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; use sp_core::storage::{ChildInfo, ChildType}; diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 14c9ead7e6c22..aae853d2ff996 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -17,7 +17,7 @@ //! Auxiliaries to help with managing partial changes to accounts state. use super::{ - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, + AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, TrieIdGenerator, }; use crate::exec::StorageKey; @@ -27,7 +27,7 @@ use sp_std::prelude::*; use sp_io::hashing::blake2_256; use sp_runtime::traits::{Bounded, Zero}; use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance}; -use frame_support::{storage::child, StorageMap, storage::child::ChildInfo}; +use frame_support::{storage::child, StorageMap}; use frame_system; // Note: we don't provide Option because we can't create @@ -108,12 +108,7 @@ pub trait AccountDb { /// /// Trie id is None iff account doesn't have an associated trie id in >. /// Because DirectAccountDb bypass the lookup for this association. - fn get_storage( - &self, - account: &T::AccountId, - trie_id: Option<&ChildInfo>, - location: &StorageKey - ) -> Option>; + fn get_storage(&self, account: &T::AccountId, trie_id: Option<&TrieId>, location: &StorageKey) -> Option>; /// If account has an alive contract then return the code hash associated. fn get_code_hash(&self, account: &T::AccountId) -> Option>; /// If account has an alive contract then return the rent allowance associated. 
@@ -130,10 +125,10 @@ impl AccountDb for DirectAccountDb { fn get_storage( &self, _account: &T::AccountId, - trie_id: Option<&ChildInfo>, + trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|child_info| child::get_raw(child_info, &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(&crate::child_trie_info(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -215,19 +210,18 @@ impl AccountDb for DirectAccountDb { new_info.last_write = Some(>::block_number()); } - let child_info = &new_info.child_trie_info(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - child_info, + &new_info.child_trie_info(), &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(child_info, &blake2_256(&k), &value[..]); + child::put_raw(&new_info.child_trie_info(), &blake2_256(&k), &value[..]); } else { - child::kill(child_info, &blake2_256(&k)); + child::kill(&new_info.child_trie_info(), &blake2_256(&k)); } } @@ -332,7 +326,7 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<&ChildInfo>, + trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { self.local diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index d8b42b2f9ecae..402622331d0ec 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -26,7 +26,6 @@ use frame_support::{ storage::unhashed, dispatch::DispatchError, traits::{WithdrawReason, Currency, Time, Randomness}, }; -use sp_core::storage::ChildInfo; pub type AccountIdOf = ::AccountId; pub type CallOf = ::Call; @@ -292,7 +291,7 @@ pub enum DeferredAction { pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, pub 
self_account: T::AccountId, - pub self_trie_info: Option, + pub self_trie_id: Option, pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, @@ -316,7 +315,7 @@ where pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { ExecutionContext { caller: None, - self_trie_info: None, + self_trie_id: None, self_account: origin, overlay: OverlayAccountDb::::new(&DirectAccountDb), depth: 0, @@ -329,12 +328,12 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_info: Option) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option) -> ExecutionContext<'b, T, V, L> { ExecutionContext { caller: Some(self), - self_trie_info: trie_info, + self_trie_id: trie_id, self_account: dest, overlay: OverlayAccountDb::new(&self.overlay), depth: self.depth + 1, @@ -576,9 +575,7 @@ where where F: FnOnce(&mut ExecutionContext) -> ExecResult { let (output, change_set, deferred) = { - let mut nested = self.nested(dest, trie_id.map(|trie_id| { - crate::child_trie_info(&trie_id) - })); + let mut nested = self.nested(dest, trie_id); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) }; @@ -738,12 +735,7 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - let trie_id = self.ctx.self_trie_info.as_ref(); - self.ctx.overlay.get_storage( - &self.ctx.self_account, - trie_id, - key, - ) + self.ctx.overlay.get_storage(&self.ctx.self_account, self.ctx.self_trie_id.as_ref(), key) } fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str> { diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index b994954e0a517..af03df536fe4b 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -692,11 +692,10 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; - let child_info = child_trie_info(&contract_info.trie_id); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, 
&address, - Some(&child_info), + Some(&contract_info.trie_id), &key, ); Ok(maybe_value) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 77a6e6e240358..1f97b94467eb5 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -311,10 +311,6 @@ fn account_removal_does_not_remove_storage() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = ::TrieIdGenerator::trie_id(&1); let trie_id2 = ::TrieIdGenerator::trie_id(&2); - let child_info1 = crate::child_trie_info(trie_id1.as_ref()); - let child_info2 = crate::child_trie_info(trie_id2.as_ref()); - let child_info1 = Some(&child_info1); - let child_info2 = Some(&child_info2); let key1 = &[1; 32]; let key2 = &[2; 32]; @@ -363,20 +359,20 @@ fn account_removal_does_not_remove_storage() { // Verify that no entries are removed. { assert_eq!( - >::get_storage(&DirectAccountDb, &1, child_info1, key1), + >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1), Some(b"1".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &1, child_info1, key2), + >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2), Some(b"2".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, child_info2, key1), + >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key1), Some(b"3".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, child_info2, key2), + >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key2), Some(b"4".to_vec()) ); } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 168e467e78f47..cca2c3f8de0fd 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -43,7 +43,7 @@ pub use sp_state_machine::{ pub use sp_core::NativeOrEncoded; #[doc(hidden)] #[cfg(feature = "std")] -pub use sp_state_machine::{Hasher, InnerHasher}; +pub use hash_db::Hasher; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; diff --git a/primitives/core/src/lib.rs 
b/primitives/core/src/lib.rs index 18d25f1333e48..8d5ad7daaec83 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -79,8 +79,7 @@ pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; -pub use hash_db::Hasher as InnerHasher; -pub use hash_db::{Prefix, EMPTY_PREFIX}; +pub use hash_db::Hasher; #[cfg(feature = "std")] pub use self::hasher::blake2::Blake2Hasher; @@ -358,33 +357,3 @@ macro_rules! impl_maybe_marker { )+ } } - -/// Technical trait to avoid calculating empty root. -/// This assumes (same wrong asumption as for hashdb trait), -/// an empty node is `[0u8]`. -pub trait Hasher: InnerHasher { - /// Value for an empty root node, this - /// is the hash of `[0u8]` value. - const EMPTY_ROOT: &'static [u8]; -} - -#[cfg(feature = "std")] -impl Hasher for Blake2Hasher { - const EMPTY_ROOT: &'static [u8] = &[ - 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, - 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, - 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, - 19, 20, - ]; -} - -#[cfg(test)] -mod test { - use super::{Blake2Hasher, Hasher, InnerHasher}; - - #[test] - fn empty_root_const() { - let empty = Blake2Hasher::hash(&[0u8]); - assert_eq!(Blake2Hasher::EMPTY_ROOT, empty.as_ref()); - } -} diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 5a49ec519124d..fdf1d6396d26f 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -25,7 +25,7 @@ use std::fmt::Display; use std::str::FromStr; #[cfg(feature = "std")] use serde::{Serialize, Deserialize, de::DeserializeOwned}; -use sp_core::{self, InnerHasher, Hasher, TypeId, RuntimeDebug}; +use sp_core::{self, Hasher, TypeId, RuntimeDebug}; use crate::codec::{Codec, Encode, Decode}; use crate::transaction_validity::{ ValidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, @@ -322,20 +322,19 @@ impl::Output> + 
Hasher { +pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + PartialEq + Hasher::Output> { /// The hash type produced. type Output: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + Encode + Decode; /// Produce the hash of some byte-slice. fn hash(s: &[u8]) -> Self::Output { - ::hash(s) + ::hash(s) } /// Produce the hash of some codec-encodable value. fn hash_of(s: &S) -> Self::Output { - Encode::using_encoded(s, ::hash) + Encode::using_encoded(s, ::hash) } /// The ordered Patricia tree root of the given `input`. @@ -350,7 +349,7 @@ pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BlakeTwo256; -impl InnerHasher for BlakeTwo256 { +impl Hasher for BlakeTwo256 { type Out = sp_core::H256; type StdHasher = hash256_std_hasher::Hash256StdHasher; const LENGTH: usize = 32; @@ -372,15 +371,6 @@ impl Hash for BlakeTwo256 { } } -impl Hasher for BlakeTwo256 { - const EMPTY_ROOT: &'static [u8] = &[ - 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, - 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, - 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, - 19, 20, - ]; -} - /// Something that can be checked for equality and printed out to a debug channel if bad. pub trait CheckEqual { /// Perform the equality check. @@ -1422,10 +1412,4 @@ mod tests { assert!(signature.verify(msg, &pair.public())); assert!(signature.verify(msg, &pair.public())); } - - #[test] - fn empty_root_const() { - let empty = ::hash(&[0u8]); - assert_eq!(BlakeTwo256::EMPTY_ROOT, empty.as_ref()); - } } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 0f3af4466c69e..df8f810ceb7ce 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -17,12 +17,12 @@ //! State machine backends. These manage the code and storage of contracts. 
use log::warn; -use sp_core::{Hasher, InnerHasher}; +use hash_db::Hasher; use codec::{Decode, Encode}; -use sp_core::{traits::RuntimeCode, - storage::{ChildInfo, ChildrenMap, well_known_keys, PrefixedStorageKey}}; +use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; + use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, @@ -170,9 +170,8 @@ pub trait Backend: std::fmt::Debug { fn full_storage_root( &self, delta: I1, - child_deltas: I2, - return_child_roots: bool, - ) -> (H::Out, Self::Transaction, Vec<(PrefixedStorageKey, Option)>) + child_deltas: I2) + -> (H::Out, Self::Transaction) where I1: IntoIterator)>, I2i: IntoIterator)>, @@ -181,7 +180,6 @@ pub trait Backend: std::fmt::Debug { { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); - let mut result_child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = @@ -189,24 +187,16 @@ pub trait Backend: std::fmt::Debug { let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { - if return_child_roots { - result_child_roots.push((prefixed_storage_key.clone(), None)); - } child_roots.push((prefixed_storage_key.into_inner(), None)); } else { - if return_child_roots { - child_roots.push((prefixed_storage_key.clone().into_inner(), Some(child_root.encode()))); - result_child_roots.push((prefixed_storage_key, Some(child_root))); - } else { - child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); - } + child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } let (root, parent_txs) = self.storage_root( delta.into_iter().chain(child_roots.into_iter()) ); txs.consolidate(parent_txs); - (root, txs, result_child_roots) + (root, txs) } /// Register stats from overlay of state machine. 
@@ -337,24 +327,6 @@ impl Consolidate for Vec<( } } -impl Consolidate for ChildrenMap { - fn consolidate(&mut self, other: Self) { - self.extend_with(other.into_iter(), Consolidate::consolidate) - } -} - -impl Consolidate for Option { - fn consolidate(&mut self, other: Self) { - if let Some(v) = self.as_mut() { - if let Some(other) = other { - v.consolidate(other); - } - } else { - *self = other; - } - } -} - impl> Consolidate for sp_trie::GenericMemoryDB { fn consolidate(&mut self, other: Self) { sp_trie::GenericMemoryDB::consolidate(self, other) @@ -367,7 +339,7 @@ pub(crate) fn insert_into_memory_db(mdb: &mut MemoryDB, input: I) -> Op H: Hasher, I: IntoIterator, { - let mut root = ::Out::default(); + let mut root = ::Out::default(); { let mut trie = TrieDBMut::::new(mdb, &mut root); for (key, value) in input { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 7d473846058cb..f03d5c1659ba8 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -317,7 +317,7 @@ mod tests { children_default: map![ child_info.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: child_info.clone(), + child_info: child_info.to_owned(), } ] }); diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index d128d136a6336..45535204e0884 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -19,7 +19,7 @@ use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use codec::{Decode, Encode}; -use sp_core::{Hasher, InnerHasher}; +use hash_db::Hasher; use num_traits::One; use crate::{ StorageKey, @@ -280,9 +280,6 @@ fn prepare_digest_input<'a, H, Number>( return Ok((map, child_map)); } - // change trie content are all stored as top_trie (default child trie with empty keyspace) - let child_info = 
sp_core::storage::ChildInfo::top_trie(); - let child_info = &child_info; let mut children_roots = BTreeMap::::new(); { let trie_storage = TrieBackendEssence::<_, H>::new( @@ -290,21 +287,21 @@ fn prepare_digest_input<'a, H, Number>( trie_root, ); - trie_storage.for_key_values_with_prefix(child_info, &child_prefix, |key, value| + trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); + let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.insert(trie_key.storage_key, trie_root); } }); - trie_storage.for_keys_with_prefix(child_info, &extrinsic_prefix, |key| + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(child_info, &digest_prefix, |key| + trie_storage.for_keys_with_prefix(&digest_prefix, |key| if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); @@ -321,12 +318,12 @@ fn prepare_digest_input<'a, H, Number>( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, ); - trie_storage.for_keys_with_prefix(child_info, &extrinsic_prefix, |key| + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(child_info, &digest_prefix, |key| + trie_storage.for_keys_with_prefix(&digest_prefix, |key| if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); @@ -437,13 +434,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - 
].into_iter().collect(), child_info_1.clone())), + ].into_iter().collect(), child_info_1.to_owned())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), child_info_2)), + ].into_iter().collect(), child_info_2.to_owned())), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -466,7 +463,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect(), child_info_1)), + ].into_iter().collect(), child_info_1.to_owned())), ].into_iter().collect(), }, collect_extrinsics: true, diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index c1e3266b7ca11..f5a936069ba40 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -20,8 +20,7 @@ use std::cell::RefCell; use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; -use sp_core::Hasher; -use sp_core::storage::ChildInfo; +use hash_db::Hasher; use num_traits::Zero; use sp_core::storage::PrefixedStorageKey; use sp_trie::Recorder; @@ -69,7 +68,6 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, - child_info: ChildInfo::top_trie(), }) } @@ -180,7 +178,6 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, - child_info: ChildInfo::top_trie(), }.collect() } @@ -318,10 +315,6 @@ pub struct DrilldownIterator<'a, H, Number> H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, - /// This is always top trie info, but it cannot be - /// statically instantiated at the time (vec of null - /// size could be in theory). 
- child_info: ChildInfo, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> @@ -330,11 +323,8 @@ impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, N type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { - let child_info = &self.child_info; self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root) - .storage(child_info, key) - ) + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) } } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index bed84726e5a03..ee6c6778e0aad 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -67,9 +67,7 @@ pub use self::prune::prune; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; -use hash_db::Prefix; -use sp_core::Hasher; -use sp_core::storage::ChildInfo; +use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; @@ -162,26 +160,16 @@ pub trait Storage: RootsStorage { functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. - fn get( - &self, - key: &H::Out, - prefix: Prefix, - ) -> Result, String>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } /// Changes trie storage -> trie backend essence adapter. 
pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBackendStorageAdapter<'a, H, N> { +impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get( - &self, - child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { - debug_assert!(child_info.is_top_trie()); + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 088773c72cabd..05555df305b7c 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -16,7 +16,7 @@ //! Changes trie pruning-related functions. -use sp_core::{Hasher, InnerHasher}; +use hash_db::Hasher; use sp_trie::Recorder; use log::warn; use num_traits::One; @@ -65,11 +65,10 @@ pub fn prune( ); let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); let mut children_roots = Vec::new(); - let child_info = sp_core::storage::ChildInfo::top_trie(); - trie_storage.for_key_values_with_prefix(&child_info, &child_prefix, |key, value| { + trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); + let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.push(trie_root); } @@ -101,7 +100,7 @@ fn prune_trie( backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), proof_recorder: &mut proof_recorder, }; - trie.record_all_top_trie_keys(); + trie.record_all_keys(); } // all nodes of this changes trie should be pruned diff --git 
a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index df731b699eb0f..81651dd2e719b 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -17,16 +17,14 @@ //! Changes trie storage utilities. use std::collections::{BTreeMap, HashSet, HashMap}; -use hash_db::{Prefix, EMPTY_PREFIX}; -use sp_core::Hasher; +use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; use sp_core::storage::PrefixedStorageKey; -use sp_core::storage::ChildInfo; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; use crate::{ StorageKey, - trie_backend_essence::TrieBackendStorageRef, + trie_backend_essence::TrieBackendStorage, changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, }; @@ -190,12 +188,8 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, &ChildInfo::top_trie(), key, prefix) + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + MemoryDB::::get(&self.data.read().mdb, key, prefix) } } @@ -205,20 +199,14 @@ impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { } } -impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Number> +impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> where Number: BlockNumber, H: Hasher, { type Overlay = MemoryDB; - fn get( - &self, - child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { - debug_assert!(child_info.is_top_trie()); + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 416ee5a448cee..cf33d65622e96 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -22,8 +22,8 @@ use crate::{ changes_trie::State as ChangesTrieState, }; +use hash_db::Hasher; use 
sp_core::{ - Hasher, storage::{well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, }; @@ -184,9 +184,6 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { - if child_info.is_top_trie() { - return self.storage(key); - } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(child_info, key) @@ -211,9 +208,6 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option> { - if child_info.is_top_trie() { - return self.storage_hash(key); - } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(child_info, key) @@ -254,9 +248,6 @@ where child_info: &ChildInfo, key: &[u8], ) -> bool { - if child_info.is_top_trie() { - return self.exists_storage(key); - } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = match self.overlay.child_storage(child_info, key) { @@ -295,9 +286,6 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { - if child_info.is_top_trie() { - return self.next_storage_key(key); - } let next_backend_key = self.backend .next_child_storage_key(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); @@ -342,9 +330,6 @@ where key: StorageKey, value: Option, ) { - if child_info.is_top_trie() { - return self.place_storage(key, value); - } trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, HexDisplay::from(&child_info.storage_key()), @@ -361,10 +346,6 @@ where &mut self, child_info: &ChildInfo, ) { - if child_info.is_top_trie() { - trace!(target: "state-trace", "Ignoring kill_child_storage on top trie"); - return; - } trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, HexDisplay::from(&child_info.storage_key()), @@ -401,10 +382,6 @@ where child_info: &ChildInfo, prefix: &[u8], ) { - if child_info.is_top_trie() { - return self.clear_prefix(prefix); - } - trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", self.id, HexDisplay::from(&child_info.storage_key()), @@ 
-446,22 +423,22 @@ where let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { - let root = self.storage_transaction_cache.transaction_child_storage_root - .get(&prefixed_storage_key) - .map(|root| root.encode()) + let root = self + .storage(prefixed_storage_key.as_slice()) + .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - empty_child_trie_root::>().encode() + empty_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, HexDisplay::from(&storage_key), HexDisplay::from(&root.as_ref()), ); - root + root.encode() } else { if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { - let (root, _is_empty, _) = { + let (root, is_empty, _) = { let delta = self.overlay.committed.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) @@ -475,6 +452,16 @@ where }; let root = root.encode(); + // We store update in the overlay in order to be able to use 'self.storage_transaction' + // cache. This is brittle as it rely on Ext only querying the trie backend for + // storage root. + // A better design would be to manage 'child_storage_transaction' in a + // similar way as 'storage_transaction' but for each child trie. 
+ if is_empty { + self.overlay.set_storage(prefixed_storage_key.into_inner(), None); + } else { + self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone())); + } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", self.id, @@ -685,7 +672,6 @@ mod tests { #[test] fn next_child_storage_key_works() { - let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; @@ -702,7 +688,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - child_info: child_info.clone(), + child_info: child_info.to_owned(), } ], }.into(); @@ -732,8 +718,6 @@ mod tests { #[test] fn child_storage_works() { - use sp_core::InnerHasher; - let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; let mut cache = StorageTransactionCache::default(); @@ -749,7 +733,7 @@ mod tests { vec![20] => vec![20], vec![30] => vec![40] ], - child_info: child_info.clone(), + child_info: child_info.to_owned(), } ], }.into(); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index ccba85b24ad6f..299ad20fc4f11 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -23,9 +23,9 @@ use crate::{ stats::UsageInfo, }; use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; -use sp_core::{Hasher, InnerHasher}; +use hash_db::Hasher; use sp_trie::{ - MemoryDB, empty_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, child_trie_root, empty_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; use sp_core::storage::{ChildInfo, ChildType, Storage}; @@ -228,7 +228,7 @@ impl Backend for InMemory where H::Out: Codec { fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)>, - ::Out: Ord, + ::Out: Ord, { let existing_pairs = self.inner.get(&None) .into_iter() @@ -263,7 +263,7 @@ impl Backend for InMemory where H::Out: 
Codec { .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); - let root = Layout::::trie_root( + let root = child_trie_root::, _, _, _>( existing_pairs.chain(transaction.iter().cloned()) .collect::>() .into_iter() diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 28dbe2154f384..5cf39c1a53f48 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -20,7 +20,7 @@ use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; use log::{warn, trace}; -pub use sp_core::{Hasher, InnerHasher}; +use hash_db::Hasher; use codec::{Decode, Encode, Codec}; use sp_core::{ storage::{ChildInfo, ChildrenProofMap}, NativeOrEncoded, NeverNativeValue, @@ -72,7 +72,7 @@ pub use proving_backend::{ create_flat_proof_check_backend, create_flat_proof_check_backend_storage, merge_flatten_storage_proofs, }; -pub use trie_backend_essence::{TrieBackendStorage, TrieBackendStorageRef, Storage}; +pub use trie_backend_essence::{TrieBackendStorage, Storage}; pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; pub use in_memory_backend::InMemory as InMemoryBackend; @@ -87,7 +87,7 @@ pub type DefaultHandler = fn(CallResult, CallResult) -> CallRe /// Type of changes trie transaction. pub type ChangesTrieTransaction = ( MemoryDB, - ChangesTrieCacheAction<::Out, N>, + ChangesTrieCacheAction<::Out, N>, ); /// Strategy for executing a call into the runtime. @@ -617,7 +617,7 @@ where /// Check execution proof on proving backend, generated by `prove_execution` call. pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend>, H>, + trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Box, @@ -851,7 +851,7 @@ where /// Check storage read proof on pre-created proving backend. 
pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend>, H>, + proving_backend: &TrieBackend, H>, key: &[u8], ) -> Result>, Box> where @@ -877,7 +877,7 @@ where /// Check child storage read proof on pre-created proving backend. pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend>, H>, + proving_backend: &TrieBackend, H>, child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> @@ -1257,4 +1257,39 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } + + #[test] + fn child_storage_uuid() { + + let child_info_1 = ChildInfo::new_default(b"sub_test1"); + let child_info_2 = ChildInfo::new_default(b"sub_test2"); + + use crate::trie_backend::tests::test_trie; + let mut overlay = OverlayedChanges::default(); + + let mut transaction = { + let backend = test_trie(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); + ext.storage_root(); + cache.transaction.unwrap() + }; + let mut duplicate = false; + for (k, (value, rc)) in transaction.drain().iter() { + // look for a key inserted twice: transaction rc is 2 + if *rc == 2 { + duplicate = true; + println!("test duplicate for {:?} {:?}", k, value); + } + } + assert!(!duplicate); + } } diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index c9b6a6f6defc2..f57d13ee3ffec 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -29,10 +29,10 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, PrefixedStorageKey}; +use 
sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; use std::{mem, ops}; -use sp_core::Hasher; +use hash_db::Hasher; /// Storage key. pub type StorageKey = Vec; @@ -44,7 +44,7 @@ pub type StorageValue = Vec; pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. -pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection, ChildInfo)>; +pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; /// The overlayed changes to state to be queried on top of the backend. /// @@ -133,8 +133,6 @@ pub struct StorageTransactionCache { pub(crate) transaction: Option, /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, - /// The storage child roots after applying the transaction. - pub(crate) transaction_child_storage_root: BTreeMap>, /// Contains the changes trie transaction. pub(crate) changes_trie_transaction: Option>>, /// The storage root after applying the changes trie transaction. 
@@ -153,7 +151,6 @@ impl Default for StorageTransactionCache Self { transaction: None, transaction_storage_root: None, - transaction_child_storage_root: Default::default(), changes_trie_transaction: None, changes_trie_transaction_storage_root: None, } @@ -510,8 +507,7 @@ impl OverlayedChanges { Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), - child_storage_changes: child_storage_changes - .map(|(sk, it)| (sk, it.0.collect(), it.1)).collect(), + child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), transaction, transaction_storage_root, changes_trie_transaction, @@ -574,11 +570,10 @@ impl OverlayedChanges { let delta = self.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone())) .chain(self.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))); - let (root, transaction, child_roots) = backend.full_storage_root(delta, child_delta_iter, true); + let (root, transaction) = backend.full_storage_root(delta, child_delta_iter); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); - cache.transaction_child_storage_root = child_roots.into_iter().collect(); root } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 535d869474fa9..abc96afb93b90 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -20,17 +20,15 @@ use std::sync::Arc; use parking_lot::RwLock; use codec::{Encode, Decode, Codec}; use log::debug; -use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; -use sp_core::{Hasher, InnerHasher}; +use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ - MemoryDB, empty_child_trie_root, read_trie_value_with, + MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys, }; pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; -use 
crate::trie_backend_essence::{BackendStorageDBRef, TrieBackendEssence, - TrieBackendStorage, TrieBackendStorageRef}; +use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; @@ -350,15 +348,15 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> { /// Produce proof for a key query. pub fn storage(&mut self, key: &[u8]) -> Result>, String> { - let child_info = ChildInfo::top_trie(); - let eph = BackendStorageDBRef::new( + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new( self.backend.backend_storage(), - &child_info, + &mut read_overlay, ); let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value_with::, _, BackendStorageDBRef>( + read_trie_value_with::, _, Ephemeral>( &eph, self.backend.root(), key, @@ -370,34 +368,36 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> pub fn child_storage( &mut self, child_info: &ChildInfo, - key: &[u8], + key: &[u8] ) -> Result>, String> { let storage_key = child_info.storage_key(); let root = self.storage(storage_key)? .and_then(|r| Decode::decode(&mut &r[..]).ok()) .unwrap_or(empty_child_trie_root::>()); - let eph = BackendStorageDBRef::new( + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new( self.backend.backend_storage(), - child_info, + &mut read_overlay, ); let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value_with::, _, _>( + read_child_trie_value_with::, _, _>( + child_info.keyspace(), &eph, - &root, + &root.as_ref(), key, &mut *self.proof_recorder ).map_err(map_e) } /// Produce proof for the whole backend. 
- pub fn record_all_top_trie_keys(&mut self) { - let child_info = ChildInfo::top_trie(); - let eph = BackendStorageDBRef::new( + pub fn record_all_keys(&mut self) { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new( self.backend.backend_storage(), - &child_info, + &mut read_overlay, ); let mut iter = move || -> Result<(), Box>> { @@ -413,23 +413,23 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Global proof recorder, act as a layer over a hash db for recording queried /// data. -pub enum ProofRecorder { +pub enum ProofRecorder { // root of each child is added to be able to pack. /// Proof keep a separation between child trie content, this is usually useless, /// but when we use proof compression we want this separation. - Full(Arc::Out, Option>>>>), + Full(Arc::Out, Option>>>>), /// Single level of storage for all recoded nodes. - Flat(Arc::Out, Option>>>), + Flat(Arc::Out, Option>>>), } -impl Default for ProofRecorder { +impl Default for ProofRecorder { fn default() -> Self { // Default to flat proof. ProofRecorder::Flat(Default::default()) } } -impl Clone for ProofRecorder { +impl Clone for ProofRecorder { fn clone(&self) -> Self { match self { ProofRecorder::Full(a) => ProofRecorder::Full(a.clone()), @@ -483,7 +483,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } } -impl ProofRecorder { +impl ProofRecorder { /// Extracting the gathered unordered proof. pub fn extract_proof(&self) -> Result { Ok(match self { @@ -510,29 +510,24 @@ impl ProofRecorder { } } -// proof run on a flatten storage of tries and currently only need implement a single -// trie backend storage api. 
-impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage for ProofRecorderBackend<'a, S, H> { type Overlay = S::Overlay; - fn get( - &self, - child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { match &self.proof_recorder { ProofRecorder::Flat(rec) => { if let Some(v) = rec.read().get(key) { return Ok(v.clone()); } - let backend_value = self.backend.get(child_info, key, prefix)?; + let backend_value = self.backend.get(key, prefix)?; rec.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) }, ProofRecorder::Full(rec) => { + unimplemented!() +/* // TODO need flattening -> use another struct Proof Recordertrie backend. if let Some(v) = rec.read().get(child_info).and_then(|s| s.get(key)) { return Ok(v.clone()); } @@ -540,7 +535,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef rec.write().entry(child_info.clone()) .or_default() .insert(key.clone(), backend_value.clone()); - Ok(backend_value) + Ok(backend_value)*/ }, } } @@ -561,7 +556,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> H::Out: Ord + Codec, { type Error = String; - type Transaction = Option; + type Transaction = S::Overlay; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -632,8 +627,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)> { - let (root, mut tx) = self.0.storage_root(delta); - (root, tx.remove(&ChildInfo::top_trie())) + self.0.storage_root(delta) } fn child_storage_root( @@ -645,8 +639,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> I: IntoIterator, Option>)>, H::Out: Ord { - let (root, is_empty, mut tx) = self.0.child_storage_root(child_info, delta); - (root, is_empty, tx.remove(child_info)) + 
self.0.child_storage_root(child_info, delta) } fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } @@ -678,17 +671,14 @@ where pub fn create_proof_check_backend( root: H::Out, proof: StorageProof, -) -> Result>, H>, Box> +) -> Result, H>, Box> where H: Hasher, H::Out: Codec, { - use std::ops::Deref; let db = create_proof_check_backend_storage(proof) .map_err(|e| Box::new(e) as Box)?; - if db.deref().get(&ChildInfoProof::top_trie()) - .map(|db| db.contains(&root, EMPTY_PREFIX)) - .unwrap_or(false) { + if db.contains(&root, EMPTY_PREFIX) { Ok(TrieBackend::new_with_roots(db, root)) } else { Err(Box::new(ExecutionError::InvalidProof)) @@ -704,44 +694,42 @@ where /// somehow). pub fn create_proof_check_backend_storage( proof: StorageProof, -) -> Result>, String> +) -> Result, String> where H: Hasher, { let map_e = |e| format!("Trie unpack error: {}", e); - let mut result = ChildrenProofMap::default(); + let mut db = MemoryDB::default(); match proof { s@StorageProof::Flatten(..) => { - let mut db = MemoryDB::default(); for item in s.iter_nodes_flatten() { db.insert(EMPTY_PREFIX, &item); } - result.insert(ChildInfoProof::top_trie(), db); }, StorageProof::Full(children) => { - for (child_info, proof) in children.into_iter() { - let mut db = MemoryDB::default(); + for (_child_info, proof) in children.into_iter() { for item in proof.into_iter() { db.insert(EMPTY_PREFIX, &item); } - result.insert(child_info, db); } }, + // TODO EMCH it is rather interesting to notice that child_info in proof + // does not look useful. StorageProof::FullCompact(children) => { - for (child_info, (compact_scheme, proof)) in children.into_iter() { + for (_child_info, (compact_scheme, proof)) in children.into_iter() { match compact_scheme { CompactScheme::TrieSkipHashes => { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). 
- let (_root, db) = sp_trie::unpack_proof_to_memdb::>(proof.as_slice()) - .map_err(map_e)?; - result.insert(child_info, db); + for item in sp_trie::unpack_proof::>(proof.as_slice()) + .map_err(map_e)? + .1.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } }, } } }, } - Ok(result) + Ok(db) } /// Create in-memory storage of proof check backend. @@ -837,10 +825,9 @@ mod tests { assert_eq!(trie_backend.pairs(), proving_backend.pairs()); let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); - let mut trie_mdb = trie_mdb.remove(&ChildInfo::top_trie()).unwrap(); - assert_eq!(trie_mdb.drain(), proving_mdb.unwrap().drain()); + assert_eq!(trie_mdb.drain(), proving_mdb.drain()); }; test(true); test(false); @@ -889,8 +876,7 @@ mod tests { let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())), - false, + in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())) ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 8bd39aca46ae7..8f2e9ad3fc74a 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -18,7 +18,7 @@ use std::any::{Any, TypeId}; use codec::Decode; -use sp_core::Hasher; +use hash_db::Hasher; use crate::{ backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, StorageKey, StorageValue, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index cce84b13b841b..957abf841d00a 100644 --- 
a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -15,21 +15,21 @@ // along with Substrate. If not, see . //! Trie-based state machine backend. + use log::{warn, debug}; -use sp_core::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root}; +use hash_db::Hasher; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap, ChildrenProofMap}; use codec::{Codec, Decode, Encode}; use crate::{ StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, + trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, }; use std::sync::Arc; use parking_lot::RwLock; -/// Patricia trie-based backend. Transaction type is overlays of changes to commit -/// for this trie and child tries. +/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { essence: TrieBackendEssence, // storing child_info of top trie even if it is in @@ -107,11 +107,11 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = ChildrenMap; + type Transaction = S::Overlay; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.storage(&self.top_trie, key) + self.essence.storage(key) } fn child_storage( @@ -119,15 +119,11 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(child_info)? 
{ - essence.storage(child_info, key) - } else { - Ok(None) - } + self.essence.child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.next_storage_key(&self.top_trie, key) + self.essence.next_storage_key(key) } fn next_child_storage_key( @@ -135,19 +131,15 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(child_info)? { - essence.next_storage_key(child_info, key) - } else { - Ok(None) - } + self.essence.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_keys_with_prefix(&self.top_trie, prefix, f) + self.essence.for_keys_with_prefix(prefix, f) } fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_key_values_with_prefix(&self.top_trie, prefix, f) + self.essence.for_key_values_with_prefix(prefix, f) } fn for_keys_in_child_storage( @@ -155,9 +147,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - if let Ok(Some(essence)) = self.child_essence(child_info) { - essence.for_keys(child_info, f) - } + self.essence.for_keys_in_child_storage(child_info, f) } fn for_child_keys_with_prefix( @@ -166,13 +156,12 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - if let Ok(Some(essence)) = self.child_essence(child_info) { - essence.for_keys_with_prefix(child_info, prefix, f) - } + self.essence.for_child_keys_with_prefix(child_info, prefix, f) } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - let eph = BackendStorageDBRef::new(self.essence.backend_storage(), &self.top_trie); + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -195,7 +184,8 @@ impl, H: Hasher> Backend for TrieBackend where } fn 
keys(&self, prefix: &[u8]) -> Vec { - let eph = BackendStorageDBRef::new(self.essence.backend_storage(), &self.top_trie); + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -213,7 +203,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, ChildrenMap) + fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -222,7 +212,6 @@ impl, H: Hasher> Backend for TrieBackend where { let mut eph = Ephemeral::new( self.essence.backend_storage(), - &self.top_trie, &mut write_overlay, ); @@ -231,9 +220,8 @@ impl, H: Hasher> Backend for TrieBackend where Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } } - let mut tx = ChildrenMap::default(); - tx.insert(self.top_trie.clone(), write_overlay); - (root, tx) + + (root, write_overlay) } fn child_storage_root( @@ -261,15 +249,13 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let storage = self.essence.backend_storage(); - // Do not write prefix in overlay. 
let mut eph = Ephemeral::new( - storage, - child_info, + self.essence.backend_storage(), &mut write_overlay, ); - match delta_trie_root::, _, _, _, _>( + match child_delta_trie_root::, _, _, _, _, _>( + child_info.keyspace(), &mut eph, root, delta @@ -281,9 +267,7 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; - let mut tx = ChildrenMap::default(); - tx.insert(child_info.clone(), write_overlay); - (root, is_default, tx) + (root, is_default, write_overlay) } fn as_trie_backend(&mut self) -> Option<&TrieBackend> { @@ -297,42 +281,12 @@ impl, H: Hasher> Backend for TrieBackend where } } -impl, H: Hasher> TrieBackend where - H::Out: Ord + Codec, -{ - fn child_essence<'a>( - &'a self, - child_info: &ChildInfo, - ) -> Result>, >::Error> { - if let Some(cache) = self.register_roots.as_ref() { - if let Some(result) = cache.read().get(child_info) { - return Ok(result.map(|root| - TrieBackendEssence::new(self.essence.backend_storage(), root.clone()) - )); - } - } - - let root: Option = self.storage(&child_info.prefixed_storage_key()[..])? 
- .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); - - if let Some(cache) = self.register_roots.as_ref() { - cache.write().insert(child_info.clone(), root.clone()); - } - - Ok(if let Some(root) = root { - Some(TrieBackendEssence::new(self.essence.backend_storage(), root)) - } else { - None - }) - } -} - #[cfg(test)] pub mod tests { use std::collections::HashSet; use sp_core::H256; use codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use sp_runtime::traits::BlakeTwo256; use super::*; @@ -343,6 +297,7 @@ pub mod tests { let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -409,18 +364,13 @@ pub mod tests { #[test] fn storage_root_transaction_is_empty() { - let tx = test_trie().storage_root(::std::iter::empty()).1; - for (_ct, mut tx) in tx.into_iter() { - assert!(tx.drain().is_empty()); - } + assert!(test_trie().storage_root(::std::iter::empty()).1.drain().is_empty()); } #[test] fn storage_root_transaction_is_non_empty() { - let (new_root, tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); - for (_ct, mut tx) in tx.into_iter() { - assert!(!tx.drain().is_empty()); - } + let (new_root, mut tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); + assert!(!tx.drain().is_empty()); assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 27e1ca0117cd6..fda51326cb246 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ 
b/primitives/state-machine/src/trie_backend_essence.rs @@ -19,13 +19,11 @@ use std::ops::Deref; use std::sync::Arc; -use std::marker::PhantomData; use log::{debug, warn}; -use sp_core::Hasher; -use hash_db::{self, EMPTY_PREFIX, Prefix}; +use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - check_if_empty_root, read_trie_value, - TrieDBIterator, for_keys_in_trie}; + empty_child_trie_root, read_trie_value, read_child_trie_value, + for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenProofMap}; @@ -34,21 +32,16 @@ use codec::Encode; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get( - &self, - trie: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher> { +pub struct TrieBackendEssence, H: Hasher> { storage: S, root: H::Out, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { +impl, H: Hasher> TrieBackendEssence where H::Out: Encode { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { TrieBackendEssence { @@ -74,10 +67,60 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. 
- pub fn next_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { - let eph = BackendStorageDBRef::new(&self.storage, child_info); + pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { + self.next_storage_key_from_root(&self.root, None, key) + } + + /// Access the root of the child storage in its parent trie + fn child_root(&self, child_info: &ChildInfo) -> Result, String> { + self.storage(child_info.prefixed_storage_key().as_slice()) + } + + /// Return the next key in the child trie i.e. the minimum key that is strictly superior to + /// `key` in lexicographic order. + pub fn next_child_storage_key( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, String> { + let child_root = match self.child_root(child_info)? { + Some(child_root) => child_root, + None => return Ok(None), + }; + + let mut hash = H::Out::default(); + + if child_root.len() != hash.as_ref().len() { + return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); + } + // note: child_root and hash must be same size, panics otherwise. + hash.as_mut().copy_from_slice(&child_root[..]); + + self.next_storage_key_from_root(&hash, Some(child_info), key) + } + + /// Return next key from main trie or child trie by providing corresponding root. 
+ fn next_storage_key_from_root( + &self, + root: &H::Out, + child_info: Option<&ChildInfo>, + key: &[u8], + ) -> Result, String> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + let dyn_eph: &dyn hash_db::HashDBRef<_, _>; + let keyspace_eph; + if let Some(child_info) = child_info.as_ref() { + keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); + dyn_eph = &keyspace_eph; + } else { + dyn_eph = &eph; + } - let trie = TrieDB::::new(&eph, &self.root) + let trie = TrieDB::::new(dyn_eph, root) .map_err(|e| format!("TrieDB creation error: {}", e))?; let mut iter = trie.iter() .map_err(|e| format!("TrieDB iteration error: {}", e))?; @@ -107,25 +150,63 @@ impl, H: Hasher> TrieBackendEssence where H::O } /// Get the value of storage at given key. - pub fn storage(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { - let eph = BackendStorageDBRef::new(&self.storage, child_info); + pub fn storage(&self, key: &[u8]) -> Result, String> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; let map_e = |e| format!("Trie lookup error: {}", e); read_trie_value::, _>(&eph, &self.root, key).map_err(map_e) } - /// Retrieve all entries keys of storage and call `f` for each of those keys. - pub fn for_keys( + /// Get the value of child storage at given key. + pub fn child_storage( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, String> { + let root = self.child_root(child_info)? 
+ .unwrap_or(empty_child_trie_root::>().encode()); + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_child_trie_value::, _>(child_info.keyspace(), &eph, &root, key) + .map_err(map_e) + } + + /// Retrieve all entries keys of child storage and call `f` for each of those keys. + pub fn for_keys_in_child_storage( &self, child_info: &ChildInfo, f: F, ) { - let eph = BackendStorageDBRef::new(&self.storage, child_info); + let root = match self.child_root(child_info) { + Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), + Err(e) => { + debug!(target: "trie", "Error while iterating child storage: {}", e); + return; + } + }; + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; - if let Err(e) = for_keys_in_trie::, _, BackendStorageDBRef>( + if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( + child_info.keyspace(), &eph, - &self.root, + &root, f, ) { debug!(target: "trie", "Error while iterating child storage: {}", e); @@ -133,8 +214,27 @@ impl, H: Hasher> TrieBackendEssence where H::O } /// Execute given closure for all keys starting with prefix. 
- pub fn for_keys_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), child_info) + pub fn for_child_keys_with_prefix( + &self, + child_info: &ChildInfo, + prefix: &[u8], + mut f: F, + ) { + let root_vec = match self.child_root(child_info) { + Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), + Err(e) => { + debug!(target: "trie", "Error while iterating child storage: {}", e); + return; + } + }; + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(&root_vec); + self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) + } + + /// Execute given closure for all keys starting with prefix. + pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) } fn keys_values_with_prefix_inner( @@ -142,9 +242,13 @@ impl, H: Hasher> TrieBackendEssence where H::O root: &H::Out, prefix: &[u8], mut f: F, - child_info: &ChildInfo, + child_info: Option<&ChildInfo>, ) { - let eph = BackendStorageDBRef::new(&self.storage, child_info); + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; let mut iter = move |db| -> Result<(), Box>> { let trie = TrieDB::::new(db, root)?; @@ -160,41 +264,30 @@ impl, H: Hasher> TrieBackendEssence where H::O Ok(()) }; - if let Err(e) = iter(&eph) { + let result = if let Some(child_info) = child_info { + let db = KeySpacedDB::new(&eph, child_info.keyspace()); + iter(&db) + } else { + iter(&eph) + }; + if let Err(e) = result { debug!(target: "trie", "Error while iterating by prefix: {}", e); } } /// Execute given closure for all key and values starting with prefix. 
- pub fn for_key_values_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f, child_info) + pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, f, None) } } -pub(crate) struct Ephemeral<'a, S, H, O> where - S: 'a + TrieBackendStorageRef, - H: 'a + Hasher, - O: hash_db::HashDB + Default + Consolidate, -{ - storage: &'a S, - child_info: &'a ChildInfo, - overlay: &'a mut O, - _ph: PhantomData, -} - -pub(crate) struct BackendStorageDBRef<'a, S, H> where - S: 'a + TrieBackendStorageRef, - H: 'a + Hasher, -{ +pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { storage: &'a S, - child_info: &'a ChildInfo, - _ph: PhantomData, + overlay: &'a mut S::Overlay, } -impl<'a, S, H, O> hash_db::AsPlainDB for Ephemeral<'a, S, H, O> where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - O: hash_db::HashDB + Default + Consolidate, +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB + for Ephemeral<'a, S, H> { fn as_plain_db<'b>(&'b self) -> &'b (dyn hash_db::PlainDB + 'b) { self } fn as_plain_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::PlainDB + 'b) { @@ -202,54 +295,41 @@ impl<'a, S, H, O> hash_db::AsPlainDB for Ephemeral<'a, S, H, O> } } -impl<'a, S, H, O> hash_db::AsHashDB for Ephemeral<'a, S, H, O> where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - O: hash_db::HashDB + Default + Consolidate, +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB + for Ephemeral<'a, S, H> { fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } -impl<'a, S, H, O> Ephemeral<'a, S, H, O> where - S: 'a + TrieBackendStorageRef, - H: 'a + Hasher, - O: hash_db::HashDB + Default + Consolidate, -{ - pub fn new(storage: &'a S, child_info: &'a ChildInfo, overlay: &'a mut O) -> Self { +impl<'a, S: 
TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { + pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { Ephemeral { storage, - child_info, overlay, - _ph: PhantomData, } } } -impl<'a, S, H> BackendStorageDBRef<'a, S, H> where - S: 'a + TrieBackendStorageRef, - H: 'a + Hasher, -{ - pub fn new(storage: &'a S, child_info: &'a ChildInfo) -> Self { - BackendStorageDBRef { - storage, - child_info, - _ph: PhantomData, - } - } -} - -impl<'a, S, H, O> hash_db::PlainDB for Ephemeral<'a, S, H, O> where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - O: hash_db::HashDB + Default + Consolidate, +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB + for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out) -> Option { - hash_db::PlainDBRef::get(self, key) + if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { + Some(val) + } else { + match self.storage.get(&key, EMPTY_PREFIX) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } + } } fn contains(&self, key: &H::Out) -> bool { - hash_db::PlainDBRef::contains(self, key) + hash_db::HashDB::get(self, key, EMPTY_PREFIX).is_some() } fn emplace(&mut self, key: H::Out, value: DBValue) { @@ -261,16 +341,21 @@ impl<'a, S, H, O> hash_db::PlainDB for Ephemeral<'a, S, H, O> w } } -impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O> where - S: 'a + TrieBackendStorageRef, - H: 'a + Hasher, - O: hash_db::HashDB + Default + Consolidate, +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDBRef + for Ephemeral<'a, S, H> { - fn get(&self, key: &H::Out) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { + fn get(&self, key: &H::Out) -> Option { hash_db::PlainDB::get(self, key) } + fn contains(&self, key: &H::Out) -> bool { hash_db::PlainDB::contains(self, key) } +} + +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB + for Ephemeral<'a, S, H> +{ + fn get(&self, key: &H::Out, 
prefix: Prefix) -> Option { + if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) } else { - match self.storage.get(self.child_info, &key, EMPTY_PREFIX) { + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -280,47 +365,8 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O } } - fn contains(&self, key: &H::Out) -> bool { - hash_db::HashDBRef::get(self, key, EMPTY_PREFIX).is_some() - } -} - -impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, S, H> where - S: 'a + TrieBackendStorageRef, - H: 'a + Hasher, -{ - fn get(&self, key: &H::Out) -> Option { - if check_if_empty_root::(key.as_ref()) { - return Some(vec![0u8]); - } - - match self.storage.get(self.child_info, &key, EMPTY_PREFIX) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } - } - - fn contains(&self, key: &H::Out) -> bool { - hash_db::HashDBRef::get(self, key, EMPTY_PREFIX).is_some() - } -} - - -impl<'a, S, H, O> hash_db::HashDB for Ephemeral<'a, S, H, O> where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - O: hash_db::HashDB + Default + Consolidate, -{ - - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDBRef::get(self, key, prefix) - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDBRef::contains(self, key, prefix) + hash_db::HashDB::get(self, key, prefix).is_some() } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { @@ -336,167 +382,57 @@ impl<'a, S, H, O> hash_db::HashDB for Ephemeral<'a, S, H, O> where } } -impl<'a, S, H, O> hash_db::HashDBRef for Ephemeral<'a, S, H, O> where - S: 'a + TrieBackendStorageRef, - H: 'a + Hasher, - O: hash_db::HashDB + Default + Consolidate, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { - Some(val) - } else { - match 
self.storage.get(self.child_info, &key, prefix) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } - } - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDBRef::get(self, key, prefix).is_some() - } -} - -impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> where - S: 'a + TrieBackendStorageRef, - H: 'a + Hasher, +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef + for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if check_if_empty_root::(key.as_ref()) { - return Some(vec![0u8]); - } - - match self.storage.get(self.child_info, &key, prefix) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } + hash_db::HashDB::get(self, key, prefix) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDBRef::get(self, key, prefix).is_some() + hash_db::HashDB::contains(self, key, prefix) } } /// Key-value pairs storage that is used by trie backend essence. -pub trait TrieBackendStorageRef { +pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. - fn get( - &self, - child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } -/// Key-value pairs storage that is used by trie backend essence. -pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync { } - -impl + Send + Sync> TrieBackendStorage for B {} - -impl TrieBackendStorageRef for Arc> { +// This implementation is used by normal storage trie clients. 
+impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get( - &self, - child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { - Storage::::get(self.deref(), child_info, key, prefix) - } -} - -impl> TrieBackendStorageRef for &S { - type Overlay = >::Overlay; - - fn get( - &self, - child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { - >::get(self, child_info, key, prefix) + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + Storage::::get(self.deref(), key, prefix) } } // This implementation is used by test storage trie clients. -// TODO try to remove this implementation!!! (use a ChildrenMap variant) -impl TrieBackendStorageRef for PrefixedMemoryDB { +impl TrieBackendStorage for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get( - &self, - _child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { - // No need to use keyspace for in memory db, ignoring child_info parameter. + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { Ok(hash_db::HashDB::get(self, key, prefix)) } } -impl TrieBackendStorageRef for MemoryDB { +impl TrieBackendStorage for MemoryDB { type Overlay = MemoryDB; - fn get( - &self, - _child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { - // No need to use keyspace for in memory db, ignoring child_info parameter. - // TODO try to remove this implementation!!! 
+ fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { Ok(hash_db::HashDB::get(self, key, prefix)) } } -// TOOD EMCH try remove -impl TrieBackendStorageRef for ChildrenMap> { - type Overlay = MemoryDB; - - fn get( - &self, - child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { - Ok(self.deref().get(child_info).and_then(|s| - hash_db::HashDB::get(s, key, prefix) - )) - } -} - -impl TrieBackendStorageRef for ChildrenProofMap> { - type Overlay = MemoryDB; - - fn get( - &self, - child_info: &ChildInfo, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { - let child_info_proof = child_info.proof_info(); - Ok(self.deref().get(&child_info_proof).and_then(|s| - hash_db::HashDB::get(s, key, prefix) - )) - } -} - #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; - use crate::trie_backend::TrieBackend; - use crate::backend::Backend; #[test] fn next_storage_key_and_next_child_storage_key_work() { @@ -514,13 +450,22 @@ mod test { trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); } + { + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); + // reuse of root_1 implicitly assert child trie root is same + // as top trie (contents must remain the same). 
+ let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); + trie.insert(b"3", &[1]).expect("insert failed"); + trie.insert(b"4", &[1]).expect("insert failed"); + trie.insert(b"6", &[1]).expect("insert failed"); + } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); }; - let essence_1 = TrieBackend::new(mdb, root_1); + let essence_1 = TrieBackendEssence::new(mdb, root_1); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -529,7 +474,7 @@ mod test { assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); let mdb = essence_1.into_storage(); - let essence_2 = TrieBackend::new(mdb, root_2); + let essence_2 = TrieBackendEssence::new(mdb, root_2); assert_eq!( essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index eff6ff590200a..24f7d9c3ec94f 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -21,11 +21,10 @@ use codec::{Encode, Decode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; -#[cfg(feature = "std")] -use sp_std::collections::btree_map::BTreeMap; use sp_debug_derive::RuntimeDebug; use sp_std::vec::Vec; +use sp_std::collections::btree_map::BTreeMap; use sp_std::ops::{Deref, DerefMut}; use ref_cast::RefCast; @@ -93,7 +92,7 @@ pub struct StorageData( /// Map of data to use in a storage, it is a collection of /// byte key and values. 
#[cfg(feature = "std")] -pub type StorageMap = BTreeMap, Vec>; +pub type StorageMap = std::collections::BTreeMap, Vec>; #[cfg(feature = "std")] #[derive(Debug, PartialEq, Eq, Clone)] @@ -420,25 +419,12 @@ impl ChildTrieParentKeyId { } } } - #[cfg(feature = "std")] -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +#[derive(Clone, PartialEq, Eq, Debug)] /// Type for storing a map of child trie related information. /// A few utilities methods are defined. pub struct ChildrenMap(pub BTreeMap); -#[cfg(feature = "std")] -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -/// Type for storing a map of child trie proof related information. -/// A few utilities methods are defined. -pub struct ChildrenProofMap(pub BTreeMap); - -/// Type alias for storage of children related content. -pub type ChildrenVec = Vec<(ChildInfo, T)>; - -/// Type alias for storage of children related content. -pub type ChildrenSlice<'a, T> = &'a [(ChildInfo, T)]; - #[cfg(feature = "std")] impl sp_std::ops::Deref for ChildrenMap { type Target = BTreeMap; @@ -462,52 +448,6 @@ impl sp_std::default::Default for ChildrenMap { } } -#[cfg(feature = "std")] -impl ChildrenMap { - /// Extend for `ChildrenMap` is usually about merging entries, - /// this method extends two maps, by applying a merge function - /// on each of its entries. - pub fn extend_with( - &mut self, - other: impl Iterator, - merge: impl Fn(&mut T, T), - ) { - use sp_std::collections::btree_map::Entry; - for (child_info, child_content) in other { - match self.0.entry(child_info) { - Entry::Occupied(mut entry) => { - merge(entry.get_mut(), child_content) - }, - Entry::Vacant(entry) => { - entry.insert(child_content); - }, - } - } - } - - /// Extends two maps, by extending entries with the same key. - pub fn extend_replace( - &mut self, - other: impl Iterator, - ) { - self.0.extend(other) - } - - /// Retains only the elements specified by the predicate. 
- pub fn retain(&mut self, mut f: impl FnMut(&ChildInfo, &mut T) -> bool) { - let mut to_del = Vec::new(); - for (k, v) in self.0.iter_mut() { - if !f(k, v) { - // this clone can be avoid with unsafe code - to_del.push(k.clone()); - } - } - for k in to_del { - self.0.remove(&k); - } - } -} - #[cfg(feature = "std")] impl IntoIterator for ChildrenMap { type Item = (ChildInfo, T); @@ -518,6 +458,12 @@ impl IntoIterator for ChildrenMap { } } +#[cfg(feature = "std")] +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +/// Type for storing a map of child trie proof related information. +/// A few utilities methods are defined. +pub struct ChildrenProofMap(pub BTreeMap); + #[cfg(feature = "std")] impl sp_std::ops::Deref for ChildrenProofMap { type Target = BTreeMap; diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 0c99b0013891b..6e23ec6f19ed7 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -27,7 +27,7 @@ mod trie_stream; use sp_std::boxed::Box; use sp_std::marker::PhantomData; use sp_std::vec::Vec; -use sp_core::{Hasher, InnerHasher}; +use hash_db::{Hasher, Prefix}; use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. @@ -49,16 +49,16 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; #[derive(Default)] /// substrate trie layout -pub struct Layout(PhantomData); +pub struct Layout(sp_std::marker::PhantomData); -impl TrieLayout for Layout { +impl TrieLayout for Layout { const USE_EXTENSION: bool = false; type Hash = H; type Codec = NodeCodec; } -impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out where +impl TrieConfiguration for Layout { + fn trie_root(input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -82,8 +82,8 @@ impl TrieConfiguration for Layout { /// TrieDB error over `TrieConfiguration` trait. 
pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub trait AsHashDB: hash_db::AsHashDB {} -impl> AsHashDB for T {} +pub trait AsHashDB: hash_db::AsHashDB {} +impl> AsHashDB for T {} /// Reexport from `hash_db`, with genericity set for `Hasher` trait. pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for key only. @@ -106,7 +106,7 @@ pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; /// Querying interface, as in `trie_db` but less generic. pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>; /// Hash type for a trie layout. -pub type TrieHash = <::Hash as InnerHasher>::Out; +pub type TrieHash = <::Hash as Hasher>::Out; /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. @@ -213,28 +213,75 @@ pub fn read_trie_value_with< /// Determine the empty child trie root. pub fn empty_child_trie_root( -) -> ::Out { +) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } -/// Test if this is an empty root node. -pub fn check_if_empty_root ( - root: &[u8], -) -> bool { - H::EMPTY_ROOT == root +/// Determine a child trie root given its ordered contents, closed form. H is the default hasher, +/// but a generic implementation may ignore this type parameter and use other hashers. +pub fn child_trie_root( + input: I, +) -> ::Out + where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, +{ + L::trie_root(input) +} + +/// Determine a child trie root given a hash DB and delta values. H is the default hasher, +/// but a generic implementation may ignore this type parameter and use other hashers. 
+pub fn child_delta_trie_root( + keyspace: &[u8], + db: &mut DB, + root_data: RD, + delta: I, +) -> Result<::Out, Box>> + where + I: IntoIterator)>, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + RD: AsRef<[u8]>, + DB: hash_db::HashDB + + hash_db::PlainDB, trie_db::DBValue>, +{ + let mut root = TrieHash::::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. + root.as_mut().copy_from_slice(root_data.as_ref()); + + { + let mut db = KeySpacedDBMut::new(&mut *db, keyspace); + let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; + + for (key, change) in delta { + match change { + Some(val) => trie.insert(key.as_ref(), val.as_ref())?, + None => trie.remove(key.as_ref())?, + }; + } + } + + Ok(root) } /// Call `f` for all keys in a child trie. -pub fn for_keys_in_trie( +pub fn for_keys_in_child_trie( + keyspace: &[u8], db: &DB, - root: &TrieHash, + root_slice: &[u8], mut f: F ) -> Result<(), Box>> where DB: hash_db::HashDBRef + hash_db::PlainDBRef, trie_db::DBValue>, { - let trie = TrieDB::::new(&*db, &root)?; + let mut root = TrieHash::::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. + root.as_mut().copy_from_slice(root_slice); + + let db = KeySpacedDB::new(&*db, keyspace); + let trie = TrieDB::::new(&db, &root)?; let iter = trie.iter()?; for x in iter { @@ -296,6 +343,141 @@ pub fn unpack_proof_to_memdb(input: &[Vec]) Ok((root.0, memory_db)) } +/// Read a value from the child trie. +pub fn read_child_trie_value( + keyspace: &[u8], + db: &DB, + root_slice: &[u8], + key: &[u8] +) -> Result>, Box>> + where + DB: hash_db::HashDBRef + + hash_db::PlainDBRef, trie_db::DBValue>, +{ + let mut root = TrieHash::::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. + root.as_mut().copy_from_slice(root_slice); + + let db = KeySpacedDB::new(&*db, keyspace); + Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) 
+} + +/// Read a value from the child trie with given query. +pub fn read_child_trie_value_with, DB>( + keyspace: &[u8], + db: &DB, + root_slice: &[u8], + key: &[u8], + query: Q +) -> Result>, Box>> + where + DB: hash_db::HashDBRef + + hash_db::PlainDBRef, trie_db::DBValue>, +{ + let mut root = TrieHash::::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. + root.as_mut().copy_from_slice(root_slice); + + let db = KeySpacedDB::new(&*db, keyspace); + Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) +} + +/// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the +/// prefix of every key value. +pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); + +/// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the +/// prefix of every key value. +/// +/// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. +pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); + +/// Utility function used to merge some byte data (keyspace) and `prefix` data +/// before calling key value database primitives. 
+fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { + let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; + result[..ks.len()].copy_from_slice(ks); + result[ks.len()..].copy_from_slice(prefix.0); + (result, prefix.1) +} + +impl<'a, DB, H> KeySpacedDB<'a, DB, H> where + H: Hasher, +{ + /// instantiate new keyspaced db + pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { + KeySpacedDB(db, ks, PhantomData) + } +} + +impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where + H: Hasher, +{ + /// instantiate new keyspaced db + pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { + KeySpacedDBMut(db, ks, PhantomData) + } +} + +impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where + DB: hash_db::HashDBRef, + H: Hasher, + T: From<&'static [u8]>, +{ + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get(key, (&derived_prefix.0, derived_prefix.1)) + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) + } +} + +impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, + H: Hasher, + T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, +{ + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get(key, (&derived_prefix.0, derived_prefix.1)) + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) + } + + fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.insert((&derived_prefix.0, derived_prefix.1), value) + } + + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { + let 
derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) + } + + fn remove(&mut self, key: &H::Out, prefix: Prefix) { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) + } +} + +impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, + H: Hasher, + T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, +{ + fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + &mut *self + } +} + /// Constants used into trie simplification codec. mod trie_constants { pub const EMPTY_TRIE: u8 = 0; @@ -310,8 +492,7 @@ mod tests { use super::*; use codec::{Encode, Compact}; use sp_core::Blake2Hasher; - use hash_db::HashDB; - use sp_core::InnerHasher; + use hash_db::{HashDB, Hasher}; use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; @@ -495,7 +676,7 @@ mod tests { #[test] fn random_should_work() { - let mut seed = ::Out::zero(); + let mut seed = ::Out::zero(); for test_i in 0..10000 { if test_i % 50 == 0 { println!("{:?} of 10000 stress tests done", test_i); diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 254adc2fcb48a..f0aad5f432d13 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -25,6 +25,7 @@ use hash_db::{Hasher, HashDB}; /// The proof consists of the set of serialized nodes in the storage trie accessed when looking up /// the keys covered by the proof. Verifying the proof requires constructing the partial trie from /// the serialized nodes and performing the key lookups. +/// TODO EMCH fuse with proving backend one. 
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct StorageProof { trie_nodes: Vec>, From 38fa2c0ecb4f2c81f057f5c4610628ca2ff74a91 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 19:54:37 +0200 Subject: [PATCH 093/185] it was actually fine --- .../state-machine/src/changes_trie/mod.rs | 4 +- .../state-machine/src/changes_trie/storage.rs | 6 +- .../state-machine/src/proving_backend.rs | 11 ++-- primitives/state-machine/src/trie_backend.rs | 6 +- .../state-machine/src/trie_backend_essence.rs | 65 +++++++++++++++++-- 5 files changed, 73 insertions(+), 19 deletions(-) diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index ee6c6778e0aad..ddc6c93ff86e5 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -71,7 +71,7 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; -use sp_core::storage::PrefixedStorageKey; +use sp_core::storage::{PrefixedStorageKey, ChildInfo}; use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -169,7 +169,7 @@ pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 81651dd2e719b..54c971c83f29d 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -18,7 +18,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; -use 
sp_core::storage::PrefixedStorageKey; +use sp_core::storage::{PrefixedStorageKey, ChildInfo}; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; @@ -189,7 +189,7 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) + MemoryDB::::get(&self.data.read().mdb, &ChildInfo::top_trie(), key, prefix) } } @@ -206,7 +206,7 @@ impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index abc96afb93b90..241682c5a86e8 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -352,6 +352,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let eph = Ephemeral::new( self.backend.backend_storage(), &mut read_overlay, + None, ); let map_e = |e| format!("Trie lookup error: {}", e); @@ -379,6 +380,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let eph = Ephemeral::new( self.backend.backend_storage(), &mut read_overlay, + Some(child_info), ); let map_e = |e| format!("Trie lookup error: {}", e); @@ -398,6 +400,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let eph = Ephemeral::new( self.backend.backend_storage(), &mut read_overlay, + None, ); let mut iter = move || -> Result<(), Box>> { @@ -515,19 +518,17 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { match &self.proof_recorder { ProofRecorder::Flat(rec) => { if let Some(v) = rec.read().get(key) { return Ok(v.clone()); } - let 
backend_value = self.backend.get(key, prefix)?; + let backend_value = self.backend.get(child_info, key, prefix)?; rec.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) }, ProofRecorder::Full(rec) => { - unimplemented!() -/* // TODO need flattening -> use another struct Proof Recordertrie backend. if let Some(v) = rec.read().get(child_info).and_then(|s| s.get(key)) { return Ok(v.clone()); } @@ -535,7 +536,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage rec.write().entry(child_info.clone()) .or_default() .insert(key.clone(), backend_value.clone()); - Ok(backend_value)*/ + Ok(backend_value) }, } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 957abf841d00a..6c1156d6ee360 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -161,7 +161,7 @@ impl, H: Hasher> Backend for TrieBackend where fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay, None); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -185,7 +185,7 @@ impl, H: Hasher> Backend for TrieBackend where fn keys(&self, prefix: &[u8]) -> Vec { let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay, None); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -213,6 +213,7 @@ impl, H: Hasher> Backend for TrieBackend where let mut eph = Ephemeral::new( self.essence.backend_storage(), &mut write_overlay, + None, ); match delta_trie_root::, _, _, _, _>(&mut eph, root, delta) { @@ -252,6 +253,7 @@ impl, H: Hasher> 
Backend for TrieBackend where let mut eph = Ephemeral::new( self.essence.backend_storage(), &mut write_overlay, + Some(child_info), ); match child_delta_trie_root::, _, _, _, _, _>( diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index fda51326cb246..49e7529d0396c 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -19,6 +19,7 @@ use std::ops::Deref; use std::sync::Arc; +use std::marker::PhantomData; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, @@ -110,6 +111,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let eph = Ephemeral { storage: &self.storage, overlay: &mut read_overlay, + child_info, + _ph: PhantomData, }; let dyn_eph: &dyn hash_db::HashDBRef<_, _>; let keyspace_eph; @@ -155,6 +158,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let eph = Ephemeral { storage: &self.storage, overlay: &mut read_overlay, + child_info: None, + _ph: PhantomData, }; let map_e = |e| format!("Trie lookup error: {}", e); @@ -175,6 +180,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let eph = Ephemeral { storage: &self.storage, overlay: &mut read_overlay, + child_info: Some(child_info), + _ph: PhantomData, }; let map_e = |e| format!("Trie lookup error: {}", e); @@ -201,6 +208,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let eph = Ephemeral { storage: &self.storage, overlay: &mut read_overlay, + child_info: Some(child_info), + _ph: PhantomData, }; if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( @@ -248,6 +257,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let eph = Ephemeral { storage: &self.storage, overlay: &mut read_overlay, + child_info, + _ph: PhantomData, }; let mut iter = move |db| -> Result<(), Box>> { @@ -284,6 +295,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub(crate) struct 
Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { storage: &'a S, overlay: &'a mut S::Overlay, + child_info: Option<&'a ChildInfo>, + _ph: PhantomData, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB @@ -303,10 +316,16 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { + pub fn new( + storage: &'a S, + overlay: &'a mut S::Overlay, + child_info: Option<&'a ChildInfo>, + ) -> Self { Ephemeral { storage, overlay, + child_info, + _ph: PhantomData, } } } @@ -318,7 +337,14 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -355,7 +381,14 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) } else { - match self.storage.get(&key, prefix) { + let top; + let child_info = if let Some(child_info) = self.child_info { + child_info + } else { + top = ChildInfo::top_trie(); + &top + }; + match self.storage.get(child_info, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -399,14 +432,14 @@ pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String>; } // This implementation is used by normal storage trie clients. 
impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { Storage::::get(self.deref(), key, prefix) } } @@ -415,7 +448,7 @@ impl TrieBackendStorage for Arc> { impl TrieBackendStorage for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -423,11 +456,29 @@ impl TrieBackendStorage for PrefixedMemoryDB { impl TrieBackendStorage for MemoryDB { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { Ok(hash_db::HashDB::get(self, key, prefix)) } } +impl TrieBackendStorage for ChildrenProofMap> { + type Overlay = MemoryDB; + + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + let child_info_proof = child_info.proof_info(); + Ok(self.deref().get(&child_info_proof).and_then(|s| + hash_db::HashDB::get(s, key, prefix) + )) + } +} + + + #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; From b7f7ee5296146205e01e102d35872982fc0ac6f3 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 16 Apr 2020 11:06:06 +0200 Subject: [PATCH 094/185] doc --- .../state-machine/src/proving_backend.rs | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 535d869474fa9..6f9e5e2738212 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -48,12 +48,10 @@ pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Has #[repr(u32)] 
#[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum StorageProofKind { - /// The proof can be build by multiple child trie only if - /// they are of the same kind, that way we can store all - /// encoded node in the same container. + /// The proof can be build by multiple child trie only when + /// their query can be done on a single memory backend, + /// all encoded node can be stored in the same container. Flatten, -/* /// Top trie proof only, in compact form. - TopTrieCompact,*/ /// Proofs split by child trie. Full, /// Compact form of proofs split by child trie. @@ -88,6 +86,14 @@ pub enum CompactScheme { /// calculated when reading the structue /// of the trie. TrieSkipHashes = 1, +/* /// Skip encoding of hashes and values, + /// we need to know them when when unpacking. + KnownQueryPlanAndValues = 2, + /// Skip encoding of hashes, this need knowing + /// the queried keys when unpacking, can be faster + /// than `TrieSkipHashes` but with similar packing + /// gain. + KnownQueryPlan = 3,*/ } type ProofNodes = Vec>; @@ -106,12 +112,14 @@ pub enum StorageProof { /// container, no child trie information is provided, this works only for proof accessing /// the same kind of child trie. Flatten(ProofNodes), -/* /// If proof only cover a single trie, we compact the proof by ommitting some content - /// that can be rebuild on construction. For patricia merkle trie it will be hashes that - /// are not necessary between node, with indexing of the missing hash based on orders - /// of nodes. - TopTrieCompact(ProofCompacted),*/ - /// Fully descriped proof, it includes the child trie individual descriptions. +/* TODO EMCH implement as it will be default for trie skip hashes /// Proof can address multiple child trie, but results in a single flatten + /// db backend. + FlattenCompact(Vec),*/ + /// Fully descriBed proof, it includes the child trie individual descriptions. 
+ /// Currently Full variant are not of any use as we have only child trie that can use the same + /// memory db backend. + /// TODO EMCH consider removal: could be put back when needed, and probably + /// with a new StorageProof key that is the same for a flattenable kind. Full(ChildrenProofMap), /// Fully descriped proof, compact encoded. FullCompact(ChildrenProofMap), From 091ca80c3470f0cc84295ee2234e5887df8526e6 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 16 Apr 2020 11:34:30 +0200 Subject: [PATCH 095/185] pushing back full backend (will probably be removed with the Full variant later as they are not needed). --- .../api/proc-macro/src/impl_runtime_apis.rs | 2 +- .../state-machine/src/changes_trie/mod.rs | 2 +- primitives/state-machine/src/lib.rs | 6 +- .../state-machine/src/proving_backend.rs | 66 +++++++++++-------- primitives/state-machine/src/trie_backend.rs | 5 -- .../state-machine/src/trie_backend_essence.rs | 2 +- 6 files changed, 46 insertions(+), 37 deletions(-) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index f6498d084be62..d369d5aeb9074 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -280,7 +280,7 @@ fn generate_runtime_api_base_structures() -> Result { } fn record_proof(&mut self) { - // TODO should we use full and then use some packing + // TODO should we use full and then use some packing Most certainly. 
self.recorder = Some(#crate_::ProofRecorder::::Flat(Default::default())); } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index ddc6c93ff86e5..8b352e35a181b 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -169,7 +169,7 @@ pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 5cf39c1a53f48..67f40bad11099 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -617,7 +617,7 @@ where /// Check execution proof on proving backend, generated by `prove_execution` call. pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend, H>, + trie_backend: &TrieBackend>, H>, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Box, @@ -851,7 +851,7 @@ where /// Check storage read proof on pre-created proving backend. pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend>, H>, key: &[u8], ) -> Result>, Box> where @@ -877,7 +877,7 @@ where /// Check child storage read proof on pre-created proving backend. 
pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, + proving_backend: &TrieBackend>, H>, child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 241682c5a86e8..dfc819219950d 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -86,6 +86,14 @@ pub enum CompactScheme { /// calculated when reading the structue /// of the trie. TrieSkipHashes = 1, +/* /// Skip encoding of hashes and values, + /// we need to know them when when unpacking. + KnownQueryPlanAndValues = 2, + /// Skip encoding of hashes, this need knowing + /// the queried keys when unpacking, can be faster + /// than `TrieSkipHashes` but with similar packing + /// gain. + KnownQueryPlan = 3,*/ } type ProofNodes = Vec>; @@ -104,12 +112,14 @@ pub enum StorageProof { /// container, no child trie information is provided, this works only for proof accessing /// the same kind of child trie. Flatten(ProofNodes), -/* /// If proof only cover a single trie, we compact the proof by ommitting some content - /// that can be rebuild on construction. For patricia merkle trie it will be hashes that - /// are not necessary between node, with indexing of the missing hash based on orders - /// of nodes. - TopTrieCompact(ProofCompacted),*/ - /// Fully descriped proof, it includes the child trie individual descriptions. +/* TODO EMCH implement as it will be default for trie skip hashes /// Proof can address multiple child trie, but results in a single flatten + /// db backend. + FlattenCompact(Vec),*/ + /// Fully descriBed proof, it includes the child trie individual descriptions. + /// Currently Full variant are not of any use as we have only child trie that can use the same + /// memory db backend. 
+ /// TODO EMCH consider removal: could be put back when needed, and probably + /// with a new StorageProof key that is the same for a flattenable kind. Full(ChildrenProofMap), /// Fully descriped proof, compact encoded. FullCompact(ChildrenProofMap), @@ -195,7 +205,7 @@ impl StorageProof { where H::Out: Codec, { let map_e = |e| format!("Trie pack error: {}", e); - + if let StorageProof::Full(children) = self { let mut result = ChildrenProofMap::default(); for (child_info, proof) in children { @@ -672,14 +682,17 @@ where pub fn create_proof_check_backend( root: H::Out, proof: StorageProof, -) -> Result, H>, Box> +) -> Result>, H>, Box> where H: Hasher, H::Out: Codec, { + use std::ops::Deref; let db = create_proof_check_backend_storage(proof) .map_err(|e| Box::new(e) as Box)?; - if db.contains(&root, EMPTY_PREFIX) { + if db.deref().get(&ChildInfoProof::top_trie()) + .map(|db| db.contains(&root, EMPTY_PREFIX)) + .unwrap_or(false) { Ok(TrieBackend::new_with_roots(db, root)) } else { Err(Box::new(ExecutionError::InvalidProof)) @@ -690,47 +703,48 @@ where /// Currently child trie are all with same backend /// implementation, therefore using /// `create_flat_proof_check_backend_storage` is prefered. -/// TODO consider removing this `ChildrenMap>` -/// for now (still we do not merge unpack, that can be good -/// somehow). +/// TODO flat proof check is enough for now, do we want to +/// maintain the full variant? pub fn create_proof_check_backend_storage( proof: StorageProof, -) -> Result, String> +) -> Result>, String> where H: Hasher, { let map_e = |e| format!("Trie unpack error: {}", e); - let mut db = MemoryDB::default(); + let mut result = ChildrenProofMap::default(); match proof { s@StorageProof::Flatten(..) 
=> { + let mut db = MemoryDB::default(); for item in s.iter_nodes_flatten() { db.insert(EMPTY_PREFIX, &item); } + result.insert(ChildInfoProof::top_trie(), db); }, StorageProof::Full(children) => { - for (_child_info, proof) in children.into_iter() { + for (child_info, proof) in children.into_iter() { + let mut db = MemoryDB::default(); for item in proof.into_iter() { db.insert(EMPTY_PREFIX, &item); } + result.insert(child_info, db); } }, - // TODO EMCH it is rather interesting to notice that child_info in proof - // does not look useful. StorageProof::FullCompact(children) => { - for (_child_info, (compact_scheme, proof)) in children.into_iter() { + for (child_info, (compact_scheme, proof)) in children.into_iter() { match compact_scheme { CompactScheme::TrieSkipHashes => { - for item in sp_trie::unpack_proof::>(proof.as_slice()) - .map_err(map_e)? - .1.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). + let (_root, db) = sp_trie::unpack_proof_to_memdb::>(proof.as_slice()) + .map_err(map_e)?; + result.insert(child_info, db); }, } } }, } - Ok(db) + Ok(result) } /// Create in-memory storage of proof check backend. @@ -925,7 +939,7 @@ mod tests { in_memory_root.into(), proof ).unwrap(); - + assert_eq!( proof_check.child_storage(&child_info_1, &[64]).unwrap().unwrap(), vec![64] @@ -935,7 +949,7 @@ mod tests { in_memory_root.into(), proof ).unwrap(); - + assert_eq!( proof_check.child_storage(&child_info_1, &[64]).unwrap().unwrap(), vec![64] diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 6c1156d6ee360..00e6e0ab3f39f 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -32,9 +32,6 @@ use parking_lot::RwLock; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. 
pub struct TrieBackend, H: Hasher> { essence: TrieBackendEssence, - // storing child_info of top trie even if it is in - // theory a bit useless (no heap alloc on empty vec). - top_trie: ChildInfo, /// If defined, we store encoded visited roots for top_trie and child trie in this /// map. It also act as a cache. register_roots: Option>>>>, @@ -46,7 +43,6 @@ impl, H: Hasher> TrieBackend where H::Out: Codec pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root), - top_trie: ChildInfo::top_trie(), register_roots: None, } } @@ -56,7 +52,6 @@ impl, H: Hasher> TrieBackend where H::Out: Codec pub fn new_with_roots(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root), - top_trie: ChildInfo::top_trie(), register_roots: Some(Arc::new(RwLock::new(Default::default()))), } } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 49e7529d0396c..f58fe9c012404 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -27,7 +27,7 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenProofMap}; +use sp_core::storage::{ChildInfo, ChildrenProofMap}; use codec::Encode; /// Patricia trie-based storage trait. From e10387a6323fbf06ae44f491870c7c63df4d93bc Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 16 Apr 2020 12:06:11 +0200 Subject: [PATCH 096/185] Register full in runtime, otherwhise packing would not be possible, note that we extract a full proof, so we still need it but probably not all the helpers function. 
--- primitives/api/proc-macro/src/impl_runtime_apis.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index d369d5aeb9074..0e6ac213bd1ee 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -280,12 +280,9 @@ fn generate_runtime_api_base_structures() -> Result { } fn record_proof(&mut self) { - // TODO should we use full and then use some packing Most certainly. - self.recorder = Some(#crate_::ProofRecorder::::Flat(Default::default())); + self.recorder = Some(#crate_::ProofRecorder::::Full(Default::default())); } - // TODO should we make a storage kind configurable then - // we could pack full proof if needed fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() From 557e6efe3df20878b3a66dd4bc8d88d40e82426d Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 16 Apr 2020 14:57:41 +0200 Subject: [PATCH 097/185] passing around a target storage kind --- bin/node/cli/src/service.rs | 4 +- client/api/src/call_executor.rs | 3 +- client/block-builder/src/lib.rs | 6 +- client/src/call_executor.rs | 4 +- client/src/light/call_executor.rs | 6 +- .../api/proc-macro/src/decl_runtime_apis.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 18 +++-- .../proc-macro/src/mock_impl_runtime_apis.rs | 2 +- primitives/api/src/lib.rs | 4 +- primitives/consensus/common/src/lib.rs | 35 +++++++-- primitives/state-machine/src/lib.rs | 6 +- .../state-machine/src/proving_backend.rs | 72 +++++++++++++------ 12 files changed, 110 insertions(+), 52 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 757022655dd83..b87d2140b75aa 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -371,7 +371,7 @@ mod tests { use sc_consensus_epochs::descendent_query; use sp_consensus::{ Environment, Proposer, BlockImportParams, 
BlockOrigin, ForkChoiceStrategy, BlockImport, - RecordProof, + RecordProof, StorageProofKind, }; use node_primitives::{Block, DigestItem, Signature}; use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; @@ -565,7 +565,7 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), - RecordProof::Yes, + RecordProof::Yes(StorageProofKind::Flatten), ).await }).expect("Error making test block").block; diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 3afd29be8d49c..f3d54986c45ba 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -23,6 +23,7 @@ use sp_runtime::{ }; use sp_state_machine::{ OverlayedChanges, ExecutionManager, ExecutionStrategy, StorageProof, + StorageProofKind, }; use sc_executor::{RuntimeVersion, NativeVersion}; use sp_externalities::Extensions; @@ -90,7 +91,7 @@ pub trait CallExecutor { initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, - proof_recorder: &Option>, + proof_recorder: &Option<(ProofRecorder, StorageProofKind)>, extensions: Option, ) -> sp_blockchain::Result> where ExecutionManager: Clone; diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 480a759e30d70..63b695efe2efa 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -128,8 +128,8 @@ where let mut api = api.runtime_api(); - if record_proof.yes() { - api.record_proof(); + if let Some(kind) = record_proof.kind() { + api.record_proof(kind); } let block_id = BlockId::Hash(parent_hash); @@ -230,7 +230,7 @@ mod tests { &client, client.info().best_hash, client.info().best_number, - RecordProof::Yes, + RecordProof::Yes(sp_api::StorageProofKind::Flatten), Default::default(), &*backend, ).unwrap().build().unwrap(); diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index f795b686ee1d0..94b257f3596ed 100644 --- a/client/src/call_executor.rs +++ 
b/client/src/call_executor.rs @@ -126,7 +126,7 @@ where initialize_block: InitializeBlock<'a, Block>, execution_manager: ExecutionManager, native_call: Option, - recorder: &Option>, + recorder: &Option<(ProofRecorder, StorageProofKind)>, extensions: Option, ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { match initialize_block { @@ -144,7 +144,7 @@ where let mut state = self.backend.state_at(*at)?; match recorder { - Some(recorder) => { + Some((recorder, _target_proof_kind)) => { let trie_state = state.as_trie_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 61aeabfc4d016..a7ef8156f1816 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -29,7 +29,7 @@ use sp_externalities::Extensions; use sp_state_machine::{ self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, - merge_storage_proofs, + merge_storage_proofs, StorageProofKind, }; use hash_db::Hasher; @@ -113,7 +113,7 @@ impl CallExecutor for initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, native_call: Option, - recorder: &Option>, + recorder: &Option<(ProofRecorder, StorageProofKind)>, extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { // there's no actual way/need to specify native/wasm execution strategy on light node @@ -348,7 +348,7 @@ mod tests { _initialize_block: InitializeBlock<'a, Block>, _execution_manager: ExecutionManager, _native_call: Option, - _proof_recorder: &Option>, + _proof_recorder: &Option<(ProofRecorder, StorageProofKind)>, _extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { unreachable!() diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 
ef50bd840a7cc..d310a6b992f7c 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -413,7 +413,7 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { initialized_block: &std::cell::RefCell>>, native_call: Option, context: #crate_::ExecutionContext, - recorder: &Option<#crate_::ProofRecorder>, + recorder: &Option<(#crate_::ProofRecorder, #crate_::StorageProofKind)>, ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { let version = call_runtime_at.runtime_version_at(at)?; use #crate_::InitializeBlock; diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 0e6ac213bd1ee..6236767f8c71f 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -210,7 +210,7 @@ fn generate_runtime_api_base_structures() -> Result { storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache >, - recorder: Option<#crate_::ProofRecorder>, + recorder: Option<(#crate_::ProofRecorder, #crate_::StorageProofKind)>, } // `RuntimeApi` itself is not threadsafe. 
However, an instance is only available in a @@ -279,15 +279,19 @@ fn generate_runtime_api_base_structures() -> Result { self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, pred)) } - fn record_proof(&mut self) { - self.recorder = Some(#crate_::ProofRecorder::::Full(Default::default())); + fn record_proof(&mut self, kind: #crate_::StorageProofKind) { + if kind.need_register_full() { + self.recorder = Some((#crate_::ProofRecorder::::Full(Default::default()), kind)); + } else { + self.recorder = Some((#crate_::ProofRecorder::::Flat(Default::default()), kind)); + } } fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() - .and_then(|recorder| { - recorder.extract_proof().ok() + .and_then(|(recorder, kind)| { + recorder.extract_proof(&kind).ok() }) } @@ -331,7 +335,7 @@ fn generate_runtime_api_base_structures() -> Result { commit_on_success: true.into(), initialized_block: None.into(), changes: Default::default(), - recorder: Default::default(), + recorder: None, storage_transaction_cache: Default::default(), }.into() } @@ -351,7 +355,7 @@ fn generate_runtime_api_base_structures() -> Result { &std::cell::RefCell<#crate_::OverlayedChanges>, &std::cell::RefCell<#crate_::StorageTransactionCache>, &std::cell::RefCell>>, - &Option<#crate_::ProofRecorder>, + &Option<(#crate_::ProofRecorder, #crate_::StorageProofKind)>, ) -> std::result::Result<#crate_::NativeOrEncoded, E>, E, >( diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 0767c804a637e..a90e6e1812bb5 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -94,7 +94,7 @@ fn implement_common_api_traits( Ok(pred(A::VERSION)) } - fn record_proof(&mut self) { + fn record_proof(&mut self, _kind: #crate_::StorageProofKind) { unimplemented!("`record_proof` not implemented for runtime api mocks") } diff --git 
a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index cca2c3f8de0fd..b3dc97d716189 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -371,7 +371,7 @@ pub trait ApiExt: ApiErrorExt { ) -> Result where Self: Sized; /// Start recording all accessed trie nodes for generating proofs. - fn record_proof(&mut self); + fn record_proof(&mut self, kind: StorageProofKind); /// Extract the recorded proof. /// @@ -438,7 +438,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>, + pub recorder: &'a Option<(ProofRecorder, StorageProofKind)>, } /// Something that can call into the an api at a given block. diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 09dc031dc9ba0..0d2252e93dc31 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -36,7 +36,8 @@ use sp_runtime::{ }; use futures::prelude::*; pub use sp_inherents::InherentData; - +use sp_state_machine::StorageProof; +pub use sp_state_machine::StorageProofKind; pub mod block_validation; pub mod offline_tracker; pub mod error; @@ -91,7 +92,7 @@ pub struct Proposal { /// The block that was build. pub block: Block, /// Optional proof that was recorded while building the block. - pub proof: Option, + pub proof: Option, /// The storage changes while building this block. pub storage_changes: sp_state_machine::StorageChanges, NumberFor>, } @@ -104,7 +105,7 @@ pub struct Proposal { #[derive(Copy, Clone, PartialEq)] pub enum RecordProof { /// `Yes`, record a proof. - Yes, + Yes(StorageProofKind), /// `No`, don't record any proof. No, } @@ -113,16 +114,40 @@ impl RecordProof { /// Returns if `Self` == `Yes`. pub fn yes(&self) -> bool { match self { - Self::Yes => true, + Self::Yes(_) => true, Self::No => false, } } + + /// Returns storage proof kind. 
+ pub fn kind(self) -> Option { + match self { + Self::Yes(kind) => Some(kind), + Self::No => None, + } + } +} + +impl From for RecordProof { + fn from(val: StorageProofKind) -> Self { + Self::Yes(val) + } +} + +impl From> for RecordProof { + fn from(val: Option) -> Self { + match val { + Some(kind) => Self::Yes(kind), + None => Self::No, + } + } } impl From for RecordProof { fn from(val: bool) -> Self { if val { - Self::Yes + // default to a flatten proof. + Self::Yes(StorageProofKind::Flatten) } else { Self::No } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 67f40bad11099..c2c2be6558f02 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -525,7 +525,7 @@ where always_wasm(), None, )?; - let mut proof = sm.backend.extract_proof() + let mut proof = sm.backend.extract_proof(&kind) .map_err(|e| Box::new(e) as Box)?; if kind.is_compact() { let roots = trie_backend.extract_registered_roots(); @@ -707,7 +707,7 @@ where .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - let mut proof = proving_backend.extract_proof() + let mut proof = proving_backend.extract_proof(&kind) .map_err(|e| Box::new(e) as Box)?; if kind.is_compact() { let roots = trie_backend.extract_registered_roots(); @@ -739,7 +739,7 @@ where .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - let mut proof = proving_backend.extract_proof() + let mut proof = proving_backend.extract_proof(&kind) .map_err(|e| Box::new(e) as Box)?; if kind.is_compact() { let roots = trie_backend.extract_registered_roots(); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index aeef938ec8c03..7c003a5df2b43 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -43,16 +43,17 @@ pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Has /// Different kind of proof 
representation are allowed. /// This definition is used as input parameter when producing /// a storage proof. -#[repr(u32)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum StorageProofKind { /// The proof can be build by multiple child trie only when /// their query can be done on a single memory backend, /// all encoded node can be stored in the same container. Flatten, + // FlattenCompact(CompactScheme), /// Proofs split by child trie. Full, /// Compact form of proofs split by child trie. + /// TODO indicate compact scheme to use (BtreeMap?) FullCompact, } @@ -62,7 +63,7 @@ impl StorageProofKind { pub fn is_flatten(&self) -> bool { match self { StorageProofKind::Flatten => true, - StorageProofKind::Full | StorageProofKind::FullCompact => false + StorageProofKind::Full | StorageProofKind::FullCompact => false, } } @@ -71,7 +72,17 @@ impl StorageProofKind { pub fn is_compact(&self) -> bool { match self { StorageProofKind::FullCompact => true, - StorageProofKind::Full | StorageProofKind::Flatten => false + StorageProofKind::Full | StorageProofKind::Flatten => false, + } + } + + /// Indicate if we need all child trie information + /// to get register for producing the proof. + pub fn need_register_full(&self) -> bool { + match self { + StorageProofKind::Flatten => false, + // StorageProofKind::FlattenCompact => true, + StorageProofKind::Full | StorageProofKind::FullCompact => true, } } } @@ -198,6 +209,10 @@ impl StorageProof { } } + pub fn kind(&self) -> StorageProofKind { + unimplemented!() + } + /// This packs `Full` to `FullCompact`, using needed roots. pub fn pack(self, roots: &ChildrenProofMap>) -> Result where H::Out: Codec, @@ -489,14 +504,18 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } /// Extracting the gathered unordered proof. 
- pub fn extract_proof(&self) -> Result { - self.0.essence().backend_storage().proof_recorder.extract_proof() + pub fn extract_proof(&self, kind: &StorageProofKind) -> Result { + self.0.essence().backend_storage().proof_recorder.extract_proof(kind) } } impl ProofRecorder { /// Extracting the gathered unordered proof. - pub fn extract_proof(&self) -> Result { + pub fn extract_proof(&self, kind: &StorageProofKind) -> Result { + // TODO EMCH run the kind correctly + // flat -> can compress with query plan only and stay flat + // full -> can flatte + // -> can compress (compress with query plan is better after being flattened) Ok(match self { ProofRecorder::Flat(rec) => { let trie_nodes = rec @@ -804,19 +823,25 @@ mod tests { #[test] fn proof_is_empty_until_value_is_read() { let trie_backend = test_trie(); - assert!(test_proving(&trie_backend, true).extract_proof().unwrap().is_empty()); - assert!(test_proving(&trie_backend, false).extract_proof().unwrap().is_empty()); + let kind = StorageProofKind::Flatten; + assert!(test_proving(&trie_backend, kind.is_flatten()).extract_proof(&kind).unwrap().is_empty()); + let kind = StorageProofKind::Full; + assert!(test_proving(&trie_backend, kind.is_flatten()).extract_proof(&kind).unwrap().is_empty()); + let kind = StorageProofKind::FullCompact; + assert!(test_proving(&trie_backend, kind.is_flatten()).extract_proof(&kind).unwrap().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { let trie_backend = test_trie(); - let backend = test_proving(&trie_backend, true); + let kind = StorageProofKind::Flatten; + let backend = test_proving(&trie_backend, kind.is_flatten()); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().unwrap().is_empty()); - let backend = test_proving(&trie_backend, false); + assert!(!backend.extract_proof(&kind).unwrap().is_empty()); + let kind = StorageProofKind::Full; + let backend = test_proving(&trie_backend, kind.is_flatten()); 
assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().unwrap().is_empty()); + assert!(!backend.extract_proof(&kind).unwrap().is_empty()); } #[test] @@ -859,17 +884,18 @@ mod tests { assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let test = |flat| { - let proving = ProvingBackend::new(trie, flat); + let test = |kind: StorageProofKind| { + let proving = ProvingBackend::new(trie, kind.is_flatten()); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof().unwrap(); + let proof = proving.extract_proof(&kind).unwrap(); let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); }; - test(true); - test(false); + test(StorageProofKind::Flatten); + test(StorageProofKind::Full); + test(StorageProofKind::FullCompact); } #[test] @@ -912,11 +938,12 @@ mod tests { vec![i] )); - let test = |flat| { + let test = |kind: StorageProofKind| { + let flat = kind.is_flatten(); let proving = ProvingBackend::new(trie, flat); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof().unwrap(); + let proof = proving.extract_proof(&kind).unwrap(); let proof_check = create_proof_check_backend::( in_memory_root.into(), @@ -931,7 +958,7 @@ mod tests { let proving = ProvingBackend::new(trie, flat); assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); - let proof = proving.extract_proof().unwrap(); + let proof = proving.extract_proof(&kind).unwrap(); if flat { let proof_check = create_flat_proof_check_backend::( in_memory_root.into(), @@ -954,7 +981,8 @@ mod tests { ); } }; - test(true); - test(false); + test(StorageProofKind::Flatten); + test(StorageProofKind::Full); + test(StorageProofKind::FullCompact); } } From f687fc227e59c2b40eba344731371df18a272ef8 Mon Sep 17 
00:00:00 2001 From: cheme Date: Thu, 16 Apr 2020 18:15:48 +0200 Subject: [PATCH 098/185] fix compact proof failure. --- .../api/proc-macro/src/impl_runtime_apis.rs | 4 +- .../state-machine/src/changes_trie/build.rs | 6 +- .../src/changes_trie/changes_iterator.rs | 10 +-- .../state-machine/src/changes_trie/mod.rs | 2 +- .../state-machine/src/changes_trie/prune.rs | 3 +- primitives/state-machine/src/lib.rs | 9 +-- .../state-machine/src/overlayed_changes.rs | 12 ++- .../state-machine/src/proving_backend.rs | 47 +++++++++--- primitives/state-machine/src/trie_backend.rs | 21 +++--- .../state-machine/src/trie_backend_essence.rs | 73 ++++++++++++++----- 10 files changed, 123 insertions(+), 64 deletions(-) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 6236767f8c71f..bc9813bf2094b 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -291,7 +291,9 @@ fn generate_runtime_api_base_structures() -> Result { self.recorder .take() .and_then(|(recorder, kind)| { - recorder.extract_proof(&kind).ok() + // TODO EMCH this will fail for compact as we need the register + // root + recorder.extract_proof(&kind, None).ok() }) } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 45535204e0884..97993bd231705 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -52,7 +52,7 @@ pub(crate) fn prepare_input<'a, B, H, Number>( where B: Backend, H: Hasher + 'a, - H::Out: Encode, + H::Out: Decode + Encode, Number: BlockNumber, { let number = parent.number.clone() + One::one(); @@ -212,7 +212,7 @@ fn prepare_digest_input<'a, H, Number>( ), String> where H: Hasher, - H::Out: 'a + Encode, + H::Out: 'a + Decode + Encode, Number: BlockNumber, { let build_skewed_digest = config.end.as_ref() == Some(&block); @@ -285,6 
+285,7 @@ fn prepare_digest_input<'a, H, Number>( let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, + None, ); trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| @@ -317,6 +318,7 @@ fn prepare_digest_input<'a, H, Number>( let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, + None, ); trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index f5a936069ba40..62121185866a9 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -130,7 +130,7 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( max: Number, storage_key: Option<&PrefixedStorageKey>, key: &[u8] -) -> Result, String> where H::Out: Encode { +) -> Result, String> where H::Out: Decode + Encode { key_changes_proof_check_with_db( config, roots_storage, @@ -153,7 +153,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( max: Number, storage_key: Option<&PrefixedStorageKey>, key: &[u8] -) -> Result, String> where H::Out: Encode { +) -> Result, String> where H::Out: Decode + Encode { // we can't query any roots before root let max = ::std::cmp::min(max.clone(), end.number.clone()); @@ -318,13 +318,13 @@ pub struct DrilldownIterator<'a, H, Number> } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> - where H::Out: Encode + where H::Out: Decode + Encode { type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) + 
TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root, None).storage(key)) } } @@ -368,7 +368,7 @@ impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> .expect("only fails when already borrowed; storage() is non-reentrant; qed"); self.essence.next(|storage, root, key| ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), + backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root, None), proof_recorder, }.storage(key)) } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 8b352e35a181b..09453d00eef56 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -230,7 +230,7 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( panic_on_storage_error: bool, ) -> Result, H::Out, CacheAction)>, ()> where - H::Out: Ord + 'static + Encode, + H::Out: Ord + 'static + Decode + Encode, { /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. 
fn maybe_panic( diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 05555df305b7c..836315b3694ba 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -62,6 +62,7 @@ pub fn prune( let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), root, + None, ); let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); let mut children_roots = Vec::new(); @@ -97,7 +98,7 @@ fn prune_trie( let mut proof_recorder: Recorder = Default::default(); { let mut trie = ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), + backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root, None), proof_recorder: &mut proof_recorder, }; trie.record_all_keys(); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index c2c2be6558f02..f0851db7f789a 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -525,15 +525,8 @@ where always_wasm(), None, )?; - let mut proof = sm.backend.extract_proof(&kind) + let proof = proving_backend.extract_proof(&kind) .map_err(|e| Box::new(e) as Box)?; - if kind.is_compact() { - let roots = trie_backend.extract_registered_roots(); - if let Some(roots) = roots { - proof = proof.pack::(&roots) - .map_err(|e| Box::new(e) as Box)?; - } - } Ok((result.into_encoded(), proof)) } diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index f57d13ee3ffec..d2085c3577c5a 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -467,7 +467,10 @@ impl OverlayedChanges { changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: StorageTransactionCache, - ) -> Result, String> where H::Out: Ord + 
Encode + 'static { + ) -> Result, String> + where + H::Out: Ord + Decode + Encode + 'static + { self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) } @@ -478,7 +481,10 @@ impl OverlayedChanges { changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: &mut StorageTransactionCache, - ) -> Result, String> where H::Out: Ord + Encode + 'static { + ) -> Result, String> + where + H::Out: Ord + Decode + Encode + 'static + { // If the transaction does not exist, we generate it. if cache.transaction.is_none() { self.storage_root(backend, &mut cache); @@ -592,7 +598,7 @@ impl OverlayedChanges { parent_hash: H::Out, panic_on_storage_error: bool, cache: &mut StorageTransactionCache, - ) -> Result, ()> where H::Out: Ord + Encode + 'static { + ) -> Result, ()> where H::Out: Ord + Decode + Encode + 'static { build_changes_trie::<_, H, N>( backend, changes_trie_state, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 7c003a5df2b43..6ee5fc7f4d2ed 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -209,10 +209,6 @@ impl StorageProof { } } - pub fn kind(&self) -> StorageProofKind { - unimplemented!() - } - /// This packs `Full` to `FullCompact`, using needed roots. pub fn pack(self, roots: &ChildrenProofMap>) -> Result where H::Out: Codec, @@ -241,8 +237,6 @@ impl StorageProof { /// This flatten `Full` to `Flatten`. /// Note that if for some reason child proof were not /// attached to the top trie, they will be lost. - /// Generally usage of Flatten kind or this function - /// when using child trie is not recommended. 
pub fn flatten(self) -> Self { if let StorageProof::Full(children) = self { let mut result = Vec::new(); @@ -500,18 +494,34 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: essence.backend_storage(), proof_recorder, }; + // TODO registering root can be disabled in most case: + // would simply need target proof as parameter (same thing for new + // function). ProvingBackend(TrieBackend::new_with_roots(recorder, root)) } /// Extracting the gathered unordered proof. pub fn extract_proof(&self, kind: &StorageProofKind) -> Result { - self.0.essence().backend_storage().proof_recorder.extract_proof(kind) + // TODO we actually check a given type of compaction. + let roots = if kind.is_compact() { + self.0.extract_registered_roots() + } else { + None + }; + self.0.essence().backend_storage().proof_recorder.extract_proof(kind, roots) } } -impl ProofRecorder { +impl ProofRecorder + where + H::Out: Codec, +{ /// Extracting the gathered unordered proof. - pub fn extract_proof(&self, kind: &StorageProofKind) -> Result { + pub fn extract_proof( + &self, + kind: &StorageProofKind, + registered_roots: Option>>, + ) -> Result { // TODO EMCH run the kind correctly // flat -> can compress with query plan only and stay flat // full -> can flatte @@ -523,7 +533,11 @@ impl ProofRecorder { .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) .collect(); - StorageProof::Flatten(trie_nodes) + match kind { + StorageProofKind::Flatten => StorageProof::Flatten(trie_nodes), +// TODO flatten compact for a given set of keys work StorageProofKind::FlattenCompact => StorageProof::Flatten(trie_nodes), + _ => return Err("Invalid proof kind for a flat proof record".to_string()), + } }, ProofRecorder::Full(rec) => { let mut children = ChildrenProofMap::default(); @@ -534,7 +548,18 @@ impl ProofRecorder { .collect(); children.insert(child_info.proof_info(), trie_nodes); } - StorageProof::Full(children) + let unpacked_full = StorageProof::Full(children); 
+ match kind { + StorageProofKind::Flatten => unpacked_full.flatten(), + StorageProofKind::Full => unpacked_full, + StorageProofKind::FullCompact => { + if let Some(roots) = registered_roots { + unpacked_full.pack::(&roots)? + } else { + return Err("Cannot compact without roots".to_string()); + } + }, + } }, }) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 00e6e0ab3f39f..eeccb68743d18 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,7 +20,7 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap, ChildrenProofMap}; +use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenProofMap}; use codec::{Codec, Decode, Encode}; use crate::{ StorageKey, StorageValue, Backend, @@ -32,9 +32,6 @@ use parking_lot::RwLock; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { essence: TrieBackendEssence, - /// If defined, we store encoded visited roots for top_trie and child trie in this - /// map. It also act as a cache. - register_roots: Option>>>>, } impl, H: Hasher> TrieBackend where H::Out: Codec { @@ -42,28 +39,29 @@ impl, H: Hasher> TrieBackend where H::Out: Codec /// TODO check if still used pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { - essence: TrieBackendEssence::new(storage, root), - register_roots: None, + essence: TrieBackendEssence::new(storage, root, None), } } /// Activate storage of roots (can be use /// to pack proofs and does small caching of child trie root)). 
pub fn new_with_roots(storage: S, root: H::Out) -> Self { + let register_roots = Some(Arc::new(RwLock::new(Default::default()))); TrieBackend { - essence: TrieBackendEssence::new(storage, root), - register_roots: Some(Arc::new(RwLock::new(Default::default()))), + essence: TrieBackendEssence::new(storage, root, register_roots), } } /// Get registered roots pub fn extract_registered_roots(&self) -> Option>> { - if let Some(register_roots) = self.register_roots.as_ref() { + if let Some(register_roots) = self.essence.register_roots.as_ref() { let mut dest = ChildrenProofMap::default(); dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); let read_lock = register_roots.read(); for (child_info, root) in read_lock.iter() { - dest.insert(child_info.proof_info(), root.encode()); + if let Some(root) = root { + dest.insert(child_info.proof_info(), root.encode()); + } } Some(dest) } else { @@ -234,8 +232,7 @@ impl, H: Hasher> Backend for TrieBackend where }; let mut write_overlay = S::Overlay::default(); - let prefixed_storage_key = child_info.prefixed_storage_key(); - let mut root = match self.storage(prefixed_storage_key.as_slice()) { + let mut root = match self.essence.child_root_encoded(child_info) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index f58fe9c012404..5cf34af3cc60f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -27,8 +27,9 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::{ChildInfo, ChildrenProofMap}; -use codec::Encode; +use sp_core::storage::{ChildInfo, ChildrenProofMap, 
ChildrenMap}; +use codec::{Decode, Encode}; +use parking_lot::RwLock; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { @@ -40,14 +41,23 @@ pub trait Storage: Send + Sync { pub struct TrieBackendEssence, H: Hasher> { storage: S, root: H::Out, + /// If defined, we store encoded visited roots for top_trie and child trie in this + /// map. It also act as a cache. + /// TODO EMCH switch to register encoded value (this assumes same hash out between child) + pub register_roots: Option>>>>, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { +impl, H: Hasher> TrieBackendEssence where H::Out: Decode + Encode { /// Create new trie-based backend. - pub fn new(storage: S, root: H::Out) -> Self { + pub fn new( + storage: S, + root: H::Out, + register_roots: Option>>>>, + ) -> Self { TrieBackendEssence { storage, root, + register_roots, } } @@ -73,8 +83,39 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: &ChildInfo) -> Result, String> { - self.storage(child_info.prefixed_storage_key().as_slice()) + pub(crate) fn child_root_encoded(&self, child_info: &ChildInfo) -> Result, String> { + if let Some(cache) = self.register_roots.as_ref() { + if let Some(result) = cache.read().get(child_info) { + return Ok(result.map(|root| root.encode())); + } + } + + let root: Option = self.storage(&child_info.prefixed_storage_key()[..])?; + + if let Some(cache) = self.register_roots.as_ref() { + let root = root.as_ref().and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + cache.write().insert(child_info.clone(), root); + } + + Ok(root) + } + + /// Access the root of the child storage in its parent trie + fn child_root(&self, child_info: &ChildInfo) -> Result, String> { + if let Some(cache) = self.register_roots.as_ref() { + if let Some(result) = cache.read().get(child_info) { + return Ok(result.clone()); + } + } + + let root: Option = 
self.storage(&child_info.prefixed_storage_key()[..])? + .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + + if let Some(cache) = self.register_roots.as_ref() { + cache.write().insert(child_info.clone(), root); + } + + Ok(root) } /// Return the next key in the child trie i.e. the minimum key that is strictly superior to @@ -84,19 +125,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: &ChildInfo, key: &[u8], ) -> Result, String> { - let child_root = match self.child_root(child_info)? { + let hash = match self.child_root(child_info)? { Some(child_root) => child_root, None => return Ok(None), }; - let mut hash = H::Out::default(); - - if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); - } - // note: child_root and hash must be same size, panics otherwise. - hash.as_mut().copy_from_slice(&child_root[..]); - self.next_storage_key_from_root(&hash, Some(child_info), key) } @@ -173,7 +206,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: &ChildInfo, key: &[u8], ) -> Result, String> { - let root = self.child_root(child_info)? + let root = self.child_root_encoded(child_info)? 
.unwrap_or(empty_child_trie_root::>().encode()); let mut read_overlay = S::Overlay::default(); @@ -196,7 +229,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: &ChildInfo, f: F, ) { - let root = match self.child_root(child_info) { + let root = match self.child_root_encoded(child_info) { Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); @@ -229,7 +262,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: prefix: &[u8], mut f: F, ) { - let root_vec = match self.child_root(child_info) { + let root_vec = match self.child_root_encoded(child_info) { Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); @@ -516,7 +549,7 @@ mod test { .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::new(mdb, root_1); + let essence_1 = TrieBackendEssence::new(mdb, root_1, None); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -525,7 +558,7 @@ mod test { assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); let mdb = essence_1.into_storage(); - let essence_2 = TrieBackendEssence::new(mdb, root_2); + let essence_2 = TrieBackendEssence::new(mdb, root_2, None); assert_eq!( essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) From a83f8e6347ca73db837589bffe6aabe7dbdf56f1 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 16 Apr 2020 20:52:50 +0200 Subject: [PATCH 099/185] moving storage proof to trie as in master --- Cargo.lock | 1 + client/src/client.rs | 8 +- client/src/light/call_executor.rs | 5 +- client/src/light/fetcher.rs | 6 +- primitives/state-machine/src/lib.rs | 17 +- .../state-machine/src/proving_backend.rs | 431 +---------------- primitives/state-machine/src/trie_backend.rs | 5 +- .../state-machine/src/trie_backend_essence.rs | 4 +- 
primitives/storage/src/lib.rs | 40 +- primitives/trie/Cargo.toml | 2 + primitives/trie/src/lib.rs | 9 +- primitives/trie/src/storage_proof.rs | 433 ++++++++++++++++-- 12 files changed, 442 insertions(+), 519 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 34ea0ebd3c748..5c797abdc33ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7735,6 +7735,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-std", + "sp-storage", "trie-bench", "trie-db", "trie-root", diff --git a/client/src/client.rs b/client/src/client.rs index 3aad8700015e7..76394072d608a 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -41,7 +41,7 @@ use sp_state_machine::{ DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, StorageProof, - StorageProofKind, merge_storage_proofs, + StorageProofKind, }; use sc_executor::{RuntimeVersion, RuntimeInfo}; use sp_consensus::{ @@ -474,7 +474,8 @@ impl Client where Ok(()) }, ())?; - Ok(merge_storage_proofs::, _>(proofs)?) + Ok(StorageProof::merge::, _>(proofs) + .map_err(|e| format!("{}", e))?) } /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). @@ -1136,7 +1137,8 @@ impl ProofProvider for Client where call_data, ).and_then(|(r, p)| { // TODO EMCH using flatten?? 
- Ok((r, merge_storage_proofs::, _>(vec![p, code_proof])?)) + Ok((r, StorageProof::merge::, _>(vec![p, code_proof]) + .map_err(|e| format!("{}", e))?)) }) } diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index a7ef8156f1816..884153598b85d 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -29,7 +29,7 @@ use sp_externalities::Extensions; use sp_state_machine::{ self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, - merge_storage_proofs, StorageProofKind, + StorageProofKind, }; use hash_db::Hasher; @@ -206,7 +206,8 @@ pub fn prove_execution( method, call_data, )?; - let total_proof = merge_storage_proofs::, _>(vec![init_proof, exec_proof])?; + let total_proof = StorageProof::merge::, _>(vec![init_proof, exec_proof]) + .map_err(|e| format!("{}", e))?; Ok((result, total_proof)) } diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 5fa88b3b46ded..f0a46610dd94b 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -31,8 +31,9 @@ use sp_runtime::traits::{ use sp_state_machine::{ ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - create_flat_proof_check_backend_storage, read_child_proof_check, CloneableSpawn, + read_child_proof_check, CloneableSpawn, }; +use sp_trie::create_flat_proof_check_backend_storage; pub use sp_state_machine::StorageProof; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -157,7 +158,8 @@ impl> LightDataChecker { H::Out: Ord + codec::Codec, { // all the checks are sharing the same storage - let storage = create_flat_proof_check_backend_storage(remote_roots_proof)?; + let storage = create_flat_proof_check_backend_storage(remote_roots_proof) + .map_err(|e| 
format!("{}", e))?; // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index f0851db7f789a..2203e2f42d107 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -23,7 +23,7 @@ use log::{warn, trace}; use hash_db::Hasher; use codec::{Decode, Encode, Codec}; use sp_core::{ - storage::{ChildInfo, ChildrenProofMap}, NativeOrEncoded, NeverNativeValue, + storage::ChildInfo, NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, CallInWasmExt, RuntimeCode}, hexdisplay::HexDisplay, }; use overlayed_changes::OverlayedChangeSet; @@ -42,7 +42,8 @@ mod trie_backend; mod trie_backend_essence; mod stats; -pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB}; +pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, + StorageProof, StorageProofKind, ChildrenProofMap}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; @@ -66,12 +67,8 @@ pub use overlayed_changes::{ OverlayedChanges, StorageChanges, StorageTransactionCache, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; -pub use proving_backend::{ - create_proof_check_backend, create_proof_check_backend_storage, merge_storage_proofs, - ProofRecorder, ProvingBackend, ProvingBackendRecorder, StorageProof, StorageProofKind, - create_flat_proof_check_backend, create_flat_proof_check_backend_storage, - merge_flatten_storage_proofs, -}; +pub use proving_backend::{ProofRecorder, ProvingBackend, ProvingBackendRecorder, + create_proof_check_backend, create_flat_proof_check_backend}; pub use trie_backend_essence::{TrieBackendStorage, Storage}; pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; @@ -706,7 +703,7 @@ where let roots = trie_backend.extract_registered_roots(); if let Some(roots) = roots { proof = 
proof.pack::(&roots) - .map_err(|e| Box::new(e) as Box)?; + .map_err(|e| Box::new(format!("{}", e)) as Box)?; } } Ok(proof) @@ -738,7 +735,7 @@ where let roots = trie_backend.extract_registered_roots(); if let Some(roots) = roots { proof = proof.pack::(&roots) - .map_err(|e| Box::new(e) as Box)?; + .map_err(|e| Box::new(format!("{}", e)) as Box)?; } } Ok(proof) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 6ee5fc7f4d2ed..0b43a670441e9 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -18,21 +18,21 @@ use std::sync::Arc; use parking_lot::RwLock; -use codec::{Encode, Decode, Codec}; +use codec::{Decode, Codec}; use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, + record_all_keys, StorageProofKind, StorageProof, }; -pub use sp_trie::Recorder; +pub use sp_trie::{Recorder, ChildrenProofMap}; pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, Backend}; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use crate::DBValue; -use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap, ChildrenProofMap}; +use sp_core::storage::{ChildInfo, ChildInfoProof, ChildrenMap}; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -40,323 +40,6 @@ pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Has pub(crate) proof_recorder: &'a mut Recorder, } -/// Different kind of proof representation are allowed. -/// This definition is used as input parameter when producing -/// a storage proof. 
-#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum StorageProofKind { - /// The proof can be build by multiple child trie only when - /// their query can be done on a single memory backend, - /// all encoded node can be stored in the same container. - Flatten, - // FlattenCompact(CompactScheme), - /// Proofs split by child trie. - Full, - /// Compact form of proofs split by child trie. - /// TODO indicate compact scheme to use (BtreeMap?) - FullCompact, -} - -impl StorageProofKind { - /// Is proof stored in a unique structure or - /// different structure depending on child trie. - pub fn is_flatten(&self) -> bool { - match self { - StorageProofKind::Flatten => true, - StorageProofKind::Full | StorageProofKind::FullCompact => false, - } - } - - /// Is the proof compacted. Compaction requires - /// using state root of every child trie. - pub fn is_compact(&self) -> bool { - match self { - StorageProofKind::FullCompact => true, - StorageProofKind::Full | StorageProofKind::Flatten => false, - } - } - - /// Indicate if we need all child trie information - /// to get register for producing the proof. - pub fn need_register_full(&self) -> bool { - match self { - StorageProofKind::Flatten => false, - // StorageProofKind::FlattenCompact => true, - StorageProofKind::Full | StorageProofKind::FullCompact => true, - } - } -} - -/// The possible compactions for proofs. -#[repr(u32)] -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub enum CompactScheme { - /// This skip encoding of hashes that are - /// calculated when reading the structue - /// of the trie. - TrieSkipHashes = 1, -/* /// Skip encoding of hashes and values, - /// we need to know them when when unpacking. - KnownQueryPlanAndValues = 2, - /// Skip encoding of hashes, this need knowing - /// the queried keys when unpacking, can be faster - /// than `TrieSkipHashes` but with similar packing - /// gain. 
- KnownQueryPlan = 3,*/ -} - -type ProofNodes = Vec>; -type ProofCompacted = (CompactScheme, Vec>); - -/// A proof that some set of key-value pairs are included in the storage trie. The proof contains -/// the storage values so that the partial storage backend can be reconstructed by a verifier that -/// does not already have access to the key-value pairs. -/// -/// For default trie, the proof component consists of the set of serialized nodes in the storage trie -/// accessed when looking up the keys covered by the proof. Verifying the proof requires constructing -/// the partial trie from the serialized nodes and performing the key lookups. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub enum StorageProof { - /// Single flattened proof component, all default child trie are flattened over a same - /// container, no child trie information is provided, this works only for proof accessing - /// the same kind of child trie. - Flatten(ProofNodes), -/* TODO EMCH implement as it will be default for trie skip hashes /// Proof can address multiple child trie, but results in a single flatten - /// db backend. - FlattenCompact(Vec),*/ - /// Fully descriBed proof, it includes the child trie individual descriptions. - /// Currently Full variant are not of any use as we have only child trie that can use the same - /// memory db backend. - /// TODO EMCH consider removal: could be put back when needed, and probably - /// with a new StorageProof key that is the same for a flattenable kind. - Full(ChildrenProofMap), - /// Fully descriped proof, compact encoded. - FullCompact(ChildrenProofMap), -} - -impl StorageProof { - /// Returns a new empty proof. - /// - /// An empty proof is capable of only proving trivial statements (ie. that an empty set of - /// key-value pairs exist in storage). - pub fn empty() -> Self { - // we default to full as it can be reduce to flatten when reducing - // flatten to full is not possible without making asumption over the content. 
- Self::empty_for(StorageProofKind::Full) - } - - /// Returns a new empty proof of a given kind. - pub fn empty_for(kind: StorageProofKind) -> Self { - match kind { - StorageProofKind::Flatten => StorageProof::Flatten(Default::default()), - StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), - StorageProofKind::FullCompact => StorageProof::FullCompact(ChildrenProofMap::default()), - } - } - - /// Returns whether this is an empty proof. - pub fn is_empty(&self) -> bool { - match self { - StorageProof::Flatten(data) => data.is_empty(), - StorageProof::Full(data) => data.is_empty(), - StorageProof::FullCompact(data) => data.is_empty(), - } - } - - /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed - /// to be traversed in any particular order. - /// This iterator is only for `Flatten` proofs, other kind of proof will return an iterator with - /// no content. - pub fn iter_nodes_flatten(self) -> StorageProofNodeIterator { - StorageProofNodeIterator::new(self) - } - - /// This unpacks `FullCompact` to `Full` or do nothing. - /// TODO EMCH document and use case for with_roots to true?? (probably unpack -> merge -> pack - /// but no code for it here) - pub fn unpack( - self, - with_roots: bool, - ) -> Result<(Self, Option>>), String> - where H::Out: Codec, - { - let map_e = |e| format!("Trie unpack error: {}", e); - if let StorageProof::FullCompact(children) = self { - let mut result = ChildrenProofMap::default(); - let mut roots = if with_roots { - Some(ChildrenProofMap::default()) - } else { - None - }; - for (child_info, (compact_scheme, proof)) in children { - match child_info.child_type() { - ChildType::ParentKeyId => { - match compact_scheme { - CompactScheme::TrieSkipHashes => { - // Note that we could check the proof from the unpacking. 
- let (root, unpacked_proof) = sp_trie::unpack_proof::>(proof.as_slice()) - .map_err(map_e)?; - roots.as_mut().map(|roots| roots.insert(child_info.clone(), root.encode())); - result.insert(child_info, unpacked_proof); - }, - } - } - } - } - Ok((StorageProof::Full(result), roots)) - } else { - Ok((self, None)) - } - } - - /// This packs `Full` to `FullCompact`, using needed roots. - pub fn pack(self, roots: &ChildrenProofMap>) -> Result - where H::Out: Codec, - { - let map_e = |e| format!("Trie pack error: {}", e); - - if let StorageProof::Full(children) = self { - let mut result = ChildrenProofMap::default(); - for (child_info, proof) in children { - match child_info.child_type() { - ChildType::ParentKeyId => { - let root = roots.get(&child_info) - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .ok_or_else(|| "Missing root for packing".to_string())?; - let trie_nodes = sp_trie::pack_proof::>(&root, &proof[..]).map_err(map_e)?; - result.insert(child_info.clone(), (CompactScheme::TrieSkipHashes, trie_nodes)); - } - } - } - Ok(StorageProof::FullCompact(result)) - } else { - Ok(self) - } - } - - /// This flatten `Full` to `Flatten`. - /// Note that if for some reason child proof were not - /// attached to the top trie, they will be lost. - pub fn flatten(self) -> Self { - if let StorageProof::Full(children) = self { - let mut result = Vec::new(); - children.into_iter().for_each(|(child_info, proof)| { - match child_info.child_type() { - ChildType::ParentKeyId => { - // this can get merged with top, since it is proof we do not use prefix - result.extend(proof); - } - } - }); - StorageProof::Flatten(result) - } else { - self - } - } -} - -/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to -/// be traversed in any particular order. 
-pub struct StorageProofNodeIterator { - inner: > as IntoIterator>::IntoIter, -} - -impl StorageProofNodeIterator { - fn new(proof: StorageProof) -> Self { - match proof { - StorageProof::Flatten(data) => StorageProofNodeIterator { - inner: data.into_iter(), - }, - _ => StorageProofNodeIterator { - inner: Vec::new().into_iter(), - }, - } - } -} - -impl Iterator for StorageProofNodeIterator { - type Item = Vec; - - fn next(&mut self) -> Option { - self.inner.next() - } -} - -/// Merges multiple storage proofs covering potentially different sets of keys into one proof -/// covering all keys. The merged proof output may be smaller than the aggregate size of the input -/// proofs due to deduplication of trie nodes. -/// Merge to `Flatten` if any item is flatten (we cannot unflatten), if not `Flatten` we output to -/// non compact form. -/// TODO EMCH on master this has moved to a StorageProof function: do it (same for the other merge -/// function) -pub fn merge_storage_proofs(proofs: I) -> Result - where - I: IntoIterator, - H: Hasher, - H::Out: Codec, -{ - let mut do_flatten = false; - let mut child_sets = ChildrenProofMap::>>::default(); - let mut unique_set = HashSet::>::default(); - // lookup for best encoding - for mut proof in proofs { - if let &StorageProof::FullCompact(..) = &proof { - // TODO EMCH pack back so set to true. 
- proof = proof.unpack::(false)?.0; - } - let proof = proof; - match proof { - StorageProof::Flatten(proof) => { - if !do_flatten { - do_flatten = true; - for (_, set) in std::mem::replace(&mut child_sets, Default::default()).into_iter() { - unique_set.extend(set); - } - } - unique_set.extend(proof); - }, - StorageProof::Full(children) => { - for (child_info, child) in children.into_iter() { - if do_flatten { - unique_set.extend(child); - } else { - let set = child_sets.entry(child_info).or_default(); - set.extend(child); - } - } - }, - StorageProof::FullCompact(_children) => unreachable!("unpacked when entering function"), - } - } - Ok(if do_flatten { - StorageProof::Flatten(unique_set.into_iter().collect()) - } else { - let mut result = ChildrenProofMap::default(); - for (child_info, set) in child_sets.into_iter() { - result.insert(child_info, set.into_iter().collect()); - } - StorageProof::Full(result) - }) -} - -/// Merge over flatten proof, return `None` if one of the proofs is not -/// a flatten proof. 
-pub fn merge_flatten_storage_proofs(proofs: I) -> Option - where - I: IntoIterator, -{ - let mut unique_set = HashSet::>::default(); - // lookup for best encoding - for proof in proofs { - if let StorageProof::Flatten(set) = proof { - unique_set.extend(set); - } else { - return None; - } - } - Some(StorageProof::Flatten(unique_set.into_iter().collect())) -} - impl<'a, S, H> ProvingBackendRecorder<'a, S, H> where S: TrieBackendStorage, @@ -522,10 +205,7 @@ impl ProofRecorder kind: &StorageProofKind, registered_roots: Option>>, ) -> Result { - // TODO EMCH run the kind correctly - // flat -> can compress with query plan only and stay flat - // full -> can flatte - // -> can compress (compress with query plan is better after being flattened) + // TODO EMCH logic should be in sp_trie Ok(match self { ProofRecorder::Flat(rec) => { let trie_nodes = rec @@ -554,7 +234,8 @@ impl ProofRecorder StorageProofKind::Full => unpacked_full, StorageProofKind::FullCompact => { if let Some(roots) = registered_roots { - unpacked_full.pack::(&roots)? + unpacked_full.pack::(&roots) + .map_err(|e| format!("{}", e))? } else { return Err("Cannot compact without roots".to_string()); } @@ -711,8 +392,8 @@ where H: Hasher, H::Out: Codec, { - let db = create_flat_proof_check_backend_storage(proof) - .map_err(|e| Box::new(e) as Box)?; + let db = sp_trie::create_flat_proof_check_backend_storage(proof) + .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.contains(&root, EMPTY_PREFIX) { Ok(TrieBackend::new_with_roots(db, root)) } else { @@ -730,8 +411,8 @@ where H::Out: Codec, { use std::ops::Deref; - let db = create_proof_check_backend_storage(proof) - .map_err(|e| Box::new(e) as Box)?; + let db = sp_trie::create_proof_check_backend_storage(proof) + .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.deref().get(&ChildInfoProof::top_trie()) .map(|db| db.contains(&root, EMPTY_PREFIX)) .unwrap_or(false) { @@ -741,93 +422,6 @@ where } } -/// Create in-memory storage of proof check backend. 
-/// Currently child trie are all with same backend -/// implementation, therefore using -/// `create_flat_proof_check_backend_storage` is prefered. -/// TODO flat proof check is enough for now, do we want to -/// maintain the full variant? -pub fn create_proof_check_backend_storage( - proof: StorageProof, -) -> Result>, String> -where - H: Hasher, -{ - let map_e = |e| format!("Trie unpack error: {}", e); - let mut result = ChildrenProofMap::default(); - match proof { - s@StorageProof::Flatten(..) => { - let mut db = MemoryDB::default(); - for item in s.iter_nodes_flatten() { - db.insert(EMPTY_PREFIX, &item); - } - result.insert(ChildInfoProof::top_trie(), db); - }, - StorageProof::Full(children) => { - for (child_info, proof) in children.into_iter() { - let mut db = MemoryDB::default(); - for item in proof.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } - result.insert(child_info, db); - } - }, - StorageProof::FullCompact(children) => { - for (child_info, (compact_scheme, proof)) in children.into_iter() { - match compact_scheme { - CompactScheme::TrieSkipHashes => { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). - let (_root, db) = sp_trie::unpack_proof_to_memdb::>(proof.as_slice()) - .map_err(map_e)?; - result.insert(child_info, db); - }, - } - } - }, - } - Ok(result) -} - -/// Create in-memory storage of proof check backend. -pub fn create_flat_proof_check_backend_storage( - proof: StorageProof, -) -> Result, String> -where - H: Hasher, -{ - let map_e = |e| format!("Trie unpack error: {}", e); - let mut db = MemoryDB::default(); - match proof { - s@StorageProof::Flatten(..) 
=> { - for item in s.iter_nodes_flatten() { - db.insert(EMPTY_PREFIX, &item); - } - }, - StorageProof::Full(children) => { - for (_child_info, proof) in children.into_iter() { - for item in proof.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } - } - }, - StorageProof::FullCompact(children) => { - for (_child_info, (compact_scheme, proof)) in children.into_iter() { - match compact_scheme { - CompactScheme::TrieSkipHashes => { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). - let (_root, child_db) = sp_trie::unpack_proof_to_memdb::>(proof.as_slice()) - .map_err(map_e)?; - db.consolidate(child_db); - }, - } - } - }, - } - Ok(db) -} - #[cfg(test)] mod tests { use crate::InMemoryBackend; @@ -844,7 +438,6 @@ mod tests { ProvingBackend::new(trie_backend, flat) } - #[test] fn proof_is_empty_until_value_is_read() { let trie_backend = test_trie(); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index eeccb68743d18..ddef355883ae6 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -18,9 +18,10 @@ use log::{warn, debug}; use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, + ChildrenProofMap}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenProofMap}; +use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; use crate::{ StorageKey, StorageValue, Backend, diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 5cf34af3cc60f..708596cb9a277 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ 
b/primitives/state-machine/src/trie_backend_essence.rs @@ -22,12 +22,12 @@ use std::sync::Arc; use std::marker::PhantomData; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; -use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, +use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, ChildrenProofMap, empty_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::{ChildInfo, ChildrenProofMap, ChildrenMap}; +use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::{Decode, Encode}; use parking_lot::RwLock; diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 24f7d9c3ec94f..388d27907abad 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -419,6 +419,7 @@ impl ChildTrieParentKeyId { } } } + #[cfg(feature = "std")] #[derive(Clone, PartialEq, Eq, Debug)] /// Type for storing a map of child trie related information. @@ -458,45 +459,6 @@ impl IntoIterator for ChildrenMap { } } -#[cfg(feature = "std")] -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -/// Type for storing a map of child trie proof related information. -/// A few utilities methods are defined. 
-pub struct ChildrenProofMap(pub BTreeMap); - -#[cfg(feature = "std")] -impl sp_std::ops::Deref for ChildrenProofMap { - type Target = BTreeMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -#[cfg(feature = "std")] -impl sp_std::ops::DerefMut for ChildrenProofMap { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -#[cfg(feature = "std")] -impl sp_std::default::Default for ChildrenProofMap { - fn default() -> Self { - ChildrenProofMap(BTreeMap::new()) - } -} - -#[cfg(feature = "std")] -impl IntoIterator for ChildrenProofMap { - type Item = (ChildInfoProof, T); - type IntoIter = sp_std::collections::btree_map::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} - const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; #[test] diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 2530cc3f9905b..93c1ed738037e 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -19,6 +19,7 @@ harness = false [dependencies] codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } sp-std = { version = "2.0.0-alpha.6", default-features = false, path = "../std" } +sp-storage = { version = "2.0.0-alpha.6", default-features = false, path = "../storage" } hash-db = { version = "0.15.2", default-features = false } trie-db = { version = "0.20.1", default-features = false } trie-root = { version = "0.16.0", default-features = false } @@ -42,4 +43,5 @@ std = [ "trie-db/std", "trie-root/std", "sp-core/std", + "sp-storage/std", ] diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 6e23ec6f19ed7..3adb48f530462 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,7 +36,8 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; -pub use storage_proof::StorageProof; +pub use storage_proof::{StorageProof, create_proof_check_backend_storage, + create_flat_proof_check_backend_storage, ChildrenProofMap, StorageProofKind}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, @@ -316,7 +317,7 @@ pub fn record_all_keys( } /// Pack proof. -pub fn pack_proof(root: &TrieHash, input: &[Vec]) +fn pack_proof(root: &TrieHash, input: &[Vec]) -> Result>, Box>> { let mut memory_db = MemoryDB::<::Hash>::default(); for i in input.as_ref() { @@ -327,7 +328,7 @@ pub fn pack_proof(root: &TrieHash, input: &[Vec]) } /// Unpack packed proof. -pub fn unpack_proof(input: &[Vec]) +fn unpack_proof(input: &[Vec]) -> Result<(TrieHash, Vec>), Box>> { let mut memory_db = MemoryDB::<::Hash>::default(); let root = trie_db::decode_compact::(&mut memory_db, input)?; @@ -336,7 +337,7 @@ pub fn unpack_proof(input: &[Vec]) /// Unpack packed proof. /// This is faster than `unpack_proof`. -pub fn unpack_proof_to_memdb(input: &[Vec]) +fn unpack_proof_to_memdb(input: &[Vec]) -> Result<(TrieHash, MemoryDB::<::Hash>), Box>> { let mut memory_db = MemoryDB::<::Hash>::default(); let root = trie_db::decode_compact::(&mut memory_db, input)?; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index f0aad5f432d13..6ce1fc6e5504f 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -14,69 +14,316 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use sp_std::collections::btree_map::BTreeMap; +use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; -use codec::{Encode, Decode}; -use hash_db::{Hasher, HashDB}; +use codec::{Codec, Encode, Decode}; +use hash_db::{Hasher, HashDB, EMPTY_PREFIX}; +use crate::{MemoryDB, Layout}; +use sp_storage::{ChildInfoProof, ChildType}; +use crate::TrieError; + +type Result = sp_std::result::Result>>>; + +fn missing_pack_input() -> sp_std::boxed::Box>> { + // TODO better error in trie db crate eg Packing error + sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +} + +/// Different kind of proof representation are allowed. +/// This definition is used as input parameter when producing +/// a storage proof. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum StorageProofKind { + /// The proof can be build by multiple child trie only when + /// their query can be done on a single memory backend, + /// all encoded node can be stored in the same container. + Flatten, + // FlattenCompact(CompactScheme), + /// Proofs split by child trie. + Full, + /// Compact form of proofs split by child trie. + /// TODO indicate compact scheme to use (BtreeMap?) + FullCompact, +} + +impl StorageProofKind { + /// Is proof stored in a unique structure or + /// different structure depending on child trie. + pub fn is_flatten(&self) -> bool { + match self { + StorageProofKind::Flatten => true, + StorageProofKind::Full | StorageProofKind::FullCompact => false, + } + } + + /// Is the proof compacted. Compaction requires + /// using state root of every child trie. + pub fn is_compact(&self) -> bool { + match self { + StorageProofKind::FullCompact => true, + StorageProofKind::Full | StorageProofKind::Flatten => false, + } + } + + /// Indicate if we need all child trie information + /// to get register for producing the proof. 
+ pub fn need_register_full(&self) -> bool { + match self { + StorageProofKind::Flatten => false, + // StorageProofKind::FlattenCompact => true, + StorageProofKind::Full | StorageProofKind::FullCompact => true, + } + } +} + +/// The possible compactions for proofs. +#[repr(u32)] +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub enum CompactScheme { + /// This skip encoding of hashes that are + /// calculated when reading the structue + /// of the trie. + TrieSkipHashes = 1, +/* /// Skip encoding of hashes and values, + /// we need to know them when when unpacking. + KnownQueryPlanAndValues = 2, + /// Skip encoding of hashes, this need knowing + /// the queried keys when unpacking, can be faster + /// than `TrieSkipHashes` but with similar packing + /// gain. + KnownQueryPlan = 3,*/ +} + +type ProofNodes = Vec>; +// TODO EMCH do not alloc scheme per child for now +type ProofCompacted = (CompactScheme, Vec>); /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that /// does not already have access to the key-value pairs. /// -/// The proof consists of the set of serialized nodes in the storage trie accessed when looking up -/// the keys covered by the proof. Verifying the proof requires constructing the partial trie from -/// the serialized nodes and performing the key lookups. -/// TODO EMCH fuse with proving backend one. +/// For default trie, the proof component consists of the set of serialized nodes in the storage trie +/// accessed when looking up the keys covered by the proof. Verifying the proof requires constructing +/// the partial trie from the serialized nodes and performing the key lookups. 
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct StorageProof { - trie_nodes: Vec>, +pub enum StorageProof { + /// Single flattened proof component, all default child trie are flattened over a same + /// container, no child trie information is provided, this works only for proof accessing + /// the same kind of child trie. + Flatten(ProofNodes), +/* TODO EMCH implement as it will be default for trie skip hashes /// Proof can address multiple child trie, but results in a single flatten + /// db backend. + FlattenCompact(Vec),*/ + /// Fully descriBed proof, it includes the child trie individual descriptions. + /// Currently Full variant are not of any use as we have only child trie that can use the same + /// memory db backend. + /// TODO EMCH consider removal: could be put back when needed, and probably + /// with a new StorageProof key that is the same for a flattenable kind. + Full(ChildrenProofMap), + /// Fully descriped proof, compact encoded. + FullCompact(ChildrenProofMap), } impl StorageProof { - /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. - pub fn new(trie_nodes: Vec>) -> Self { - StorageProof { trie_nodes } - } - /// Returns a new empty proof. /// /// An empty proof is capable of only proving trivial statements (ie. that an empty set of /// key-value pairs exist in storage). pub fn empty() -> Self { - StorageProof { - trie_nodes: Vec::new(), + // we default to full as it can be reduce to flatten when reducing + // flatten to full is not possible without making asumption over the content. + Self::empty_for(StorageProofKind::Full) + } + + /// Returns a new empty proof of a given kind. 
+ pub fn empty_for(kind: StorageProofKind) -> Self { + match kind { + StorageProofKind::Flatten => StorageProof::Flatten(Default::default()), + StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), + StorageProofKind::FullCompact => StorageProof::FullCompact(ChildrenProofMap::default()), } } /// Returns whether this is an empty proof. pub fn is_empty(&self) -> bool { - self.trie_nodes.is_empty() + match self { + StorageProof::Flatten(data) => data.is_empty(), + StorageProof::Full(data) => data.is_empty(), + StorageProof::FullCompact(data) => data.is_empty(), + } } /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed /// to be traversed in any particular order. - pub fn iter_nodes(self) -> StorageProofNodeIterator { + /// This iterator is only for `Flatten` proofs, other kind of proof will return an iterator with + /// no content. + pub fn iter_nodes_flatten(self) -> StorageProofNodeIterator { StorageProofNodeIterator::new(self) } - /// Creates a `MemoryDB` from `Self`. - pub fn into_memory_db(self) -> crate::MemoryDB { - self.into() + /// This unpacks `FullCompact` to `Full` or do nothing. + /// TODO EMCH document and use case for with_roots to true?? (probably unpack -> merge -> pack + /// but no code for it here) + pub fn unpack( + self, + with_roots: bool, + ) -> Result<(Self, Option>>), H> + where H::Out: Codec, + { + if let StorageProof::FullCompact(children) = self { + let mut result = ChildrenProofMap::default(); + let mut roots = if with_roots { + Some(ChildrenProofMap::default()) + } else { + None + }; + for (child_info, (compact_scheme, proof)) in children { + match child_info.child_type() { + ChildType::ParentKeyId => { + match compact_scheme { + CompactScheme::TrieSkipHashes => { + // Note that we could check the proof from the unpacking. 
+ let (root, unpacked_proof) = crate::unpack_proof::>(proof.as_slice())?; + roots.as_mut().map(|roots| roots.insert(child_info.clone(), root.encode())); + result.insert(child_info, unpacked_proof); + }, + } + } + } + } + Ok((StorageProof::Full(result), roots)) + } else { + Ok((self, None)) + } + } + + /// This packs `Full` to `FullCompact`, using needed roots. + pub fn pack( + self, + roots: &ChildrenProofMap>, + ) -> Result + where H::Out: Codec, + { + if let StorageProof::Full(children) = self { + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in children { + match child_info.child_type() { + ChildType::ParentKeyId => { + let root = roots.get(&child_info) + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .ok_or_else(|| missing_pack_input::())?; + // TODO EMCH pack directly from memory db, here we switch + let trie_nodes = crate::pack_proof::>(&root, &proof[..])?; + result.insert(child_info.clone(), (CompactScheme::TrieSkipHashes, trie_nodes)); + } + } + } + Ok(StorageProof::FullCompact(result)) + } else { + Ok(self) + } + } + + /// This flatten `Full` to `Flatten`. + /// Note that if for some reason child proof were not + /// attached to the top trie, they will be lost. + pub fn flatten(self) -> Self { + if let StorageProof::Full(children) = self { + let mut result = Vec::new(); + children.into_iter().for_each(|(child_info, proof)| { + match child_info.child_type() { + ChildType::ParentKeyId => { + // this can get merged with top, since it is proof we do not use prefix + result.extend(proof); + } + } + }); + StorageProof::Flatten(result) + } else { + self + } } /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. 
- pub fn merge(proofs: I) -> Self where I: IntoIterator { - let trie_nodes = proofs.into_iter() - .flat_map(|proof| proof.iter_nodes()) - .collect::>() - .into_iter() - .collect(); + /// Merge to `Flatten` if one of the item is flatten (we cannot unflatten), if not `Flatten` we output to + /// non compact form. + pub fn merge(proofs: I) -> Result + where + I: IntoIterator, + H: Hasher, + H::Out: Codec, + { + let mut do_flatten = false; + let mut child_sets = ChildrenProofMap::>>::default(); + let mut unique_set = BTreeSet::>::default(); + // lookup for best encoding + for mut proof in proofs { + if let &StorageProof::FullCompact(..) = &proof { + // TODO EMCH pack back so set to true. + proof = proof.unpack::(false)?.0; + } + let proof = proof; + match proof { + StorageProof::Flatten(proof) => { + if !do_flatten { + do_flatten = true; + for (_, set) in sp_std::mem::replace(&mut child_sets, Default::default()).into_iter() { + unique_set.extend(set); + } + } + unique_set.extend(proof); + }, + StorageProof::Full(children) => { + for (child_info, child) in children.into_iter() { + if do_flatten { + unique_set.extend(child); + } else { + let set = child_sets.entry(child_info).or_default(); + set.extend(child); + } + } + }, + StorageProof::FullCompact(_children) => unreachable!("unpacked when entering function"), + } + } + Ok(if do_flatten { + StorageProof::Flatten(unique_set.into_iter().collect()) + } else { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in child_sets.into_iter() { + result.insert(child_info, set.into_iter().collect()); + } + StorageProof::Full(result) + }) + } - Self { trie_nodes } + /// Merges multiple storage proofs covering potentially different sets of keys into one proof + /// covering all keys. The merged proof output may be smaller than the aggregate size of the input + /// proofs due to deduplication of trie nodes. 
+ /// + /// Run only over flatten proof, will return `None` if one of the proofs is not + /// a flatten proof. + pub fn merge_flat(proofs: I) -> Option + where + I: IntoIterator, + { + let mut unique_set = BTreeSet::>::default(); + // lookup for best encoding + for proof in proofs { + if let StorageProof::Flatten(set) = proof { + unique_set.extend(set); + } else { + return None; + } + } + Some(StorageProof::Flatten(unique_set.into_iter().collect())) } } + /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to /// be traversed in any particular order. pub struct StorageProofNodeIterator { @@ -85,8 +332,13 @@ pub struct StorageProofNodeIterator { impl StorageProofNodeIterator { fn new(proof: StorageProof) -> Self { - StorageProofNodeIterator { - inner: proof.trie_nodes.into_iter(), + match proof { + StorageProof::Flatten(data) => StorageProofNodeIterator { + inner: data.into_iter(), + }, + _ => StorageProofNodeIterator { + inner: Vec::new().into_iter(), + }, } } } @@ -99,12 +351,121 @@ impl Iterator for StorageProofNodeIterator { } } -impl From for crate::MemoryDB { - fn from(proof: StorageProof) -> Self { - let mut db = crate::MemoryDB::default(); - for item in proof.iter_nodes() { - db.insert(crate::EMPTY_PREFIX, &item); - } - db +// TODO EMCH use tryfrom instead of those two create. + +/// Create in-memory storage of proof check backend. +/// Currently child trie are all with same backend +/// implementation, therefore using +/// `create_flat_proof_check_backend_storage` is prefered. +/// TODO flat proof check is enough for now, do we want to +/// maintain the full variant? +pub fn create_proof_check_backend_storage( + proof: StorageProof, +) -> Result>, H> +where + H: Hasher, +{ + let mut result = ChildrenProofMap::default(); + match proof { + s@StorageProof::Flatten(..) 
=> { + let mut db = MemoryDB::default(); + for item in s.iter_nodes_flatten() { + db.insert(EMPTY_PREFIX, &item); + } + result.insert(ChildInfoProof::top_trie(), db); + }, + StorageProof::Full(children) => { + for (child_info, proof) in children.into_iter() { + let mut db = MemoryDB::default(); + for item in proof.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } + result.insert(child_info, db); + } + }, + StorageProof::FullCompact(children) => { + for (child_info, (compact_scheme, proof)) in children.into_iter() { + match compact_scheme { + CompactScheme::TrieSkipHashes => { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). + let (_root, db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + result.insert(child_info, db); + }, + } + } + }, + } + Ok(result) +} + +/// Create in-memory storage of proof check backend. +pub fn create_flat_proof_check_backend_storage( + proof: StorageProof, +) -> Result, H> +where + H: Hasher, +{ + let mut db = MemoryDB::default(); + match proof { + s@StorageProof::Flatten(..) => { + for item in s.iter_nodes_flatten() { + db.insert(EMPTY_PREFIX, &item); + } + }, + StorageProof::Full(children) => { + for (_child_info, proof) in children.into_iter() { + for item in proof.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } + } + }, + StorageProof::FullCompact(children) => { + for (_child_info, (compact_scheme, proof)) in children.into_iter() { + match compact_scheme { + CompactScheme::TrieSkipHashes => { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). + let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + db.consolidate(child_db); + }, + } + } + }, + } + Ok(db) +} + +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +/// Type for storing a map of child trie proof related information. 
+/// A few utilities methods are defined. +pub struct ChildrenProofMap(pub BTreeMap); + +impl sp_std::ops::Deref for ChildrenProofMap { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for ChildrenProofMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl sp_std::default::Default for ChildrenProofMap { + fn default() -> Self { + ChildrenProofMap(BTreeMap::new()) + } +} + +impl IntoIterator for ChildrenProofMap { + type Item = (ChildInfoProof, T); + type IntoIter = sp_std::collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() } } From 69ed11886456732ae8ec83bf3e3ff1aeff854cac Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 17 Apr 2020 12:31:33 +0200 Subject: [PATCH 100/185] fix renamed field related error --- primitives/state-machine/src/basic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 8d3ecb1b190a8..7f26085958e97 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -83,7 +83,7 @@ impl BasicExternalities { let mut ext = Self { inner: Storage { top: std::mem::replace(&mut storage.top, Default::default()), - children_default: std::mem::replace(&mut storage.children, Default::default()), + children_default: std::mem::replace(&mut storage.children_default, Default::default()), }, extensions: Default::default(), }; From eebdfe62cee338cba96d88b3845d81768be37120 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 17 Apr 2020 18:26:08 +0200 Subject: [PATCH 101/185] rework of storage proof in trie. 
--- primitives/trie/src/lib.rs | 6 +- primitives/trie/src/storage_proof.rs | 572 ++++++++++++++++++++------- 2 files changed, 429 insertions(+), 149 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 3adb48f530462..ac08bdbad8920 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -37,10 +37,12 @@ pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, create_proof_check_backend_storage, - create_flat_proof_check_backend_storage, ChildrenProofMap, StorageProofKind}; + LegacyStorageProof, create_flat_proof_check_backend_storage, ChildrenProofMap, + StorageProofKind}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ - Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, + Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, + nibble_ops, TrieDBIterator, }; /// Various re-exports from the `memory-db` crate. 
pub use memory_db::KeyFunction; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 6ce1fc6e5504f..9b86b15d4e3a7 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -17,87 +17,160 @@ use sp_std::collections::btree_map::BTreeMap; use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; -use codec::{Codec, Encode, Decode}; +use codec::{Codec, Encode, Decode, Input, Output}; use hash_db::{Hasher, HashDB, EMPTY_PREFIX}; use crate::{MemoryDB, Layout}; use sp_storage::{ChildInfoProof, ChildType}; use crate::TrieError; type Result = sp_std::result::Result>>>; +type CodecResult = sp_std::result::Result; fn missing_pack_input() -> sp_std::boxed::Box>> { // TODO better error in trie db crate eg Packing error sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) } +fn impossible_merge_for_proof() -> sp_std::boxed::Box>> { + // TODO better error in trie db crate eg Packing error + sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +} + +fn impossible_backend_build() -> sp_std::boxed::Box>> { + // TODO better error in trie db crate eg Packing error + sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +} + /// Different kind of proof representation are allowed. /// This definition is used as input parameter when producing /// a storage proof. +#[repr(u8)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum StorageProofKind { - /// The proof can be build by multiple child trie only when - /// their query can be done on a single memory backend, - /// all encoded node can be stored in the same container. + /// Kind for `StorageProof::Flatten`. Flatten, - // FlattenCompact(CompactScheme), - /// Proofs split by child trie. - Full, - /// Compact form of proofs split by child trie. - /// TODO indicate compact scheme to use (BtreeMap?) - FullCompact, + + /// Kind for `StorageProof::TrieSkipHashes`. 
+ TrieSkipHashes, + + /// Kind for `StorageProof::KnownQueryPlanAndValues`. + KnownQueryPlanAndValues, + + /// Testing only indices + + /// Kind for `StorageProof::Full`. + Full = 126, + + /// Kind for `StorageProof::TrieSkipHashesFull`. + TrieSkipHashesFull = 127, +} + +impl StorageProofKind { + /// Decode a byte value representing the storage byte. + /// Return `None` if value does not exists. + #[cfg(test)] + pub fn read_from_byte(encoded: u8) -> Option { + Some(match encoded { + x if x == StorageProofKind::Flatten as u8 => StorageProofKind::Flatten, + x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, + x if x == StorageProofKind::KnownQueryPlanAndValues as u8 + => StorageProofKind::KnownQueryPlanAndValues, + x if x == StorageProofKind::Full as u8 => StorageProofKind::Full, + x if x == StorageProofKind::TrieSkipHashesFull as u8 => StorageProofKind::TrieSkipHashesFull, + x if x == StorageProofKind::TrieSkipHashesFull as u8 + => StorageProofKind::TrieSkipHashesFull, + _ => return None, + }) + } + + /// Decode a byte value representing the storage byte. + /// Return `None` if value does not exists. + #[cfg(not(test))] + pub fn read_from_byte(encoded: u8) -> Option { + Some(match encoded { + x if x == StorageProofKind::Flatten as u8 => StorageProofKind::Flatten, + x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, + x if x == StorageProofKind::KnownQueryPlanAndValues as u8 + => StorageProofKind::KnownQueryPlanAndValues, + _ => return None, + }) + } +} + +/// Additional information needed for packing or unpacking. +/// These do not need to be part of the proof but are required +/// when using the proof. +pub enum AdditionalInfoForProcessing { + /// Contains trie roots used during proof processing. + ChildTrieRoots(ChildrenProofMap>), + + /// Contains trie roots used during proof processing. + /// Contains key and values queried during the proof processing. 
+ QueryPlanWithValues(ChildrenProofMap<(Vec, Vec<(Vec, Option>)>)>), +} + +/// Kind for designing an `AdditionalInfoForProcessing` variant. +pub enum AdditionalInfoForProcessingKind { + /// `AdditionalInfoForProcessing::ChildTrieRoots` kind. + ChildTrieRoots, + + /// `AdditionalInfoForProcessing::QueryPlanWithValues` kind. + QueryPlanWithValues, } impl StorageProofKind { - /// Is proof stored in a unique structure or - /// different structure depending on child trie. - pub fn is_flatten(&self) -> bool { + /// Some proof variants requires more than just the collected + /// encoded nodes. + pub fn need_additional_info_to_produce(&self) -> Option { match self { - StorageProofKind::Flatten => true, - StorageProofKind::Full | StorageProofKind::FullCompact => false, + StorageProofKind::KnownQueryPlanAndValues => Some(AdditionalInfoForProcessingKind::QueryPlanWithValues), + StorageProofKind::TrieSkipHashes + | StorageProofKind::TrieSkipHashesFull => Some(AdditionalInfoForProcessingKind::ChildTrieRoots), + StorageProofKind::Full + | StorageProofKind::Flatten => None, } } - /// Is the proof compacted. Compaction requires - /// using state root of every child trie. - pub fn is_compact(&self) -> bool { + /// Same as `need_additional_info_to_produce` but for reading. + pub fn need_additional_info_to_read(&self) -> Option { match self { - StorageProofKind::FullCompact => true, - StorageProofKind::Full | StorageProofKind::Flatten => false, + StorageProofKind::KnownQueryPlanAndValues => Some(AdditionalInfoForProcessingKind::QueryPlanWithValues), + StorageProofKind::TrieSkipHashes + | StorageProofKind::TrieSkipHashesFull + | StorageProofKind::Full + | StorageProofKind::Flatten => None, } } - /// Indicate if we need all child trie information - /// to get register for producing the proof. + /// Some proof can get unpack into another proof representation. 
+ pub fn can_unpack(&self) -> bool { + match self { + StorageProofKind::KnownQueryPlanAndValues => false, + StorageProofKind::TrieSkipHashes + | StorageProofKind::TrieSkipHashesFull => true, + StorageProofKind::Full + | StorageProofKind::Flatten => false, + } + } + + /// Indicate if we need to record proof with splitted child trie information + /// or can simply record on a single collection. pub fn need_register_full(&self) -> bool { match self { StorageProofKind::Flatten => false, - // StorageProofKind::FlattenCompact => true, - StorageProofKind::Full | StorageProofKind::FullCompact => true, + StorageProofKind::Full + | StorageProofKind::KnownQueryPlanAndValues + | StorageProofKind::TrieSkipHashes + | StorageProofKind::TrieSkipHashesFull => true, } } } -/// The possible compactions for proofs. -#[repr(u32)] -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub enum CompactScheme { - /// This skip encoding of hashes that are - /// calculated when reading the structue - /// of the trie. - TrieSkipHashes = 1, -/* /// Skip encoding of hashes and values, - /// we need to know them when when unpacking. - KnownQueryPlanAndValues = 2, - /// Skip encoding of hashes, this need knowing - /// the queried keys when unpacking, can be faster - /// than `TrieSkipHashes` but with similar packing - /// gain. - KnownQueryPlan = 3,*/ -} - +/// A collection on encoded trie nodes. type ProofNodes = Vec>; -// TODO EMCH do not alloc scheme per child for now -type ProofCompacted = (CompactScheme, Vec>); +/// A sorted by trie nodes order collection on encoded trie nodes +/// with possibly ommitted content or special compacted encoding. +type ProofCompacted = Vec>; /// A proof that some set of key-value pairs are included in the storage trie. 
The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -106,23 +179,153 @@ type ProofCompacted = (CompactScheme, Vec>); /// For default trie, the proof component consists of the set of serialized nodes in the storage trie /// accessed when looking up the keys covered by the proof. Verifying the proof requires constructing /// the partial trie from the serialized nodes and performing the key lookups. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum StorageProof { /// Single flattened proof component, all default child trie are flattened over a same - /// container, no child trie information is provided, this works only for proof accessing - /// the same kind of child trie. + /// container, no child trie information is provided. + /// This is the same representation as the `LegacyStorageProof`. Flatten(ProofNodes), -/* TODO EMCH implement as it will be default for trie skip hashes /// Proof can address multiple child trie, but results in a single flatten - /// db backend. - FlattenCompact(Vec),*/ - /// Fully descriBed proof, it includes the child trie individual descriptions. - /// Currently Full variant are not of any use as we have only child trie that can use the same - /// memory db backend. - /// TODO EMCH consider removal: could be put back when needed, and probably - /// with a new StorageProof key that is the same for a flattenable kind. + + /// This skip encoding of hashes that are + /// calculated when reading the structue + /// of the trie. + /// It requires that the proof is collected with + /// child trie separation, will encode to struct that + /// separate child trie but do not keep information about + /// them (for compactness) and will therefore produce a flatten + /// verification backend. + TrieSkipHashes(Vec), + + /// This skip encoding of hashes, but need to know the key + /// values that are targetted by the operation. 
+ /// As `TrieSkipHashes`, it does not pack hash that can be + /// calculated, so it requires a specific call to a custom + /// verify function with additional input. + /// This needs to be check for every children proofs. + KnownQueryPlanAndValues(ChildrenProofMap), + + // Following variants are only for testing, they still can be use but + // decoding is not implemented. + + /// Fully described proof, it includes the child trie individual description and split its + /// content by child trie. + /// Currently Full variant is unused as all our child trie kind can share a same memory db + /// (a bit more compact). + /// This is mainly provided for test purpose and extensibility. Full(ChildrenProofMap), - /// Fully descriped proof, compact encoded. - FullCompact(ChildrenProofMap), + + /// Compact form of proofs split by child trie, this is using the same compaction as + /// `TrieSkipHashes` but do not merge the content in a single memorydb backend. + /// This is mainly provided for test purpose and extensibility. + TrieSkipHashesFull(ChildrenProofMap), +} + +/// A legacy encoding of proof, it is the same as the inner encoding +/// of `StorageProof::Flatten`. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct LegacyStorageProof { + trie_nodes: Vec>, +} + +impl LegacyStorageProof { + /// Create a proof from encoded trie nodes. + pub fn new(trie_nodes: Vec>) -> Self { + LegacyStorageProof { trie_nodes } + } +} + +impl Decode for StorageProof { + fn decode(value: &mut I) -> CodecResult { + let kind = value.read_byte()?; + Ok(match StorageProofKind::read_from_byte(kind) + .ok_or_else(|| codec::Error::from("Invalid storage kind"))? 
{ + StorageProofKind::Flatten => StorageProof::Flatten(Decode::decode(value)?), + StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Decode::decode(value)?), + StorageProofKind::KnownQueryPlanAndValues + => StorageProof::KnownQueryPlanAndValues(Decode::decode(value)?), + StorageProofKind::Full => StorageProof::Full(Decode::decode(value)?), + StorageProofKind::TrieSkipHashesFull + => StorageProof::TrieSkipHashesFull(Decode::decode(value)?), + }) + } +} + +impl Encode for StorageProof { + fn encode_to(&self, dest: &mut T) { + (self.kind() as u8).encode_to(dest); + match self { + StorageProof::Flatten(p) => p.encode_to(dest), + StorageProof::TrieSkipHashes(p) => p.encode_to(dest), + StorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), + StorageProof::Full(p) => p.encode_to(dest), + StorageProof::TrieSkipHashesFull(p) => p.encode_to(dest), + } + } +} + +/// This encodes the full proof capabillity under +/// legacy proof format by disabling the empty proof +/// from it (empty proof should not happen because +/// the empty trie still got a empty node recorded in +/// all its proof). +pub struct LegacyEncodeAdapter<'a>(pub &'a StorageProof); + +impl<'a> Encode for LegacyEncodeAdapter<'a> { + fn encode_to(&self, dest: &mut T) { + 0u8.encode_to(dest); + self.0.encode_to(dest); + } +} + +/// Decode variant of `LegacyEncodeAdapter`. +pub struct LegacyDecodeAdapter(pub StorageProof); + +/// Allow read ahead on input. 
+pub struct InputRevertReadAhead<'a, I>(pub &'a mut &'a [u8], pub &'a mut I); + +impl<'a, I: Input> Input for InputRevertReadAhead<'a, I> { + fn remaining_len(&mut self) -> CodecResult> { + Ok(self.1.remaining_len()?.map(|l| l + self.0.len())) + } + + fn read(&mut self, into: &mut [u8]) -> CodecResult<()> { + let mut offset = 0; + if self.0.len() > 0 { + if self.0.len() > into.len() { + into.copy_from_slice(&self.0[..into.len()]); + *self.0 = &self.0[into.len()..]; + return Ok(()); + } else { + into[..self.0.len()].copy_from_slice(&self.0[..]); + *self.0 = &[][..]; + offset = self.0.len(); + } + } + self.1.read(&mut into[offset..]) + } + + fn read_byte(&mut self) -> CodecResult { + if self.0.len() > 0 { + let result = self.0[0]; + *self.0 = &self.0[1..]; + Ok(result) + } else { + self.1.read_byte() + } + } +} + +impl Decode for LegacyDecodeAdapter { + fn decode(value: &mut I) -> CodecResult { + let legacy = value.read_byte()?; + Ok(if legacy == 0 { + LegacyDecodeAdapter(Decode::decode(value)?) 
+ } else { + let mut legacy = &[legacy][..]; + let mut input = InputRevertReadAhead(&mut legacy, value); + LegacyDecodeAdapter(StorageProof::Flatten(Decode::decode(&mut input)?)) + }) + } } impl StorageProof { @@ -141,7 +344,9 @@ impl StorageProof { match kind { StorageProofKind::Flatten => StorageProof::Flatten(Default::default()), StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), - StorageProofKind::FullCompact => StorageProof::FullCompact(ChildrenProofMap::default()), + StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(ChildrenProofMap::default()), + StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(ChildrenProofMap::default()), + StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Default::default()), } } @@ -150,7 +355,9 @@ impl StorageProof { match self { StorageProof::Flatten(data) => data.is_empty(), StorageProof::Full(data) => data.is_empty(), - StorageProof::FullCompact(data) => data.is_empty(), + StorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), + StorageProof::TrieSkipHashes(data) => data.is_empty(), + StorageProof::TrieSkipHashesFull(data) => data.is_empty(), } } @@ -162,7 +369,7 @@ impl StorageProof { StorageProofNodeIterator::new(self) } - /// This unpacks `FullCompact` to `Full` or do nothing. + /// This unpacks `TrieSkipHashesFull` to `Full` or do nothing. /// TODO EMCH document and use case for with_roots to true?? 
(probably unpack -> merge -> pack /// but no code for it here) pub fn unpack( @@ -171,58 +378,87 @@ impl StorageProof { ) -> Result<(Self, Option>>), H> where H::Out: Codec, { - if let StorageProof::FullCompact(children) = self { - let mut result = ChildrenProofMap::default(); - let mut roots = if with_roots { - Some(ChildrenProofMap::default()) - } else { - None - }; - for (child_info, (compact_scheme, proof)) in children { - match child_info.child_type() { - ChildType::ParentKeyId => { - match compact_scheme { - CompactScheme::TrieSkipHashes => { - // Note that we could check the proof from the unpacking. - let (root, unpacked_proof) = crate::unpack_proof::>(proof.as_slice())?; - roots.as_mut().map(|roots| roots.insert(child_info.clone(), root.encode())); - result.insert(child_info, unpacked_proof); - }, + let mut roots = if with_roots { + Some(ChildrenProofMap::default()) + } else { + None + }; + match self { + StorageProof::TrieSkipHashesFull(children) => { + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in children { + match child_info.child_type() { + ChildType::ParentKeyId => { + // Note that unpack does fill a memory db and on verification we will + // probalby switch this proof to a memory db to, so the function to produce + // the backend should not use this primitive. 
+ let (root, unpacked_proof) = crate::unpack_proof::>(proof.as_slice())?; + roots.as_mut().map(|roots| roots.insert(child_info.clone(), root.encode())); + result.insert(child_info, unpacked_proof); } } } - } - Ok((StorageProof::Full(result), roots)) - } else { - Ok((self, None)) + Ok((StorageProof::Full(result), roots)) + }, + StorageProof::TrieSkipHashes(children) => { + let mut result = ProofNodes::default(); + for proof in children { + let (_root, unpacked_proof) = crate::unpack_proof::>(proof.as_slice())?; + result.extend(unpacked_proof); + } + + Ok((StorageProof::Flatten(result), None)) + }, + s => Ok((s, None)), } } - /// This packs `Full` to `FullCompact`, using needed roots. + /// This run proof validation when the proof only expect + /// validation. + pub fn validate( + self, + _additional_content: &Option, + ) -> Result, H> + where H::Out: Codec, + { + unimplemented!("TODO run the validation of the query plan one") + } + + /// This packs when possible. pub fn pack( self, - roots: &ChildrenProofMap>, + additional_content: &Option, ) -> Result where H::Out: Codec, { - if let StorageProof::Full(children) = self { - let mut result = ChildrenProofMap::default(); - for (child_info, proof) in children { - match child_info.child_type() { - ChildType::ParentKeyId => { - let root = roots.get(&child_info) - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .ok_or_else(|| missing_pack_input::())?; - // TODO EMCH pack directly from memory db, here we switch - let trie_nodes = crate::pack_proof::>(&root, &proof[..])?; - result.insert(child_info.clone(), (CompactScheme::TrieSkipHashes, trie_nodes)); - } + Ok(match self { + StorageProof::Full(children) => { + match additional_content { + Some(AdditionalInfoForProcessing::ChildTrieRoots(roots)) => { + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in children { + match child_info.child_type() { + ChildType::ParentKeyId => { + let root = roots.get(&child_info) + .and_then(|r| Decode::decode(&mut 
&r[..]).ok()) + .ok_or_else(|| missing_pack_input::())?; + // TODO EMCH pack directly from recorded memory db -> have a pack_proof returning + // directly memory db?? seems wrong?? + let trie_nodes = crate::pack_proof::>(&root, &proof[..])?; + result.insert(child_info.clone(), trie_nodes); + } + } + } + StorageProof::TrieSkipHashesFull(result) + }, + Some(AdditionalInfoForProcessing::QueryPlanWithValues(_plan)) => { + unimplemented!("TODO pack query plan mode") + }, + None => StorageProof::Full(children), } - } - Ok(StorageProof::FullCompact(result)) - } else { - Ok(self) - } + }, + s => s, + }) } /// This flatten `Full` to `Flatten`. @@ -250,6 +486,9 @@ impl StorageProof { /// proofs due to deduplication of trie nodes. /// Merge to `Flatten` if one of the item is flatten (we cannot unflatten), if not `Flatten` we output to /// non compact form. + /// The function cannot pack back proof as it does not have reference to additional information + /// needed. So for this the additional information need to be merged separately and the result + /// of this merge be packed with it afterward. pub fn merge(proofs: I) -> Result where I: IntoIterator, @@ -261,12 +500,25 @@ impl StorageProof { let mut unique_set = BTreeSet::>::default(); // lookup for best encoding for mut proof in proofs { - if let &StorageProof::FullCompact(..) = &proof { - // TODO EMCH pack back so set to true. - proof = proof.unpack::(false)?.0; + // unpack + match &proof { + &StorageProof::TrieSkipHashesFull(..) => { + proof = proof.unpack::(false)?.0; + }, + &StorageProof::TrieSkipHashes(..) => { + proof = proof.unpack::(false)?.0; + }, + &StorageProof::KnownQueryPlanAndValues(..) => { + return Err(impossible_merge_for_proof::()); + }, + _ => (), } let proof = proof; match proof { + StorageProof::TrieSkipHashesFull(..) + | StorageProof::TrieSkipHashes(..) + | StorageProof::KnownQueryPlanAndValues(..) 
+ => unreachable!("Unpacked or early return earlier"), StorageProof::Flatten(proof) => { if !do_flatten { do_flatten = true; @@ -286,7 +538,6 @@ impl StorageProof { } } }, - StorageProof::FullCompact(_children) => unreachable!("unpacked when entering function"), } } Ok(if do_flatten { @@ -300,30 +551,18 @@ impl StorageProof { }) } - /// Merges multiple storage proofs covering potentially different sets of keys into one proof - /// covering all keys. The merged proof output may be smaller than the aggregate size of the input - /// proofs due to deduplication of trie nodes. - /// - /// Run only over flatten proof, will return `None` if one of the proofs is not - /// a flatten proof. - pub fn merge_flat(proofs: I) -> Option - where - I: IntoIterator, - { - let mut unique_set = BTreeSet::>::default(); - // lookup for best encoding - for proof in proofs { - if let StorageProof::Flatten(set) = proof { - unique_set.extend(set); - } else { - return None; - } + /// Get kind description for the storage proof variant. + pub fn kind(&self) -> StorageProofKind { + match self { + StorageProof::Flatten(_) => StorageProofKind::Flatten, + StorageProof::TrieSkipHashes(_) => StorageProofKind::TrieSkipHashes, + StorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, + StorageProof::Full(_) => StorageProofKind::Full, + StorageProof::TrieSkipHashesFull(_) => StorageProofKind::TrieSkipHashesFull, } - Some(StorageProof::Flatten(unique_set.into_iter().collect())) } } - /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to /// be traversed in any particular order. pub struct StorageProofNodeIterator { @@ -368,10 +607,7 @@ where let mut result = ChildrenProofMap::default(); match proof { s@StorageProof::Flatten(..) 
=> { - let mut db = MemoryDB::default(); - for item in s.iter_nodes_flatten() { - db.insert(EMPTY_PREFIX, &item); - } + let db = create_flat_proof_check_backend_storage::(s)?; result.insert(ChildInfoProof::top_trie(), db); }, StorageProof::Full(children) => { @@ -383,18 +619,21 @@ where result.insert(child_info, db); } }, - StorageProof::FullCompact(children) => { - for (child_info, (compact_scheme, proof)) in children.into_iter() { - match compact_scheme { - CompactScheme::TrieSkipHashes => { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). - let (_root, db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - result.insert(child_info, db); - }, - } + StorageProof::TrieSkipHashesFull(children) => { + for (child_info, proof) in children.into_iter() { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). + let (_root, db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + result.insert(child_info, db); } }, + s@StorageProof::TrieSkipHashes(..) => { + let db = create_flat_proof_check_backend_storage::(s)?; + result.insert(ChildInfoProof::top_trie(), db); + }, + StorageProof::KnownQueryPlanAndValues(_children) => { + return Err(impossible_backend_build::()); + }, } Ok(result) } @@ -407,6 +646,7 @@ where H: Hasher, { let mut db = MemoryDB::default(); + let mut db_empty = true; match proof { s@StorageProof::Flatten(..) => { for item in s.iter_nodes_flatten() { @@ -420,18 +660,33 @@ where } } }, - StorageProof::FullCompact(children) => { - for (_child_info, (compact_scheme, proof)) in children.into_iter() { - match compact_scheme { - CompactScheme::TrieSkipHashes => { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). 
- let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - db.consolidate(child_db); - }, + StorageProof::TrieSkipHashesFull(children) => { + for (_child_info, proof) in children.into_iter() { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). + let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + }, + StorageProof::TrieSkipHashes(children) => { + for proof in children.into_iter() { + let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); } } }, + StorageProof::KnownQueryPlanAndValues(_children) => { + return Err(impossible_backend_build::()); + }, } Ok(db) } @@ -469,3 +724,26 @@ impl IntoIterator for ChildrenProofMap { self.0.into_iter() } } + +#[test] +fn legacy_proof_codec() { + // random content for proof, we test serialization + let content = vec![b"first".to_vec(), b"second".to_vec()]; + + let legacy = LegacyStorageProof::new(content.clone()); + let encoded_legacy = legacy.encode(); + let proof = StorageProof::Flatten(content.clone()); + let encoded_proof = proof.encode(); + + assert_eq!(Decode::decode(&mut &encoded_proof[..]).unwrap(), proof); + // test encoded minus first bytes equal to storage proof + assert_eq!(&encoded_legacy[..], &encoded_proof[1..]); + + // test adapter + let encoded_adapter = LegacyEncodeAdapter(&proof).encode(); + assert_eq!(encoded_adapter[0], 0); + assert_eq!(&encoded_adapter[1..], &encoded_proof[..]); + let adapter_proof = LegacyDecodeAdapter(proof); + assert_eq!(Decode::decode(&mut &encoded_legacy[..]).unwrap(), adapter_proof); + assert_eq!(Decode::decode(&mut &encoded_adapter[..]).unwrap(), adapter_proof); +} From 39ba926736fec6ebd1b541e97d3cb22a6d030861 Mon Sep 17 00:00:00 2001 
From: cheme Date: Fri, 17 Apr 2020 20:00:25 +0200 Subject: [PATCH 102/185] adapt rest of code --- client/src/call_executor.rs | 5 +- primitives/state-machine/src/lib.rs | 208 ++++++++++-------- .../state-machine/src/proving_backend.rs | 80 +++---- primitives/state-machine/src/trie_backend.rs | 6 +- primitives/trie/src/lib.rs | 2 +- primitives/trie/src/storage_proof.rs | 26 ++- 6 files changed, 181 insertions(+), 146 deletions(-) diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index 94b257f3596ed..1ef4a17f7aeb7 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -223,8 +223,7 @@ where method: &str, call_data: &[u8] ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - // TODO this switch execution proof to full compact, should we move the choice to - // caller?? + // TODO Should we make proof kind a parameter? sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _>( trie_state, overlay, @@ -232,7 +231,7 @@ where self.spawn_handle.clone(), method, call_data, - StorageProofKind::FullCompact, + StorageProofKind::TrieSkipHashes, &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, ) .map_err(Into::into) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 2203e2f42d107..a6b1384143811 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -43,7 +43,7 @@ mod trie_backend_essence; mod stats; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, - StorageProof, StorageProofKind, ChildrenProofMap}; + StorageProof, StorageProofKind, ChildrenProofMap, AdditionalInfoForProcessingKind}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; @@ -503,7 +503,10 @@ where Exec: CodeExecutor + 'static + Clone, N: crate::changes_trie::BlockNumber, { - let proving_backend = proving_backend::ProvingBackend::new(trie_backend, kind.is_flatten()); + let 
proving_backend = proving_backend::ProvingBackend::new( + trie_backend, + kind.need_register_full(), + ); let mut sm = StateMachine::<_, H, N, Exec>::new( &proving_backend, None, @@ -544,29 +547,34 @@ where H::Out: Ord + 'static + codec::Codec, N: crate::changes_trie::BlockNumber, { - let use_flat = proof_uses_flat(&proof); - if use_flat { - let trie_backend = create_flat_proof_check_backend::(root.into(), proof)?; - execution_flat_proof_check_on_trie_backend::<_, N, _>( - &trie_backend, - overlay, - exec, - spawn_handle, - method, - call_data, - runtime_code, - ) - } else { - let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _>( - &trie_backend, - overlay, - exec, - spawn_handle, - method, - call_data, - runtime_code, - ) + match proof.kind().need_check_full() { + Some(true) => { + let trie_backend = create_proof_check_backend::(root.into(), proof)?; + execution_proof_check_on_trie_backend::<_, N, _>( + &trie_backend, + overlay, + exec, + spawn_handle, + method, + call_data, + runtime_code, + ) + }, + Some(false) => { + let trie_backend = create_flat_proof_check_backend::(root.into(), proof)?; + execution_flat_proof_check_on_trie_backend::<_, N, _>( + &trie_backend, + overlay, + exec, + spawn_handle, + method, + call_data, + runtime_code, + ) + }, + None => { + return Err(Box::new("This kind of proof need to use a verify method")); + }, } } @@ -691,22 +699,29 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind.is_flatten()); + let proving_backend = proving_backend::ProvingBackend::<_, H>::new( + trie_backend, + kind.need_register_full(), + ); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - let mut proof = proving_backend.extract_proof(&kind) + let proof = proving_backend.extract_proof(&kind) .map_err(|e| Box::new(e) as Box)?; - if kind.is_compact() { - 
let roots = trie_backend.extract_registered_roots(); - if let Some(roots) = roots { - proof = proof.pack::(&roots) - .map_err(|e| Box::new(format!("{}", e)) as Box)?; - } - } - Ok(proof) + let infos = match kind.need_additional_info_to_produce() { + Some(AdditionalInfoForProcessingKind::ChildTrieRoots) => { + trie_backend.extract_registered_roots() + }, + Some(AdditionalInfoForProcessingKind::QueryPlanNoValues) => { + unimplemented!("TODO from keys, do not care about memory copy at first") + }, + Some(_) => return Err(Box::new("Cannot produce required info for proof")), + None => None, + }; + Ok(proof.pack::(&infos) + .map_err(|e| Box::new(format!("{}", e)) as Box)?) } /// Generate storage read proof on pre-created trie backend. @@ -723,37 +738,26 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind.is_flatten()); + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind.need_register_full()); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - let mut proof = proving_backend.extract_proof(&kind) + let proof = proving_backend.extract_proof(&kind) .map_err(|e| Box::new(e) as Box)?; - if kind.is_compact() { - let roots = trie_backend.extract_registered_roots(); - if let Some(roots) = roots { - proof = proof.pack::(&roots) - .map_err(|e| Box::new(format!("{}", e)) as Box)?; - } - } - Ok(proof) -} - -// Note that this is not directly in StorageKind as -// it is implementation specific choice. -fn proof_uses_flat(proof: &StorageProof) -> bool { - match proof { - StorageProof::Flatten(..) => true, - // there is currently no gain (same implementation - // for all trie backends) in not running on a flatten - // memorydb - StorageProof::Full(..) => true, - // unpack creates by nature splitted memory db, there - // is no need to merge them. - StorageProof::FullCompact(..) 
=> false, - } + let infos = match kind.need_additional_info_to_produce() { + Some(AdditionalInfoForProcessingKind::ChildTrieRoots) => { + trie_backend.extract_registered_roots() + }, + Some(AdditionalInfoForProcessingKind::QueryPlanNoValues) => { + unimplemented!("TODO from keys, do not care about memory copy at first, warn to include child root fetch") + }, + Some(_) => return Err(Box::new("Cannot produce required info for proof")), + None => None, + }; + Ok(proof.pack::(&infos) + .map_err(|e| Box::new(format!("{}", e)) as Box)?) } /// Check storage read proof, generated by `prove_read` call. @@ -770,20 +774,25 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let use_flat = proof_uses_flat(&proof); let mut result = HashMap::new(); - if use_flat { - let proving_backend = create_flat_proof_check_backend::(root, proof)?; - for key in keys.into_iter() { - let value = read_proof_check_on_flat_proving_backend(&proving_backend, key.as_ref())?; - result.insert(key.as_ref().to_vec(), value); - } - } else { - let proving_backend = create_proof_check_backend::(root, proof)?; - for key in keys.into_iter() { - let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; - result.insert(key.as_ref().to_vec(), value); - } + match proof.kind().need_check_full() { + Some(true) => { + let proving_backend = create_proof_check_backend::(root, proof)?; + for key in keys.into_iter() { + let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; + result.insert(key.as_ref().to_vec(), value); + } + }, + Some(false) => { + let proving_backend = create_flat_proof_check_backend::(root, proof)?; + for key in keys.into_iter() { + let value = read_proof_check_on_flat_proving_backend(&proving_backend, key.as_ref())?; + result.insert(key.as_ref().to_vec(), value); + } + }, + None => { + return Err(Box::new("This kind of proof need to use a verify method")); + }, } Ok(result) } @@ -801,28 +810,33 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - 
let use_flat = proof_uses_flat(&proof); let mut result = HashMap::new(); - if use_flat { - let proving_backend = create_flat_proof_check_backend::(root, proof)?; - for key in keys.into_iter() { - let value = read_child_proof_check_on_flat_proving_backend( - &proving_backend, - child_info, - key.as_ref(), - )?; - result.insert(key.as_ref().to_vec(), value); - } - } else { - let proving_backend = create_proof_check_backend::(root, proof)?; - for key in keys.into_iter() { - let value = read_child_proof_check_on_proving_backend( - &proving_backend, - child_info, - key.as_ref(), - )?; - result.insert(key.as_ref().to_vec(), value); - } + match proof.kind().need_check_full() { + Some(true) => { + let proving_backend = create_proof_check_backend::(root, proof)?; + for key in keys.into_iter() { + let value = read_child_proof_check_on_proving_backend( + &proving_backend, + child_info, + key.as_ref(), + )?; + result.insert(key.as_ref().to_vec(), value); + } + }, + Some(false) => { + let proving_backend = create_flat_proof_check_backend::(root, proof)?; + for key in keys.into_iter() { + let value = read_child_proof_check_on_flat_proving_backend( + &proving_backend, + child_info, + key.as_ref(), + )?; + result.insert(key.as_ref().to_vec(), value); + } + }, + None => { + return Err(Box::new("This kind of proof need to use a verify method")); + }, } Ok(result) } @@ -1054,7 +1068,8 @@ mod tests { fn prove_execution_and_proof_check_works() { prove_execution_and_proof_check_works_inner(StorageProofKind::Flatten); prove_execution_and_proof_check_works_inner(StorageProofKind::Full); - prove_execution_and_proof_check_works_inner(StorageProofKind::FullCompact); + prove_execution_and_proof_check_works_inner(StorageProofKind::TrieSkipHashesFull); + prove_execution_and_proof_check_works_inner(StorageProofKind::TrieSkipHashes); } fn prove_execution_and_proof_check_works_inner(kind: StorageProofKind) { @@ -1189,7 +1204,8 @@ mod tests { fn prove_read_and_proof_check_works() { 
prove_read_and_proof_check_works_inner(StorageProofKind::Full); prove_read_and_proof_check_works_inner(StorageProofKind::Flatten); - prove_read_and_proof_check_works_inner(StorageProofKind::FullCompact); + prove_read_and_proof_check_works_inner(StorageProofKind::TrieSkipHashesFull); + prove_read_and_proof_check_works_inner(StorageProofKind::TrieSkipHashes); } fn prove_read_and_proof_check_works_inner(kind: StorageProofKind) { diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 0b43a670441e9..dabc4e945198a 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -23,7 +23,8 @@ use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProofKind, StorageProof, + record_all_keys, StorageProofKind, StorageProof, AdditionalInfoForProcessingKind, + AdditionalInfoForProcessing, }; pub use sp_trie::{Recorder, ChildrenProofMap}; pub use sp_trie::trie_types::{Layout, TrieError}; @@ -157,11 +158,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> where H::Out: Codec { /// Create new proving backend. 
- pub fn new(backend: &'a TrieBackend, flatten: bool) -> Self { - let proof_recorder = if flatten { - ProofRecorder::Flat(Default::default()) - } else { + pub fn new(backend: &'a TrieBackend, full: bool) -> Self { + let proof_recorder = if full { ProofRecorder::Full(Default::default()) + } else { + ProofRecorder::Flat(Default::default()) }; Self::new_with_recorder(backend, proof_recorder) } @@ -177,7 +178,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: essence.backend_storage(), proof_recorder, }; - // TODO registering root can be disabled in most case: + // TODO EMCH registering root can be disabled in most case: // would simply need target proof as parameter (same thing for new // function). ProvingBackend(TrieBackend::new_with_roots(recorder, root)) @@ -185,11 +186,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Extracting the gathered unordered proof. pub fn extract_proof(&self, kind: &StorageProofKind) -> Result { - // TODO we actually check a given type of compaction. 
- let roots = if kind.is_compact() { - self.0.extract_registered_roots() - } else { - None + let roots = match kind.need_additional_info_to_produce() { + Some(AdditionalInfoForProcessingKind::ChildTrieRoots) => { + self.0.extract_registered_roots() + }, + _ => None, }; self.0.essence().backend_storage().proof_recorder.extract_proof(kind, roots) } @@ -203,7 +204,7 @@ impl ProofRecorder pub fn extract_proof( &self, kind: &StorageProofKind, - registered_roots: Option>>, + additional_infos: Option, ) -> Result { // TODO EMCH logic should be in sp_trie Ok(match self { @@ -215,12 +216,13 @@ impl ProofRecorder .collect(); match kind { StorageProofKind::Flatten => StorageProof::Flatten(trie_nodes), -// TODO flatten compact for a given set of keys work StorageProofKind::FlattenCompact => StorageProof::Flatten(trie_nodes), _ => return Err("Invalid proof kind for a flat proof record".to_string()), } }, ProofRecorder::Full(rec) => { let mut children = ChildrenProofMap::default(); + // TODO EMCH logic should be in sp_trie and not build the + // intermediate full proof (especially for flattened one). for (child_info, set) in rec.read().iter() { let trie_nodes: Vec> = set .iter() @@ -229,17 +231,15 @@ impl ProofRecorder children.insert(child_info.proof_info(), trie_nodes); } let unpacked_full = StorageProof::Full(children); + match kind { StorageProofKind::Flatten => unpacked_full.flatten(), StorageProofKind::Full => unpacked_full, - StorageProofKind::FullCompact => { - if let Some(roots) = registered_roots { - unpacked_full.pack::(&roots) - .map_err(|e| format!("{}", e))? 
- } else { - return Err("Cannot compact without roots".to_string()); - } - }, + StorageProofKind::KnownQueryPlanAndValues + | StorageProofKind::TrieSkipHashesFull => unpacked_full.pack::(&additional_infos) + .map_err(|e| format!("{}", e))?, + StorageProofKind::TrieSkipHashes => unpacked_full.pack::(&additional_infos) + .map_err(|e| format!("{}", e))?.flatten(), // TODO EMCH I need to assert it is actually flatten (debug it), got strange non failing test on proof_recorded_and_checked_with_child (when was failing of flatten version } }, }) @@ -433,31 +433,33 @@ mod tests { fn test_proving<'a>( trie_backend: &'a TrieBackend, BlakeTwo256>, - flat: bool, + full: bool, ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { - ProvingBackend::new(trie_backend, flat) + ProvingBackend::new(trie_backend, full) } #[test] fn proof_is_empty_until_value_is_read() { let trie_backend = test_trie(); let kind = StorageProofKind::Flatten; - assert!(test_proving(&trie_backend, kind.is_flatten()).extract_proof(&kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(&kind).unwrap().is_empty()); let kind = StorageProofKind::Full; - assert!(test_proving(&trie_backend, kind.is_flatten()).extract_proof(&kind).unwrap().is_empty()); - let kind = StorageProofKind::FullCompact; - assert!(test_proving(&trie_backend, kind.is_flatten()).extract_proof(&kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(&kind).unwrap().is_empty()); + let kind = StorageProofKind::TrieSkipHashesFull; + assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(&kind).unwrap().is_empty()); + let kind = StorageProofKind::TrieSkipHashes; + assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(&kind).unwrap().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { let trie_backend = test_trie(); let kind = StorageProofKind::Flatten; - let backend = 
test_proving(&trie_backend, kind.is_flatten()); + let backend = test_proving(&trie_backend, kind.need_register_full()); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof(&kind).unwrap().is_empty()); let kind = StorageProofKind::Full; - let backend = test_proving(&trie_backend, kind.is_flatten()); + let backend = test_proving(&trie_backend, kind.need_register_full()); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof(&kind).unwrap().is_empty()); } @@ -503,7 +505,7 @@ mod tests { (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); let test = |kind: StorageProofKind| { - let proving = ProvingBackend::new(trie, kind.is_flatten()); + let proving = ProvingBackend::new(trie, kind.need_register_full()); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof(&kind).unwrap(); @@ -513,7 +515,8 @@ mod tests { }; test(StorageProofKind::Flatten); test(StorageProofKind::Full); - test(StorageProofKind::FullCompact); + test(StorageProofKind::TrieSkipHashesFull); + test(StorageProofKind::TrieSkipHashes); } #[test] @@ -557,8 +560,8 @@ mod tests { )); let test = |kind: StorageProofKind| { - let flat = kind.is_flatten(); - let proving = ProvingBackend::new(trie, flat); + let full = kind.need_register_full(); + let proving = ProvingBackend::new(trie, full); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof(&kind).unwrap(); @@ -573,12 +576,12 @@ mod tests { assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); assert_eq!(proof_check.storage(&[64]).unwrap(), None); - let proving = ProvingBackend::new(trie, flat); + let proving = ProvingBackend::new(trie, full); assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(&kind).unwrap(); - if flat { - let proof_check = create_flat_proof_check_backend::( + 
if kind.need_check_full().unwrap() { + let proof_check = create_proof_check_backend::( in_memory_root.into(), proof ).unwrap(); @@ -588,7 +591,7 @@ mod tests { vec![64] ); } else { - let proof_check = create_proof_check_backend::( + let proof_check = create_flat_proof_check_backend::( in_memory_root.into(), proof ).unwrap(); @@ -601,6 +604,7 @@ mod tests { }; test(StorageProofKind::Flatten); test(StorageProofKind::Full); - test(StorageProofKind::FullCompact); + test(StorageProofKind::TrieSkipHashesFull); + test(StorageProofKind::TrieSkipHashes); } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index ddef355883ae6..cf8c293687c9b 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,7 +19,7 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, - ChildrenProofMap}; + ChildrenProofMap, AdditionalInfoForProcessing}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; @@ -54,7 +54,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } /// Get registered roots - pub fn extract_registered_roots(&self) -> Option>> { + pub fn extract_registered_roots(&self) -> Option { if let Some(register_roots) = self.essence.register_roots.as_ref() { let mut dest = ChildrenProofMap::default(); dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); @@ -64,7 +64,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec dest.insert(child_info.proof_info(), root.encode()); } } - Some(dest) + Some(AdditionalInfoForProcessing::ChildTrieRoots(dest)) } else { None } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index ac08bdbad8920..8f4db1d1088c3 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -38,7 +38,7 @@ pub use trie_stream::TrieStream; 
pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, create_proof_check_backend_storage, LegacyStorageProof, create_flat_proof_check_backend_storage, ChildrenProofMap, - StorageProofKind}; + StorageProofKind, AdditionalInfoForProcessing, AdditionalInfoForProcessingKind}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 9b86b15d4e3a7..c02709794f5b2 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -114,6 +114,9 @@ pub enum AdditionalInfoForProcessingKind { /// `AdditionalInfoForProcessing::ChildTrieRoots` kind. ChildTrieRoots, + /// `AdditionalInfoForProcessing::QueryPlanWithValues` kind. + QueryPlanNoValues, + /// `AdditionalInfoForProcessing::QueryPlanWithValues` kind. QueryPlanWithValues, } @@ -123,7 +126,7 @@ impl StorageProofKind { /// encoded nodes. pub fn need_additional_info_to_produce(&self) -> Option { match self { - StorageProofKind::KnownQueryPlanAndValues => Some(AdditionalInfoForProcessingKind::QueryPlanWithValues), + StorageProofKind::KnownQueryPlanAndValues => Some(AdditionalInfoForProcessingKind::QueryPlanNoValues), StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => Some(AdditionalInfoForProcessingKind::ChildTrieRoots), StorageProofKind::Full @@ -153,7 +156,7 @@ impl StorageProofKind { } } - /// Indicate if we need to record proof with splitted child trie information + /// Indicates if we need to record proof with splitted child trie information /// or can simply record on a single collection. pub fn need_register_full(&self) -> bool { match self { @@ -164,6 +167,18 @@ impl StorageProofKind { | StorageProofKind::TrieSkipHashesFull => true, } } + + /// Indicates if we can execute proof over a backend, + /// and if so, if the backend need to be full. 
+ pub fn need_check_full(&self) -> Option { + match self { + StorageProofKind::Flatten + | StorageProofKind::TrieSkipHashes => Some(false), + StorageProofKind::Full + | StorageProofKind::TrieSkipHashesFull => Some(true), + StorageProofKind::KnownQueryPlanAndValues => None, + } + } } /// A collection on encoded trie nodes. @@ -277,6 +292,7 @@ impl<'a> Encode for LegacyEncodeAdapter<'a> { } } +#[cfg_attr(test, derive(Debug, PartialEq, Eq))] /// Decode variant of `LegacyEncodeAdapter`. pub struct LegacyDecodeAdapter(pub StorageProof); @@ -735,7 +751,7 @@ fn legacy_proof_codec() { let proof = StorageProof::Flatten(content.clone()); let encoded_proof = proof.encode(); - assert_eq!(Decode::decode(&mut &encoded_proof[..]).unwrap(), proof); + assert_eq!(StorageProof::decode(&mut &encoded_proof[..]).unwrap(), proof); // test encoded minus first bytes equal to storage proof assert_eq!(&encoded_legacy[..], &encoded_proof[1..]); @@ -744,6 +760,6 @@ fn legacy_proof_codec() { assert_eq!(encoded_adapter[0], 0); assert_eq!(&encoded_adapter[1..], &encoded_proof[..]); let adapter_proof = LegacyDecodeAdapter(proof); - assert_eq!(Decode::decode(&mut &encoded_legacy[..]).unwrap(), adapter_proof); - assert_eq!(Decode::decode(&mut &encoded_adapter[..]).unwrap(), adapter_proof); + assert_eq!(LegacyDecodeAdapter::decode(&mut &encoded_legacy[..]).unwrap(), adapter_proof); + assert_eq!(LegacyDecodeAdapter::decode(&mut &encoded_adapter[..]).unwrap(), adapter_proof); } From 60c136deb618a4dffddbbc771676ba43b40cb8d6 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 20 Apr 2020 11:59:59 +0200 Subject: [PATCH 103/185] renaming refactor --- client/src/light/fetcher.rs | 3 +- .../state-machine/src/proving_backend.rs | 4 +- primitives/trie/src/lib.rs | 6 +- primitives/trie/src/storage_proof.rs | 270 +++++++++++------- 4 files changed, 174 insertions(+), 109 deletions(-) diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index f0a46610dd94b..614292ece6cbd 100644 --- 
a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -33,7 +33,6 @@ use sp_state_machine::{ InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, read_child_proof_check, CloneableSpawn, }; -use sp_trie::create_flat_proof_check_backend_storage; pub use sp_state_machine::StorageProof; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -158,7 +157,7 @@ impl> LightDataChecker { H::Out: Ord + codec::Codec, { // all the checks are sharing the same storage - let storage = create_flat_proof_check_backend_storage(remote_roots_proof) + let storage = remote_roots_proof.as_partial_flat_db::() .map_err(|e| format!("{}", e))?; // remote_roots.keys() are sorted => we can use this to group changes tries roots diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index dabc4e945198a..6b6076a4723f2 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -392,7 +392,7 @@ where H: Hasher, H::Out: Codec, { - let db = sp_trie::create_flat_proof_check_backend_storage(proof) + let db = proof.as_partial_flat_db() .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.contains(&root, EMPTY_PREFIX) { Ok(TrieBackend::new_with_roots(db, root)) @@ -411,7 +411,7 @@ where H::Out: Codec, { use std::ops::Deref; - let db = sp_trie::create_proof_check_backend_storage(proof) + let db = proof.as_partial_db() .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.deref().get(&ChildInfoProof::top_trie()) .map(|db| db.contains(&root, EMPTY_PREFIX)) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8f4db1d1088c3..9dc875666d48b 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,9 +36,9 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; -pub use storage_proof::{StorageProof, create_proof_check_backend_storage, - LegacyStorageProof, create_flat_proof_check_backend_storage, ChildrenProofMap, - StorageProofKind, AdditionalInfoForProcessing, AdditionalInfoForProcessingKind}; +pub use storage_proof::{StorageProof, LegacyStorageProof, ChildrenProofMap, + StorageProofKind, AdditionalInfoForProcessing, AdditionalInfoForProcessingKind, + AdditionalInfoFromProcessing}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index c02709794f5b2..3fe875befeaf3 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -17,6 +17,7 @@ use sp_std::collections::btree_map::BTreeMap; use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; +use sp_std::convert::TryInto; use codec::{Codec, Encode, Decode, Input, Output}; use hash_db::{Hasher, HashDB, EMPTY_PREFIX}; use crate::{MemoryDB, Layout}; @@ -59,10 +60,15 @@ pub enum StorageProofKind { /// Testing only indices /// Kind for `StorageProof::Full`. - Full = 126, + Full = 125, /// Kind for `StorageProof::TrieSkipHashesFull`. - TrieSkipHashesFull = 127, + TrieSkipHashesFull = 126, + +// /// Kind for `StorageProof::KnownQueryPlan`. +// TODO + add comment test only because I do not know +// a case where it is better than trieskiphashes usage. +// KnownQueryPlan = 127, } impl StorageProofKind { @@ -100,6 +106,7 @@ impl StorageProofKind { /// Additional information needed for packing or unpacking. /// These do not need to be part of the proof but are required /// when using the proof. +/// TODO change to input & add None variant pub enum AdditionalInfoForProcessing { /// Contains trie roots used during proof processing. 
ChildTrieRoots(ChildrenProofMap>), @@ -110,6 +117,7 @@ pub enum AdditionalInfoForProcessing { } /// Kind for designing an `AdditionalInfoForProcessing` variant. +/// TODO change to output pub enum AdditionalInfoForProcessingKind { /// `AdditionalInfoForProcessing::ChildTrieRoots` kind. ChildTrieRoots, @@ -121,6 +129,18 @@ pub enum AdditionalInfoForProcessingKind { QueryPlanWithValues, } +/// Content produced on proof verification. +pub enum AdditionalInfoFromProcessing { + /// Proof only verify to success or failure. + None, +} + +/// Kind for designing an `AdditionalInfoFromProcessing` variant. +pub enum AdditionalInfoFromProcessingKind { + /// `AdditionalInfoFromProcessing::None` kind. + None, +} + impl StorageProofKind { /// Some proof variants requires more than just the collected /// encoded nodes. @@ -145,6 +165,15 @@ impl StorageProofKind { } } + /// Some proof variants requires more than just the collected + /// encoded nodes. + pub fn produce_additional_info(&self) -> AdditionalInfoFromProcessingKind { + match self { +// StorageProofKind::KnownQueryPlan => unimplemented!(),//TODO + _ => AdditionalInfoFromProcessingKind::None, + } + } + /// Some proof can get unpack into another proof representation. pub fn can_unpack(&self) -> bool { match self { @@ -179,6 +208,34 @@ impl StorageProofKind { StorageProofKind::KnownQueryPlanAndValues => None, } } + + /// Proof that should be use with `verify` method. + pub fn can_use_verify(&self) -> bool { + match self { +// StorageProofKind::KnownQueryPlan => true, + StorageProofKind::KnownQueryPlanAndValues => true, + _ => false, + } + } + + /// Can be use as a db backend for proof check and + /// result fetch. + /// If false `StorageProof` `as_partial_db` method + /// failure is related to an unsupported capability. 
+ pub fn can_use_as_partial_db(&self) -> bool { + match self { + StorageProofKind::KnownQueryPlanAndValues => false, + _ => true, + } + } + + /// Can be use as a db backend without child trie + /// distinction. + /// If false `StorageProof` `as_partial_flat_db` method + /// failure is related to an unsupported capability. + pub fn can_use_as_flat_partial_db(&self) -> bool { + self.can_use_as_partial_db() + } } /// A collection on encoded trie nodes. @@ -388,6 +445,7 @@ impl StorageProof { /// This unpacks `TrieSkipHashesFull` to `Full` or do nothing. /// TODO EMCH document and use case for with_roots to true?? (probably unpack -> merge -> pack /// but no code for it here) + /// TODO consider making this private!! (pack to) pub fn unpack( self, with_roots: bool, @@ -429,9 +487,9 @@ impl StorageProof { } } - /// This run proof validation when the proof only expect - /// validation. - pub fn validate( + /// This run proof validation when the proof allows immediate + /// verification (`StorageProofKind::can_use_verify`). + pub fn verify( self, _additional_content: &Option, ) -> Result, H> @@ -439,7 +497,7 @@ impl StorageProof { { unimplemented!("TODO run the validation of the query plan one") } - + /// This packs when possible. pub fn pack( self, @@ -577,6 +635,99 @@ impl StorageProof { StorageProof::TrieSkipHashesFull(_) => StorageProofKind::TrieSkipHashesFull, } } + + /// Create in-memory storage of proof check backend. + /// Currently child trie are all with same backend + /// implementation, therefore using + /// `as_partial_flat_db` is prefered. + pub fn as_partial_db(self) -> Result>, H> + where + H: Hasher, + { + let mut result = ChildrenProofMap::default(); + match self { + s@StorageProof::Flatten(..) 
=> { + let db = s.as_partial_flat_db::()?; + result.insert(ChildInfoProof::top_trie(), db); + }, + StorageProof::Full(children) => { + for (child_info, proof) in children.into_iter() { + let mut db = MemoryDB::default(); + for item in proof.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } + result.insert(child_info, db); + } + }, + StorageProof::TrieSkipHashesFull(children) => { + for (child_info, proof) in children.into_iter() { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). + let (_root, db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + result.insert(child_info, db); + } + }, + s@StorageProof::TrieSkipHashes(..) => { + let db = s.as_partial_flat_db::()?; + result.insert(ChildInfoProof::top_trie(), db); + }, + StorageProof::KnownQueryPlanAndValues(_children) => { + return Err(impossible_backend_build::()); + }, + } + Ok(result) + } + + /// Create in-memory storage of proof check backend. + pub fn as_partial_flat_db(self) -> Result, H> + where + H: Hasher, + { + let mut db = MemoryDB::default(); + let mut db_empty = true; + match self { + s@StorageProof::Flatten(..) => { + for item in s.iter_nodes_flatten() { + db.insert(EMPTY_PREFIX, &item[..]); + } + }, + StorageProof::Full(children) => { + for (_child_info, proof) in children.into_iter() { + for item in proof.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } + } + }, + StorageProof::TrieSkipHashesFull(children) => { + for (_child_info, proof) in children.into_iter() { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). 
+ let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + }, + StorageProof::TrieSkipHashes(children) => { + for proof in children.into_iter() { + let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + }, + StorageProof::KnownQueryPlanAndValues(_children) => { + return Err(impossible_backend_build::()); + }, + } + Ok(db) + } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to @@ -606,105 +757,20 @@ impl Iterator for StorageProofNodeIterator { } } -// TODO EMCH use tryfrom instead of those two create. - -/// Create in-memory storage of proof check backend. -/// Currently child trie are all with same backend -/// implementation, therefore using -/// `create_flat_proof_check_backend_storage` is prefered. -/// TODO flat proof check is enough for now, do we want to -/// maintain the full variant? -pub fn create_proof_check_backend_storage( - proof: StorageProof, -) -> Result>, H> -where - H: Hasher, -{ - let mut result = ChildrenProofMap::default(); - match proof { - s@StorageProof::Flatten(..) => { - let db = create_flat_proof_check_backend_storage::(s)?; - result.insert(ChildInfoProof::top_trie(), db); - }, - StorageProof::Full(children) => { - for (child_info, proof) in children.into_iter() { - let mut db = MemoryDB::default(); - for item in proof.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } - result.insert(child_info, db); - } - }, - StorageProof::TrieSkipHashesFull(children) => { - for (child_info, proof) in children.into_iter() { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). 
- let (_root, db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - result.insert(child_info, db); - } - }, - s@StorageProof::TrieSkipHashes(..) => { - let db = create_flat_proof_check_backend_storage::(s)?; - result.insert(ChildInfoProof::top_trie(), db); - }, - StorageProof::KnownQueryPlanAndValues(_children) => { - return Err(impossible_backend_build::()); - }, - } - Ok(result) +impl TryInto> for StorageProof { + type Error = sp_std::boxed::Box>>; + + fn try_into(self) -> Result, H> { + self.as_partial_flat_db() + } } -/// Create in-memory storage of proof check backend. -pub fn create_flat_proof_check_backend_storage( - proof: StorageProof, -) -> Result, H> -where - H: Hasher, -{ - let mut db = MemoryDB::default(); - let mut db_empty = true; - match proof { - s@StorageProof::Flatten(..) => { - for item in s.iter_nodes_flatten() { - db.insert(EMPTY_PREFIX, &item); - } - }, - StorageProof::Full(children) => { - for (_child_info, proof) in children.into_iter() { - for item in proof.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } - } - }, - StorageProof::TrieSkipHashesFull(children) => { - for (_child_info, proof) in children.into_iter() { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). 
- let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - if db_empty { - db_empty = false; - db = child_db; - } else { - db.consolidate(child_db); - } - } - }, - StorageProof::TrieSkipHashes(children) => { - for proof in children.into_iter() { - let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - if db_empty { - db_empty = false; - db = child_db; - } else { - db.consolidate(child_db); - } - } - }, - StorageProof::KnownQueryPlanAndValues(_children) => { - return Err(impossible_backend_build::()); - }, +impl TryInto>> for StorageProof { + type Error = sp_std::boxed::Box>>; + + fn try_into(self) -> Result>, H> { + self.as_partial_db() } - Ok(db) } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] From e4c628973b456b3f11114d1c26b5eed4e2c6db58 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 20 Apr 2020 15:17:34 +0200 Subject: [PATCH 104/185] refact and fix flatten not use. --- .../api/proc-macro/src/impl_runtime_apis.rs | 2 +- primitives/api/src/lib.rs | 1 + primitives/state-machine/src/lib.rs | 28 +-- .../state-machine/src/proving_backend.rs | 20 +-- primitives/state-machine/src/trie_backend.rs | 8 +- primitives/trie/src/lib.rs | 4 +- primitives/trie/src/storage_proof.rs | 166 +++++++++++------- 7 files changed, 138 insertions(+), 91 deletions(-) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index bc9813bf2094b..d4d07107a5beb 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -293,7 +293,7 @@ fn generate_runtime_api_base_structures() -> Result { .and_then(|(recorder, kind)| { // TODO EMCH this will fail for compact as we need the register // root - recorder.extract_proof(&kind, None).ok() + recorder.extract_proof(&kind, #crate_::ProofInput::None).ok() }) } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index b3dc97d716189..c92f368b9f3ac 100644 --- 
a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -37,6 +37,7 @@ extern crate self as sp_api; #[cfg(feature = "std")] pub use sp_state_machine::{ OverlayedChanges, StorageProof, StorageProofKind, Backend as StateBackend, ChangesTrieState, InMemoryBackend, + ProofInput, }; #[doc(hidden)] #[cfg(feature = "std")] diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index a6b1384143811..dc2d2df3076fa 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -43,7 +43,7 @@ mod trie_backend_essence; mod stats; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, - StorageProof, StorageProofKind, ChildrenProofMap, AdditionalInfoForProcessingKind}; + StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; @@ -547,7 +547,7 @@ where H::Out: Ord + 'static + codec::Codec, N: crate::changes_trie::BlockNumber, { - match proof.kind().need_check_full() { + match proof.kind().use_full_partial_db() { Some(true) => { let trie_backend = create_proof_check_backend::(root.into(), proof)?; execution_proof_check_on_trie_backend::<_, N, _>( @@ -710,15 +710,15 @@ where } let proof = proving_backend.extract_proof(&kind) .map_err(|e| Box::new(e) as Box)?; - let infos = match kind.need_additional_info_to_produce() { - Some(AdditionalInfoForProcessingKind::ChildTrieRoots) => { + let infos = match kind.processing_input_kind() { + ProofInputKind::ChildTrieRoots => { trie_backend.extract_registered_roots() }, - Some(AdditionalInfoForProcessingKind::QueryPlanNoValues) => { + ProofInputKind::QueryPlanNoValues => { unimplemented!("TODO from keys, do not care about memory copy at first") }, - Some(_) => return Err(Box::new("Cannot produce required info for proof")), - None => None, + ProofInputKind::None => ProofInput::None, + _ => return Err(Box::new("Cannot produce required info for 
proof")), }; Ok(proof.pack::(&infos) .map_err(|e| Box::new(format!("{}", e)) as Box)?) @@ -746,15 +746,15 @@ where } let proof = proving_backend.extract_proof(&kind) .map_err(|e| Box::new(e) as Box)?; - let infos = match kind.need_additional_info_to_produce() { - Some(AdditionalInfoForProcessingKind::ChildTrieRoots) => { + let infos = match kind.processing_input_kind() { + ProofInputKind::ChildTrieRoots => { trie_backend.extract_registered_roots() }, - Some(AdditionalInfoForProcessingKind::QueryPlanNoValues) => { + ProofInputKind::QueryPlanNoValues => { unimplemented!("TODO from keys, do not care about memory copy at first, warn to include child root fetch") }, - Some(_) => return Err(Box::new("Cannot produce required info for proof")), - None => None, + ProofInputKind::None => ProofInput::None, + _ => return Err(Box::new("Cannot produce required info for proof")), }; Ok(proof.pack::(&infos) .map_err(|e| Box::new(format!("{}", e)) as Box)?) @@ -775,7 +775,7 @@ where I::Item: AsRef<[u8]>, { let mut result = HashMap::new(); - match proof.kind().need_check_full() { + match proof.kind().use_full_partial_db() { Some(true) => { let proving_backend = create_proof_check_backend::(root, proof)?; for key in keys.into_iter() { @@ -811,7 +811,7 @@ where I::Item: AsRef<[u8]>, { let mut result = HashMap::new(); - match proof.kind().need_check_full() { + match proof.kind().use_full_partial_db() { Some(true) => { let proving_backend = create_proof_check_backend::(root, proof)?; for key in keys.into_iter() { diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 6b6076a4723f2..b1f3e59f19ac2 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -23,8 +23,7 @@ use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProofKind, 
StorageProof, AdditionalInfoForProcessingKind, - AdditionalInfoForProcessing, + record_all_keys, StorageProofKind, StorageProof, ProofInputKind, ProofInput, }; pub use sp_trie::{Recorder, ChildrenProofMap}; pub use sp_trie::trie_types::{Layout, TrieError}; @@ -186,11 +185,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Extracting the gathered unordered proof. pub fn extract_proof(&self, kind: &StorageProofKind) -> Result { - let roots = match kind.need_additional_info_to_produce() { - Some(AdditionalInfoForProcessingKind::ChildTrieRoots) => { + let roots = match kind.processing_input_kind() { + ProofInputKind::ChildTrieRoots => { self.0.extract_registered_roots() }, - _ => None, + _ => ProofInput::None, }; self.0.essence().backend_storage().proof_recorder.extract_proof(kind, roots) } @@ -204,7 +203,7 @@ impl ProofRecorder pub fn extract_proof( &self, kind: &StorageProofKind, - additional_infos: Option, + input: ProofInput, ) -> Result { // TODO EMCH logic should be in sp_trie Ok(match self { @@ -236,10 +235,11 @@ impl ProofRecorder StorageProofKind::Flatten => unpacked_full.flatten(), StorageProofKind::Full => unpacked_full, StorageProofKind::KnownQueryPlanAndValues - | StorageProofKind::TrieSkipHashesFull => unpacked_full.pack::(&additional_infos) + | StorageProofKind::KnownQueryPlan + | StorageProofKind::TrieSkipHashesFull => unpacked_full.pack::(&input) .map_err(|e| format!("{}", e))?, - StorageProofKind::TrieSkipHashes => unpacked_full.pack::(&additional_infos) - .map_err(|e| format!("{}", e))?.flatten(), // TODO EMCH I need to assert it is actually flatten (debug it), got strange non failing test on proof_recorded_and_checked_with_child (when was failing of flatten version + StorageProofKind::TrieSkipHashes => unpacked_full.pack::(&input) + .map_err(|e| format!("{}", e))?.flatten(), } }, }) @@ -580,7 +580,7 @@ mod tests { assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = 
proving.extract_proof(&kind).unwrap(); - if kind.need_check_full().unwrap() { + if kind.use_full_partial_db().unwrap() { let proof_check = create_proof_check_backend::( in_memory_root.into(), proof diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index cf8c293687c9b..fed5216195446 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,7 +19,7 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, - ChildrenProofMap, AdditionalInfoForProcessing}; + ChildrenProofMap, ProofInput}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; @@ -54,7 +54,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } /// Get registered roots - pub fn extract_registered_roots(&self) -> Option { + pub fn extract_registered_roots(&self) -> ProofInput { if let Some(register_roots) = self.essence.register_roots.as_ref() { let mut dest = ChildrenProofMap::default(); dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); @@ -64,9 +64,9 @@ impl, H: Hasher> TrieBackend where H::Out: Codec dest.insert(child_info.proof_info(), root.encode()); } } - Some(AdditionalInfoForProcessing::ChildTrieRoots(dest)) + ProofInput::ChildTrieRoots(dest) } else { - None + ProofInput::None } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 9dc875666d48b..768b2bab00574 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -37,8 +37,8 @@ pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, LegacyStorageProof, ChildrenProofMap, - StorageProofKind, AdditionalInfoForProcessing, AdditionalInfoForProcessingKind, - AdditionalInfoFromProcessing}; + StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, Output as ProofOutput, + OutputKind as ProofOutputKind}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 3fe875befeaf3..d0456035b9b6e 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -18,7 +18,7 @@ use sp_std::collections::btree_map::BTreeMap; use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; use sp_std::convert::TryInto; -use codec::{Codec, Encode, Decode, Input, Output}; +use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput}; use hash_db::{Hasher, HashDB, EMPTY_PREFIX}; use crate::{MemoryDB, Layout}; use sp_storage::{ChildInfoProof, ChildType}; @@ -65,10 +65,8 @@ pub enum StorageProofKind { /// Kind for `StorageProof::TrieSkipHashesFull`. TrieSkipHashesFull = 126, -// /// Kind for `StorageProof::KnownQueryPlan`. -// TODO + add comment test only because I do not know -// a case where it is better than trieskiphashes usage. -// KnownQueryPlan = 127, + /// Kind for `StorageProof::KnownQueryPlan`. + KnownQueryPlan = 127, } impl StorageProofKind { @@ -106,8 +104,10 @@ impl StorageProofKind { /// Additional information needed for packing or unpacking. /// These do not need to be part of the proof but are required /// when using the proof. -/// TODO change to input & add None variant -pub enum AdditionalInfoForProcessing { +pub enum Input { + /// Proof is self contained. + None, + /// Contains trie roots used during proof processing. 
ChildTrieRoots(ChildrenProofMap>), @@ -116,68 +116,79 @@ pub enum AdditionalInfoForProcessing { QueryPlanWithValues(ChildrenProofMap<(Vec, Vec<(Vec, Option>)>)>), } -/// Kind for designing an `AdditionalInfoForProcessing` variant. -/// TODO change to output -pub enum AdditionalInfoForProcessingKind { - /// `AdditionalInfoForProcessing::ChildTrieRoots` kind. +/// Kind for designing an `Input` variant. +pub enum InputKind { + /// `Input::None` kind. + None, + + /// `Input::ChildTrieRoots` kind. ChildTrieRoots, - /// `AdditionalInfoForProcessing::QueryPlanWithValues` kind. + /// `Input::QueryPlanWithValues` kind. QueryPlanNoValues, - /// `AdditionalInfoForProcessing::QueryPlanWithValues` kind. + /// `Input::QueryPlanWithValues` kind. QueryPlanWithValues, } /// Content produced on proof verification. -pub enum AdditionalInfoFromProcessing { +pub enum Output { /// Proof only verify to success or failure. None, + + /// Contains key and values queried during the proof processing. + QueryPlanWithValues(ChildrenProofMap, Option>)>>), } -/// Kind for designing an `AdditionalInfoFromProcessing` variant. -pub enum AdditionalInfoFromProcessingKind { - /// `AdditionalInfoFromProcessing::None` kind. +/// Kind for designing an `Output` variant. +pub enum OutputKind { + /// `Output::None` kind. None, + + /// `Output::QueryPlanWithValues` kind. + QueryPlanWithValues, } impl StorageProofKind { /// Some proof variants requires more than just the collected /// encoded nodes. 
- pub fn need_additional_info_to_produce(&self) -> Option { + pub fn processing_input_kind(&self) -> InputKind { match self { - StorageProofKind::KnownQueryPlanAndValues => Some(AdditionalInfoForProcessingKind::QueryPlanNoValues), + StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanNoValues, + | StorageProofKind::KnownQueryPlan => InputKind::QueryPlanNoValues, StorageProofKind::TrieSkipHashes - | StorageProofKind::TrieSkipHashesFull => Some(AdditionalInfoForProcessingKind::ChildTrieRoots), + | StorageProofKind::TrieSkipHashesFull => InputKind::ChildTrieRoots, StorageProofKind::Full - | StorageProofKind::Flatten => None, + | StorageProofKind::Flatten => InputKind::None, } } /// Same as `need_additional_info_to_produce` but for reading. - pub fn need_additional_info_to_read(&self) -> Option { + pub fn verify_input_kind(&self) -> InputKind { match self { - StorageProofKind::KnownQueryPlanAndValues => Some(AdditionalInfoForProcessingKind::QueryPlanWithValues), + StorageProofKind::KnownQueryPlan => InputKind::QueryPlanNoValues, + StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanWithValues, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull | StorageProofKind::Full - | StorageProofKind::Flatten => None, + | StorageProofKind::Flatten => InputKind::None, } } /// Some proof variants requires more than just the collected /// encoded nodes. - pub fn produce_additional_info(&self) -> AdditionalInfoFromProcessingKind { + pub fn produce_additional_info(&self) -> OutputKind { match self { -// StorageProofKind::KnownQueryPlan => unimplemented!(),//TODO - _ => AdditionalInfoFromProcessingKind::None, + StorageProofKind::KnownQueryPlan => OutputKind::QueryPlanWithValues, + _ => OutputKind::None, } } /// Some proof can get unpack into another proof representation. 
pub fn can_unpack(&self) -> bool { match self { - StorageProofKind::KnownQueryPlanAndValues => false, + StorageProofKind::KnownQueryPlanAndValues + | StorageProofKind::KnownQueryPlan => false, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => true, StorageProofKind::Full @@ -191,29 +202,31 @@ impl StorageProofKind { match self { StorageProofKind::Flatten => false, StorageProofKind::Full + | StorageProofKind::KnownQueryPlan | StorageProofKind::KnownQueryPlanAndValues | StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => true, } } - /// Indicates if we can execute proof over a backend, + /// Indicates if we should execute proof over a backend, /// and if so, if the backend need to be full. - pub fn need_check_full(&self) -> Option { + pub fn use_full_partial_db(&self) -> Option { match self { StorageProofKind::Flatten | StorageProofKind::TrieSkipHashes => Some(false), StorageProofKind::Full | StorageProofKind::TrieSkipHashesFull => Some(true), - StorageProofKind::KnownQueryPlanAndValues => None, + StorageProofKind::KnownQueryPlanAndValues + | StorageProofKind::KnownQueryPlan => None, } } /// Proof that should be use with `verify` method. pub fn can_use_verify(&self) -> bool { match self { -// StorageProofKind::KnownQueryPlan => true, - StorageProofKind::KnownQueryPlanAndValues => true, + StorageProofKind::KnownQueryPlanAndValues + | StorageProofKind::KnownQueryPlan => true, _ => false, } } @@ -224,7 +237,8 @@ impl StorageProofKind { /// failure is related to an unsupported capability. pub fn can_use_as_partial_db(&self) -> bool { match self { - StorageProofKind::KnownQueryPlanAndValues => false, + StorageProofKind::KnownQueryPlanAndValues + | StorageProofKind::KnownQueryPlan => false, _ => true, } } @@ -279,6 +293,12 @@ pub enum StorageProof { // Following variants are only for testing, they still can be use but // decoding is not implemented. + /// This acts as `KnownQueryPlanAndValues` but without value. 
+ /// Values are therefore store in the proof and can be retrieved + /// after succesfully checking the proof. + /// This is mainly provided for test purpose and extensibility. + KnownQueryPlan(ChildrenProofMap), + /// Fully described proof, it includes the child trie individual description and split its /// content by child trie. /// Currently Full variant is unused as all our child trie kind can share a same memory db @@ -307,7 +327,7 @@ impl LegacyStorageProof { } impl Decode for StorageProof { - fn decode(value: &mut I) -> CodecResult { + fn decode(value: &mut I) -> CodecResult { let kind = value.read_byte()?; Ok(match StorageProofKind::read_from_byte(kind) .ok_or_else(|| codec::Error::from("Invalid storage kind"))? { @@ -315,6 +335,8 @@ impl Decode for StorageProof { StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Decode::decode(value)?), StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(Decode::decode(value)?), + StorageProofKind::KnownQueryPlan + => StorageProof::KnownQueryPlan(Decode::decode(value)?), StorageProofKind::Full => StorageProof::Full(Decode::decode(value)?), StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(Decode::decode(value)?), @@ -323,12 +345,13 @@ impl Decode for StorageProof { } impl Encode for StorageProof { - fn encode_to(&self, dest: &mut T) { + fn encode_to(&self, dest: &mut T) { (self.kind() as u8).encode_to(dest); match self { StorageProof::Flatten(p) => p.encode_to(dest), StorageProof::TrieSkipHashes(p) => p.encode_to(dest), StorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), + StorageProof::KnownQueryPlan(p) => p.encode_to(dest), StorageProof::Full(p) => p.encode_to(dest), StorageProof::TrieSkipHashesFull(p) => p.encode_to(dest), } @@ -343,7 +366,7 @@ impl Encode for StorageProof { pub struct LegacyEncodeAdapter<'a>(pub &'a StorageProof); impl<'a> Encode for LegacyEncodeAdapter<'a> { - fn encode_to(&self, dest: &mut T) { + fn encode_to(&self, dest: 
&mut T) { 0u8.encode_to(dest); self.0.encode_to(dest); } @@ -356,7 +379,7 @@ pub struct LegacyDecodeAdapter(pub StorageProof); /// Allow read ahead on input. pub struct InputRevertReadAhead<'a, I>(pub &'a mut &'a [u8], pub &'a mut I); -impl<'a, I: Input> Input for InputRevertReadAhead<'a, I> { +impl<'a, I: CodecInput> CodecInput for InputRevertReadAhead<'a, I> { fn remaining_len(&mut self) -> CodecResult> { Ok(self.1.remaining_len()?.map(|l| l + self.0.len())) } @@ -389,7 +412,7 @@ impl<'a, I: Input> Input for InputRevertReadAhead<'a, I> { } impl Decode for LegacyDecodeAdapter { - fn decode(value: &mut I) -> CodecResult { + fn decode(value: &mut I) -> CodecResult { let legacy = value.read_byte()?; Ok(if legacy == 0 { LegacyDecodeAdapter(Decode::decode(value)?) @@ -419,6 +442,7 @@ impl StorageProof { StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(ChildrenProofMap::default()), StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(ChildrenProofMap::default()), + StorageProofKind::KnownQueryPlan => StorageProof::KnownQueryPlan(ChildrenProofMap::default()), StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Default::default()), } } @@ -429,6 +453,7 @@ impl StorageProof { StorageProof::Flatten(data) => data.is_empty(), StorageProof::Full(data) => data.is_empty(), StorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), + StorageProof::KnownQueryPlan(data) => data.is_empty(), StorageProof::TrieSkipHashes(data) => data.is_empty(), StorageProof::TrieSkipHashesFull(data) => data.is_empty(), } @@ -491,7 +516,7 @@ impl StorageProof { /// verification (`StorageProofKind::can_use_verify`). pub fn verify( self, - _additional_content: &Option, + _additional_content: &Input, ) -> Result, H> where H::Out: Codec, { @@ -501,14 +526,14 @@ impl StorageProof { /// This packs when possible. 
pub fn pack( self, - additional_content: &Option, + additional_content: &Input, ) -> Result where H::Out: Codec, { Ok(match self { StorageProof::Full(children) => { match additional_content { - Some(AdditionalInfoForProcessing::ChildTrieRoots(roots)) => { + Input::ChildTrieRoots(roots) => { let mut result = ChildrenProofMap::default(); for (child_info, proof) in children { match child_info.child_type() { @@ -516,8 +541,6 @@ impl StorageProof { let root = roots.get(&child_info) .and_then(|r| Decode::decode(&mut &r[..]).ok()) .ok_or_else(|| missing_pack_input::())?; - // TODO EMCH pack directly from recorded memory db -> have a pack_proof returning - // directly memory db?? seems wrong?? let trie_nodes = crate::pack_proof::>(&root, &proof[..])?; result.insert(child_info.clone(), trie_nodes); } @@ -525,33 +548,47 @@ impl StorageProof { } StorageProof::TrieSkipHashesFull(result) }, - Some(AdditionalInfoForProcessing::QueryPlanWithValues(_plan)) => { + Input::QueryPlanWithValues(_plan) => { unimplemented!("TODO pack query plan mode") }, - None => StorageProof::Full(children), + Input::None => StorageProof::Full(children), } }, s => s, }) } - /// This flatten `Full` to `Flatten`. + /// This flatten some children expanded proof to their + /// non expanded counterpart when possible. /// Note that if for some reason child proof were not /// attached to the top trie, they will be lost. 
pub fn flatten(self) -> Self { - if let StorageProof::Full(children) = self { - let mut result = Vec::new(); - children.into_iter().for_each(|(child_info, proof)| { - match child_info.child_type() { - ChildType::ParentKeyId => { - // this can get merged with top, since it is proof we do not use prefix - result.extend(proof); + match self { + StorageProof::Full(children) => { + let mut result = Vec::new(); + children.into_iter().for_each(|(child_info, proof)| { + match child_info.child_type() { + ChildType::ParentKeyId => { + // this can get merged with top, since it is proof we do not use prefix + result.extend(proof); + } } - } - }); - StorageProof::Flatten(result) - } else { - self + }); + StorageProof::Flatten(result) + }, + StorageProof::TrieSkipHashesFull(children) => { + let mut result = Vec::new(); + children.into_iter().for_each(|(child_info, proof)| { + match child_info.child_type() { + ChildType::ParentKeyId => { + result.push(proof); + } + } + }); + + StorageProof::TrieSkipHashes(result) + }, + _ => self, } } @@ -582,7 +619,8 @@ impl StorageProof { &StorageProof::TrieSkipHashes(..) => { proof = proof.unpack::(false)?.0; }, - &StorageProof::KnownQueryPlanAndValues(..) => { + &StorageProof::KnownQueryPlanAndValues(..) + | &StorageProof::KnownQueryPlan(..) => { return Err(impossible_merge_for_proof::()); }, _ => (), @@ -592,6 +630,7 @@ impl StorageProof { StorageProof::TrieSkipHashesFull(..) | StorageProof::TrieSkipHashes(..) | StorageProof::KnownQueryPlanAndValues(..) + | StorageProof::KnownQueryPlan(..) 
=> unreachable!("Unpacked or early return earlier"), StorageProof::Flatten(proof) => { if !do_flatten { @@ -631,6 +670,7 @@ impl StorageProof { StorageProof::Flatten(_) => StorageProofKind::Flatten, StorageProof::TrieSkipHashes(_) => StorageProofKind::TrieSkipHashes, StorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, + StorageProof::KnownQueryPlan(_) => StorageProofKind::KnownQueryPlan, StorageProof::Full(_) => StorageProofKind::Full, StorageProof::TrieSkipHashesFull(_) => StorageProofKind::TrieSkipHashesFull, } @@ -674,6 +714,9 @@ impl StorageProof { StorageProof::KnownQueryPlanAndValues(_children) => { return Err(impossible_backend_build::()); }, + StorageProof::KnownQueryPlan(_children) => { + return Err(impossible_backend_build::()); + }, } Ok(result) } @@ -725,6 +768,9 @@ impl StorageProof { StorageProof::KnownQueryPlanAndValues(_children) => { return Err(impossible_backend_build::()); }, + StorageProof::KnownQueryPlan(_children) => { + return Err(impossible_backend_build::()); + }, } Ok(db) } From de305ed2cfd1fe5766621e48724af4a3e13096b0 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 20 Apr 2020 17:44:49 +0200 Subject: [PATCH 105/185] move all pack to storage proof --- Cargo.lock | 1 + primitives/state-machine/src/lib.rs | 36 +--- .../state-machine/src/proving_backend.rs | 74 +++---- primitives/storage/src/lib.rs | 5 - primitives/trie/Cargo.toml | 1 + primitives/trie/src/lib.rs | 16 +- primitives/trie/src/storage_proof.rs | 204 +++++++++++++----- 7 files changed, 187 insertions(+), 150 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c797abdc33ce..4e62ac966023d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7729,6 +7729,7 @@ version = "2.0.0-alpha.6" dependencies = [ "criterion 0.2.11", "hash-db", + "hashbrown", "hex-literal", "memory-db", "parity-scale-codec", diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index dc2d2df3076fa..75267d77046a0 100644 --- 
a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -518,14 +518,12 @@ where runtime_code, spawn_handle, ); - // TODO EMCH passing root in input is probably a dead end: registering them in overlay seems - // better!!!! let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( always_wasm(), None, )?; - let proof = proving_backend.extract_proof(&kind) + let proof = proving_backend.extract_proof(kind) .map_err(|e| Box::new(e) as Box)?; Ok((result.into_encoded(), proof)) } @@ -708,20 +706,8 @@ where .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - let proof = proving_backend.extract_proof(&kind) - .map_err(|e| Box::new(e) as Box)?; - let infos = match kind.processing_input_kind() { - ProofInputKind::ChildTrieRoots => { - trie_backend.extract_registered_roots() - }, - ProofInputKind::QueryPlanNoValues => { - unimplemented!("TODO from keys, do not care about memory copy at first") - }, - ProofInputKind::None => ProofInput::None, - _ => return Err(Box::new("Cannot produce required info for proof")), - }; - Ok(proof.pack::(&infos) - .map_err(|e| Box::new(format!("{}", e)) as Box)?) + Ok(proving_backend.extract_proof(kind) + .map_err(|e| Box::new(e) as Box)?) } /// Generate storage read proof on pre-created trie backend. @@ -744,20 +730,8 @@ where .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - let proof = proving_backend.extract_proof(&kind) - .map_err(|e| Box::new(e) as Box)?; - let infos = match kind.processing_input_kind() { - ProofInputKind::ChildTrieRoots => { - trie_backend.extract_registered_roots() - }, - ProofInputKind::QueryPlanNoValues => { - unimplemented!("TODO from keys, do not care about memory copy at first, warn to include child root fetch") - }, - ProofInputKind::None => ProofInput::None, - _ => return Err(Box::new("Cannot produce required info for proof")), - }; - Ok(proof.pack::(&infos) - .map_err(|e| Box::new(format!("{}", e)) as Box)?) 
+ Ok(proving_backend.extract_proof(kind) + .map_err(|e| Box::new(e) as Box)?) } /// Check storage read proof, generated by `prove_read` call. diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index b1f3e59f19ac2..62498d715a59c 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -24,13 +24,13 @@ use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys, StorageProofKind, StorageProof, ProofInputKind, ProofInput, + RecordMapTrieNodes, }; pub use sp_trie::{Recorder, ChildrenProofMap}; pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, Backend}; -use std::collections::HashMap; use crate::DBValue; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildrenMap}; @@ -120,9 +120,9 @@ pub enum ProofRecorder { // root of each child is added to be able to pack. /// Proof keep a separation between child trie content, this is usually useless, /// but when we use proof compression we want this separation. - Full(Arc::Out, Option>>>>), + Full(Arc>>>), /// Single level of storage for all recoded nodes. - Flat(Arc::Out, Option>>>), + Flat(Arc>>), } impl Default for ProofRecorder { @@ -184,7 +184,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } /// Extracting the gathered unordered proof. - pub fn extract_proof(&self, kind: &StorageProofKind) -> Result { + pub fn extract_proof(&self, kind: StorageProofKind) -> Result { let roots = match kind.processing_input_kind() { ProofInputKind::ChildTrieRoots => { self.0.extract_registered_roots() @@ -202,46 +202,20 @@ impl ProofRecorder /// Extracting the gathered unordered proof. 
pub fn extract_proof( &self, - kind: &StorageProofKind, + kind: StorageProofKind, input: ProofInput, ) -> Result { - // TODO EMCH logic should be in sp_trie Ok(match self { - ProofRecorder::Flat(rec) => { - let trie_nodes = rec - .read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - match kind { - StorageProofKind::Flatten => StorageProof::Flatten(trie_nodes), - _ => return Err("Invalid proof kind for a flat proof record".to_string()), - } - }, - ProofRecorder::Full(rec) => { - let mut children = ChildrenProofMap::default(); - // TODO EMCH logic should be in sp_trie and not build the - // intermediate full proof (especially for flattened one). - for (child_info, set) in rec.read().iter() { - let trie_nodes: Vec> = set - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - children.insert(child_info.proof_info(), trie_nodes); - } - let unpacked_full = StorageProof::Full(children); - - match kind { - StorageProofKind::Flatten => unpacked_full.flatten(), - StorageProofKind::Full => unpacked_full, - StorageProofKind::KnownQueryPlanAndValues - | StorageProofKind::KnownQueryPlan - | StorageProofKind::TrieSkipHashesFull => unpacked_full.pack::(&input) - .map_err(|e| format!("{}", e))?, - StorageProofKind::TrieSkipHashes => unpacked_full.pack::(&input) - .map_err(|e| format!("{}", e))?.flatten(), - } - }, + ProofRecorder::Flat(rec) => StorageProof::extract_proof_from_flat( + &*rec.read(), + kind, + &input, + ).map_err(|e| format!("{}", e))?, + ProofRecorder::Full(rec) => StorageProof::extract_proof( + &*rec.read(), + kind, + &input, + ).map_err(|e| format!("{}", e))?, }) } } @@ -442,13 +416,13 @@ mod tests { fn proof_is_empty_until_value_is_read() { let trie_backend = test_trie(); let kind = StorageProofKind::Flatten; - assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(&kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, 
kind.need_register_full()).extract_proof(kind).unwrap().is_empty()); let kind = StorageProofKind::Full; - assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(&kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(kind).unwrap().is_empty()); let kind = StorageProofKind::TrieSkipHashesFull; - assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(&kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(kind).unwrap().is_empty()); let kind = StorageProofKind::TrieSkipHashes; - assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(&kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(kind).unwrap().is_empty()); } #[test] @@ -457,11 +431,11 @@ mod tests { let kind = StorageProofKind::Flatten; let backend = test_proving(&trie_backend, kind.need_register_full()); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof(&kind).unwrap().is_empty()); + assert!(!backend.extract_proof(kind).unwrap().is_empty()); let kind = StorageProofKind::Full; let backend = test_proving(&trie_backend, kind.need_register_full()); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof(&kind).unwrap().is_empty()); + assert!(!backend.extract_proof(kind).unwrap().is_empty()); } #[test] @@ -508,7 +482,7 @@ mod tests { let proving = ProvingBackend::new(trie, kind.need_register_full()); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof(&kind).unwrap(); + let proof = proving.extract_proof(kind).unwrap(); let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); @@ -564,7 +538,7 @@ mod tests { let proving = ProvingBackend::new(trie, 
full); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof(&kind).unwrap(); + let proof = proving.extract_proof(kind).unwrap(); let proof_check = create_proof_check_backend::( in_memory_root.into(), @@ -579,7 +553,7 @@ mod tests { let proving = ProvingBackend::new(trie, full); assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); - let proof = proving.extract_proof(&kind).unwrap(); + let proof = proving.extract_proof(kind).unwrap(); if kind.use_full_partial_db().unwrap() { let proof_check = create_proof_check_backend::( in_memory_root.into(), diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 388d27907abad..42d795676d769 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -420,13 +420,11 @@ impl ChildTrieParentKeyId { } } -#[cfg(feature = "std")] #[derive(Clone, PartialEq, Eq, Debug)] /// Type for storing a map of child trie related information. /// A few utilities methods are defined. 
pub struct ChildrenMap(pub BTreeMap); -#[cfg(feature = "std")] impl sp_std::ops::Deref for ChildrenMap { type Target = BTreeMap; @@ -435,21 +433,18 @@ impl sp_std::ops::Deref for ChildrenMap { } } -#[cfg(feature = "std")] impl sp_std::ops::DerefMut for ChildrenMap { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -#[cfg(feature = "std")] impl sp_std::default::Default for ChildrenMap { fn default() -> Self { ChildrenMap(BTreeMap::new()) } } -#[cfg(feature = "std")] impl IntoIterator for ChildrenMap { type Item = (ChildInfo, T); type IntoIter = sp_std::collections::btree_map::IntoIter; diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 93c1ed738037e..fa79b5761b063 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -25,6 +25,7 @@ trie-db = { version = "0.20.1", default-features = false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.20.0", default-features = false } sp-core = { version = "2.0.0-alpha.6", default-features = false, path = "../core" } +hashbrown = { version = "0.6.3", default-features = false, features = [ "ahash" ] } [dev-dependencies] trie-bench = "0.21.0" diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 768b2bab00574..ccd01ba310cc4 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -38,7 +38,7 @@ pub use trie_stream::TrieStream; pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, LegacyStorageProof, ChildrenProofMap, StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, Output as ProofOutput, - OutputKind as ProofOutputKind}; + OutputKind as ProofOutputKind, RecordMapTrieNodes}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, @@ -318,14 +318,12 @@ pub fn record_all_keys( Ok(()) } -/// Pack proof. 
-fn pack_proof(root: &TrieHash, input: &[Vec]) - -> Result>, Box>> { - let mut memory_db = MemoryDB::<::Hash>::default(); - for i in input.as_ref() { - memory_db.insert(EMPTY_PREFIX, i.as_ref()); - } - let trie = TrieDB::::new(&memory_db, root)?; +/// Pack proof from memdb. +fn pack_proof_from_collected( + root: &TrieHash, + input: &dyn hash_db::HashDBRef, +) -> Result>, Box>> { + let trie = TrieDB::::new(input, root)?; trie_db::encode_compact(&trie) } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index d0456035b9b6e..7df27dff55b8f 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -19,11 +19,20 @@ use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; use sp_std::convert::TryInto; use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput}; -use hash_db::{Hasher, HashDB, EMPTY_PREFIX}; +use hash_db::{Hasher, HashDB, HashDBRef, EMPTY_PREFIX}; use crate::{MemoryDB, Layout}; -use sp_storage::{ChildInfoProof, ChildType}; +use sp_storage::{ChildInfoProof, ChildType, ChildrenMap}; use crate::TrieError; - +use trie_db::DBValue; +// we are not using std as this use in no_std is +// only allowed here because it is already use in +// no_std use of trie_db. +#[cfg(not(feature = "std"))] +use hashbrown::HashMap; + +#[cfg(feature = "std")] +use std::collections::HashMap; + type Result = sp_std::result::Result>>>; type CodecResult = sp_std::result::Result; @@ -114,6 +123,10 @@ pub enum Input { /// Contains trie roots used during proof processing. /// Contains key and values queried during the proof processing. QueryPlanWithValues(ChildrenProofMap<(Vec, Vec<(Vec, Option>)>)>), + + /// Contains trie roots used during proof processing. + /// Contains keys queried during the proof processing. + QueryPlan(ChildrenProofMap<(Vec, Vec>)>), } /// Kind for designing an `Input` variant. @@ -124,8 +137,8 @@ pub enum InputKind { /// `Input::ChildTrieRoots` kind. 
ChildTrieRoots, - /// `Input::QueryPlanWithValues` kind. - QueryPlanNoValues, + /// `Input::QueryPlan` kind. + QueryPlan, /// `Input::QueryPlanWithValues` kind. QueryPlanWithValues, @@ -154,8 +167,8 @@ impl StorageProofKind { /// encoded nodes. pub fn processing_input_kind(&self) -> InputKind { match self { - StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanNoValues, - | StorageProofKind::KnownQueryPlan => InputKind::QueryPlanNoValues, + StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlan, + | StorageProofKind::KnownQueryPlan => InputKind::QueryPlan, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => InputKind::ChildTrieRoots, StorageProofKind::Full @@ -166,7 +179,7 @@ impl StorageProofKind { /// Same as `need_additional_info_to_produce` but for reading. pub fn verify_input_kind(&self) -> InputKind { match self { - StorageProofKind::KnownQueryPlan => InputKind::QueryPlanNoValues, + StorageProofKind::KnownQueryPlan => InputKind::QueryPlan, StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanWithValues, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull @@ -468,36 +481,24 @@ impl StorageProof { } /// This unpacks `TrieSkipHashesFull` to `Full` or do nothing. - /// TODO EMCH document and use case for with_roots to true?? (probably unpack -> merge -> pack - /// but no code for it here) - /// TODO consider making this private!! 
(pack to) - pub fn unpack( + fn unpack( self, - with_roots: bool, - ) -> Result<(Self, Option>>), H> + ) -> Result where H::Out: Codec, { - let mut roots = if with_roots { - Some(ChildrenProofMap::default()) - } else { - None - }; match self { StorageProof::TrieSkipHashesFull(children) => { let mut result = ChildrenProofMap::default(); for (child_info, proof) in children { match child_info.child_type() { ChildType::ParentKeyId => { - // Note that unpack does fill a memory db and on verification we will - // probalby switch this proof to a memory db to, so the function to produce - // the backend should not use this primitive. - let (root, unpacked_proof) = crate::unpack_proof::>(proof.as_slice())?; - roots.as_mut().map(|roots| roots.insert(child_info.clone(), root.encode())); + // Note that we could return roots from unpacking. + let (_root, unpacked_proof) = crate::unpack_proof::>(proof.as_slice())?; result.insert(child_info, unpacked_proof); } } } - Ok((StorageProof::Full(result), roots)) + Ok(StorageProof::Full(result)) }, StorageProof::TrieSkipHashes(children) => { let mut result = ProofNodes::default(); @@ -506,9 +507,9 @@ impl StorageProof { result.extend(unpacked_proof); } - Ok((StorageProof::Flatten(result), None)) + Ok(StorageProof::Flatten(result)) }, - s => Ok((s, None)), + s => Ok(s), } } @@ -523,38 +524,95 @@ impl StorageProof { unimplemented!("TODO run the validation of the query plan one") } - /// This packs when possible. - pub fn pack( - self, - additional_content: &Input, + /// This produce the proof from collected information. 
+ pub fn extract_proof( + collected: &ChildrenMap>, + kind: StorageProofKind, + input: &Input, ) -> Result where H::Out: Codec, { - Ok(match self { - StorageProof::Full(children) => { - match additional_content { - Input::ChildTrieRoots(roots) => { - let mut result = ChildrenProofMap::default(); - for (child_info, proof) in children { - match child_info.child_type() { - ChildType::ParentKeyId => { - let root = roots.get(&child_info) - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .ok_or_else(|| missing_pack_input::())?; - let trie_nodes = crate::pack_proof::>(&root, &proof[..])?; - result.insert(child_info.clone(), trie_nodes); - } - } + Ok(match kind { + StorageProofKind::Flatten => { + let mut result = Vec::new(); + collected.iter().for_each(|(child_info, proof)| { + match child_info.child_type() { + ChildType::ParentKeyId => { + // this can get merged with top, we do not use key prefix + result.extend(proof.0.clone() + .drain() + .filter_map(|(_k, v)| v) + ); } - StorageProof::TrieSkipHashesFull(result) - }, - Input::QueryPlanWithValues(_plan) => { - unimplemented!("TODO pack query plan mode") - }, - Input::None => StorageProof::Full(children), + } + }); + StorageProof::Flatten(result) + }, + StorageProofKind::Full => { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in collected.iter() { + let trie_nodes: Vec> = set + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + result.insert(child_info.proof_info(), trie_nodes); } + StorageProof::Full(result) + }, + StorageProofKind::TrieSkipHashesFull => { + if let Input::ChildTrieRoots(roots) = input { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in collected.iter() { + let root = roots.get(&child_info.proof_info()) + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .ok_or_else(|| missing_pack_input::())?; + let trie_nodes = crate::pack_proof_from_collected::>(&root, set)?; + result.insert(child_info.proof_info(), trie_nodes); + } + 
StorageProof::TrieSkipHashesFull(result) + } else { + return Err(missing_pack_input::()); + } + }, + StorageProofKind::TrieSkipHashes => { + if let Input::ChildTrieRoots(roots) = input { + let mut result = Vec::default(); + for (child_info, set) in collected.iter() { + let root = roots.get(&child_info.proof_info()) + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .ok_or_else(|| missing_pack_input::())?; + let trie_nodes = crate::pack_proof_from_collected::>(&root, set)?; + result.push(trie_nodes); + } + StorageProof::TrieSkipHashes(result) + } else { + return Err(missing_pack_input::()); + } + }, + StorageProofKind::KnownQueryPlanAndValues + | StorageProofKind::KnownQueryPlan => { + unimplemented!("TODO pack query plan mode") + }, + }) + } + + /// This produce the proof from collected information on a flat backend. + pub fn extract_proof_from_flat( + collected: &RecordMapTrieNodes, + kind: StorageProofKind, + _input: &Input, + ) -> Result + where H::Out: Codec, + { + Ok(match kind { + StorageProofKind::Flatten => { + let trie_nodes = collected + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + StorageProof::Flatten(trie_nodes) }, - s => s, + _ => return Err(impossible_backend_build::()), }) } @@ -614,10 +672,10 @@ impl StorageProof { // unpack match &proof { &StorageProof::TrieSkipHashesFull(..) => { - proof = proof.unpack::(false)?.0; + proof = proof.unpack::()?; }, &StorageProof::TrieSkipHashes(..) => { - proof = proof.unpack::(false)?.0; + proof = proof.unpack::()?; }, &StorageProof::KnownQueryPlanAndValues(..) | &StorageProof::KnownQueryPlan(..) => { @@ -853,6 +911,42 @@ impl IntoIterator for ChildrenProofMap { } } + +/// Container recording trie nodes. 
+#[derive(Clone)] +pub struct RecordMapTrieNodes(HashMap>); + +impl sp_std::default::Default for RecordMapTrieNodes { + fn default() -> Self { + RecordMapTrieNodes(Default::default()) + } +} + + +impl sp_std::ops::Deref for RecordMapTrieNodes { + type Target = HashMap>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for RecordMapTrieNodes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl HashDBRef for RecordMapTrieNodes { + fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { + self.0.get(key).and_then(Clone::clone) + } + + fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { + self.0.get(key).map(Option::is_some).unwrap_or(false) + } +} + #[test] fn legacy_proof_codec() { // random content for proof, we test serialization From 7a9098b432521ef7dfe2679b472a1a0778bc44b3 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 20 Apr 2020 17:58:07 +0200 Subject: [PATCH 106/185] fix compile error, remove unused flatten method --- .../api/proc-macro/src/impl_runtime_apis.rs | 2 +- primitives/trie/src/storage_proof.rs | 41 ++----------------- 2 files changed, 4 insertions(+), 39 deletions(-) diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index d4d07107a5beb..915489282f01c 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -293,7 +293,7 @@ fn generate_runtime_api_base_structures() -> Result { .and_then(|(recorder, kind)| { // TODO EMCH this will fail for compact as we need the register // root - recorder.extract_proof(&kind, #crate_::ProofInput::None).ok() + recorder.extract_proof(kind, #crate_::ProofInput::None).ok() }) } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 7df27dff55b8f..7dbb398af4856 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ 
-480,8 +480,7 @@ impl StorageProof { StorageProofNodeIterator::new(self) } - /// This unpacks `TrieSkipHashesFull` to `Full` or do nothing. - fn unpack( + fn trie_skip_unpack( self, ) -> Result where H::Out: Codec, @@ -616,40 +615,6 @@ impl StorageProof { }) } - /// This flatten some children expanded proof to their - /// non expanded counterpart when possible. - /// Note that if for some reason child proof were not - /// attached to the top trie, they will be lost. - pub fn flatten(self) -> Self { - match self { - StorageProof::Full(children) => { - let mut result = Vec::new(); - children.into_iter().for_each(|(child_info, proof)| { - match child_info.child_type() { - ChildType::ParentKeyId => { - // this can get merged with top, since it is proof we do not use prefix - result.extend(proof); - } - } - }); - StorageProof::Flatten(result) - }, - StorageProof::TrieSkipHashesFull(children) => { - let mut result = Vec::new(); - children.into_iter().for_each(|(child_info, proof)| { - match child_info.child_type() { - ChildType::ParentKeyId => { - result.push(proof); - } - } - }); - - StorageProof::TrieSkipHashes(result) - }, - _ => self, - } - } - /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. @@ -672,10 +637,10 @@ impl StorageProof { // unpack match &proof { &StorageProof::TrieSkipHashesFull(..) => { - proof = proof.unpack::()?; + proof = proof.trie_skip_unpack::()?; }, &StorageProof::TrieSkipHashes(..) => { - proof = proof.unpack::()?; + proof = proof.trie_skip_unpack::()?; }, &StorageProof::KnownQueryPlanAndValues(..) | &StorageProof::KnownQueryPlan(..) => { From 728daf4822f55f1fe2168885719c700c58468b43 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 20 Apr 2020 19:23:14 +0200 Subject: [PATCH 107/185] add calls to the proof, untested. 
--- primitives/trie/src/storage_proof.rs | 79 +++++++++++++++++++++++++--- 1 file changed, 73 insertions(+), 6 deletions(-) diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 7dbb398af4856..7463e1622f5e6 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -41,6 +41,16 @@ fn missing_pack_input() -> sp_std::boxed::Box>> { sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) } +fn missing_collected_input() -> sp_std::boxed::Box>> { + // TODO better error in trie db crate eg Packing error + sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +} + +fn missing_verify_input() -> sp_std::boxed::Box>> { + // TODO better error in trie db crate eg Packing error + sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +} + fn impossible_merge_for_proof() -> sp_std::boxed::Box>> { // TODO better error in trie db crate eg Packing error sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) @@ -516,11 +526,42 @@ impl StorageProof { /// verification (`StorageProofKind::can_use_verify`). pub fn verify( self, - _additional_content: &Input, - ) -> Result, H> + input: &Input, + ) -> Result, H> where H::Out: Codec, { - unimplemented!("TODO run the validation of the query plan one") + match self { + StorageProof::KnownQueryPlan(..) 
=> { + unimplemented!("there is no such mode actually"); + }, + StorageProof::KnownQueryPlanAndValues(proof_children) => { + if let Input::QueryPlanWithValues(input_children) = input { + let mut root_hash = H::Out::default(); + for (child_info, nodes) in proof_children.iter() { + if let Some((root, input)) = input_children.get(child_info) { + // Layout h is the only supported one at the time being + if root.len() != root_hash.as_ref().len() { + return Ok(Some((false, Output::None))); + } + root_hash.as_mut().copy_from_slice(&root[..]); + if let Err(_) = trie_db::proof::verify_proof::, _, _, _>( + &root_hash, + &nodes[..], + input.iter(), + ) { + return Ok(Some((false, Output::None))); + } + } else { + return Err(missing_verify_input::()); + } + } + Ok(Some((true, Output::None))) + } else { + Err(missing_verify_input::()) + } + }, + _ => Ok(None), + } } /// This produce the proof from collected information. @@ -588,9 +629,35 @@ impl StorageProof { return Err(missing_pack_input::()); } }, - StorageProofKind::KnownQueryPlanAndValues - | StorageProofKind::KnownQueryPlan => { - unimplemented!("TODO pack query plan mode") + StorageProofKind::KnownQueryPlan => unimplemented!("Actually do not exists"), + StorageProofKind::KnownQueryPlanAndValues => { + if let Input::QueryPlan(input_children) = input { + let mut result = ChildrenProofMap::default(); + let mut count_input = input_children.len(); + let mut root_hash = H::Out::default(); + for (child_info, set) in collected.iter() { + let child_info_proof = child_info.proof_info(); + if let Some((root, keys)) = input_children.get(&child_info_proof) { + count_input -= 1; + // Layout h is the only supported one at the time being + if root.len() != root_hash.as_ref().len() { + return Err(missing_pack_input::()); + } + root_hash.as_mut().copy_from_slice(&root[..]); + let trie = >>::new(set, &root_hash)?; + let compacted = trie_db::proof::generate_proof(&trie, keys)?; + result.insert(child_info_proof, compacted); + } else { + 
return Err(missing_pack_input::()); + } + } + if count_input > 0 { + return Err(missing_collected_input::()); + } + StorageProof::KnownQueryPlanAndValues(result) + } else { + return Err(missing_pack_input::()); + } }, }) } From 8724b573263e364cbdc63c68a2dc58c15230bf54 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 20 Apr 2020 19:32:49 +0200 Subject: [PATCH 108/185] Removing proof output and queryplan without value (does not exist in fact). Added a tag on output as it can be interesting. --- primitives/trie/src/lib.rs | 4 +- primitives/trie/src/storage_proof.rs | 83 ++++------------------------ 2 files changed, 13 insertions(+), 74 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index ccd01ba310cc4..0b658fa7bb052 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -37,8 +37,8 @@ pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, LegacyStorageProof, ChildrenProofMap, - StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, Output as ProofOutput, - OutputKind as ProofOutputKind, RecordMapTrieNodes}; + StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, + RecordMapTrieNodes}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 7463e1622f5e6..cbf61e31ed2fc 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -79,13 +79,10 @@ pub enum StorageProofKind { /// Testing only indices /// Kind for `StorageProof::Full`. - Full = 125, + Full = 126, /// Kind for `StorageProof::TrieSkipHashesFull`. - TrieSkipHashesFull = 126, - - /// Kind for `StorageProof::KnownQueryPlan`. 
- KnownQueryPlan = 127, + TrieSkipHashesFull = 127, } impl StorageProofKind { @@ -154,31 +151,12 @@ pub enum InputKind { QueryPlanWithValues, } -/// Content produced on proof verification. -pub enum Output { - /// Proof only verify to success or failure. - None, - - /// Contains key and values queried during the proof processing. - QueryPlanWithValues(ChildrenProofMap, Option>)>>), -} - -/// Kind for designing an `Output` variant. -pub enum OutputKind { - /// `Output::None` kind. - None, - - /// `Output::QueryPlanWithValues` kind. - QueryPlanWithValues, -} - impl StorageProofKind { /// Some proof variants requires more than just the collected /// encoded nodes. pub fn processing_input_kind(&self) -> InputKind { match self { StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlan, - | StorageProofKind::KnownQueryPlan => InputKind::QueryPlan, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => InputKind::ChildTrieRoots, StorageProofKind::Full @@ -189,7 +167,6 @@ impl StorageProofKind { /// Same as `need_additional_info_to_produce` but for reading. pub fn verify_input_kind(&self) -> InputKind { match self { - StorageProofKind::KnownQueryPlan => InputKind::QueryPlan, StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanWithValues, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull @@ -198,20 +175,10 @@ impl StorageProofKind { } } - /// Some proof variants requires more than just the collected - /// encoded nodes. - pub fn produce_additional_info(&self) -> OutputKind { - match self { - StorageProofKind::KnownQueryPlan => OutputKind::QueryPlanWithValues, - _ => OutputKind::None, - } - } - /// Some proof can get unpack into another proof representation. 
pub fn can_unpack(&self) -> bool { match self { - StorageProofKind::KnownQueryPlanAndValues - | StorageProofKind::KnownQueryPlan => false, + StorageProofKind::KnownQueryPlanAndValues => false, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => true, StorageProofKind::Full @@ -225,7 +192,6 @@ impl StorageProofKind { match self { StorageProofKind::Flatten => false, StorageProofKind::Full - | StorageProofKind::KnownQueryPlan | StorageProofKind::KnownQueryPlanAndValues | StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => true, @@ -240,16 +206,14 @@ impl StorageProofKind { | StorageProofKind::TrieSkipHashes => Some(false), StorageProofKind::Full | StorageProofKind::TrieSkipHashesFull => Some(true), - StorageProofKind::KnownQueryPlanAndValues - | StorageProofKind::KnownQueryPlan => None, + StorageProofKind::KnownQueryPlanAndValues => None, } } /// Proof that should be use with `verify` method. pub fn can_use_verify(&self) -> bool { match self { - StorageProofKind::KnownQueryPlanAndValues - | StorageProofKind::KnownQueryPlan => true, + StorageProofKind::KnownQueryPlanAndValues => true, _ => false, } } @@ -260,8 +224,7 @@ impl StorageProofKind { /// failure is related to an unsupported capability. pub fn can_use_as_partial_db(&self) -> bool { match self { - StorageProofKind::KnownQueryPlanAndValues - | StorageProofKind::KnownQueryPlan => false, + StorageProofKind::KnownQueryPlanAndValues => false, _ => true, } } @@ -316,12 +279,6 @@ pub enum StorageProof { // Following variants are only for testing, they still can be use but // decoding is not implemented. - /// This acts as `KnownQueryPlanAndValues` but without value. - /// Values are therefore store in the proof and can be retrieved - /// after succesfully checking the proof. - /// This is mainly provided for test purpose and extensibility. 
- KnownQueryPlan(ChildrenProofMap), - /// Fully described proof, it includes the child trie individual description and split its /// content by child trie. /// Currently Full variant is unused as all our child trie kind can share a same memory db @@ -358,8 +315,6 @@ impl Decode for StorageProof { StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Decode::decode(value)?), StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(Decode::decode(value)?), - StorageProofKind::KnownQueryPlan - => StorageProof::KnownQueryPlan(Decode::decode(value)?), StorageProofKind::Full => StorageProof::Full(Decode::decode(value)?), StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(Decode::decode(value)?), @@ -374,7 +329,6 @@ impl Encode for StorageProof { StorageProof::Flatten(p) => p.encode_to(dest), StorageProof::TrieSkipHashes(p) => p.encode_to(dest), StorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), - StorageProof::KnownQueryPlan(p) => p.encode_to(dest), StorageProof::Full(p) => p.encode_to(dest), StorageProof::TrieSkipHashesFull(p) => p.encode_to(dest), } @@ -465,7 +419,6 @@ impl StorageProof { StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(ChildrenProofMap::default()), StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(ChildrenProofMap::default()), - StorageProofKind::KnownQueryPlan => StorageProof::KnownQueryPlan(ChildrenProofMap::default()), StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Default::default()), } } @@ -476,7 +429,6 @@ impl StorageProof { StorageProof::Flatten(data) => data.is_empty(), StorageProof::Full(data) => data.is_empty(), StorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), - StorageProof::KnownQueryPlan(data) => data.is_empty(), StorageProof::TrieSkipHashes(data) => data.is_empty(), StorageProof::TrieSkipHashesFull(data) 
=> data.is_empty(), } @@ -527,13 +479,10 @@ impl StorageProof { pub fn verify( self, input: &Input, - ) -> Result, H> + ) -> Result, H> where H::Out: Codec, { match self { - StorageProof::KnownQueryPlan(..) => { - unimplemented!("there is no such mode actually"); - }, StorageProof::KnownQueryPlanAndValues(proof_children) => { if let Input::QueryPlanWithValues(input_children) = input { let mut root_hash = H::Out::default(); @@ -541,7 +490,7 @@ impl StorageProof { if let Some((root, input)) = input_children.get(child_info) { // Layout h is the only supported one at the time being if root.len() != root_hash.as_ref().len() { - return Ok(Some((false, Output::None))); + return Ok(Some(false)); } root_hash.as_mut().copy_from_slice(&root[..]); if let Err(_) = trie_db::proof::verify_proof::, _, _, _>( @@ -549,13 +498,13 @@ impl StorageProof { &nodes[..], input.iter(), ) { - return Ok(Some((false, Output::None))); + return Ok(Some(false)); } } else { return Err(missing_verify_input::()); } } - Ok(Some((true, Output::None))) + Ok(Some(true)) } else { Err(missing_verify_input::()) } @@ -629,7 +578,6 @@ impl StorageProof { return Err(missing_pack_input::()); } }, - StorageProofKind::KnownQueryPlan => unimplemented!("Actually do not exists"), StorageProofKind::KnownQueryPlanAndValues => { if let Input::QueryPlan(input_children) = input { let mut result = ChildrenProofMap::default(); @@ -709,8 +657,7 @@ impl StorageProof { &StorageProof::TrieSkipHashes(..) => { proof = proof.trie_skip_unpack::()?; }, - &StorageProof::KnownQueryPlanAndValues(..) - | &StorageProof::KnownQueryPlan(..) => { + &StorageProof::KnownQueryPlanAndValues(..) => { return Err(impossible_merge_for_proof::()); }, _ => (), @@ -720,7 +667,6 @@ impl StorageProof { StorageProof::TrieSkipHashesFull(..) | StorageProof::TrieSkipHashes(..) | StorageProof::KnownQueryPlanAndValues(..) - | StorageProof::KnownQueryPlan(..) 
=> unreachable!("Unpacked or early return earlier"), StorageProof::Flatten(proof) => { if !do_flatten { @@ -760,7 +706,6 @@ impl StorageProof { StorageProof::Flatten(_) => StorageProofKind::Flatten, StorageProof::TrieSkipHashes(_) => StorageProofKind::TrieSkipHashes, StorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, - StorageProof::KnownQueryPlan(_) => StorageProofKind::KnownQueryPlan, StorageProof::Full(_) => StorageProofKind::Full, StorageProof::TrieSkipHashesFull(_) => StorageProofKind::TrieSkipHashesFull, } @@ -804,9 +749,6 @@ impl StorageProof { StorageProof::KnownQueryPlanAndValues(_children) => { return Err(impossible_backend_build::()); }, - StorageProof::KnownQueryPlan(_children) => { - return Err(impossible_backend_build::()); - }, } Ok(result) } @@ -858,9 +800,6 @@ impl StorageProof { StorageProof::KnownQueryPlanAndValues(_children) => { return Err(impossible_backend_build::()); }, - StorageProof::KnownQueryPlan(_children) => { - return Err(impossible_backend_build::()); - }, } Ok(db) } From 4641cbd8e2947010de50e84780b8c124e1413b1c Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 10:38:07 +0200 Subject: [PATCH 109/185] more acceptable error management --- primitives/trie/src/storage_proof.rs | 127 +++++++++++++++++---------- 1 file changed, 81 insertions(+), 46 deletions(-) diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index cbf61e31ed2fc..905ef26f0dbd7 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -22,7 +22,6 @@ use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput}; use hash_db::{Hasher, HashDB, HashDBRef, EMPTY_PREFIX}; use crate::{MemoryDB, Layout}; use sp_storage::{ChildInfoProof, ChildType, ChildrenMap}; -use crate::TrieError; use trie_db::DBValue; // we are not using std as this use in no_std is // only allowed here because it is already use in @@ -33,32 +32,73 @@ use 
hashbrown::HashMap; #[cfg(feature = "std")] use std::collections::HashMap; -type Result = sp_std::result::Result>>>; +type Result = sp_std::result::Result; type CodecResult = sp_std::result::Result; -fn missing_pack_input() -> sp_std::boxed::Box>> { - // TODO better error in trie db crate eg Packing error - sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +#[cfg(feature = "std")] +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Error { + /// Error produce by storage proof logic. + /// It is formatted in std to simplify type. + Trie(String), + /// Error produce by trie manipulation. + Proof(&'static str), } -fn missing_collected_input() -> sp_std::boxed::Box>> { - // TODO better error in trie db crate eg Packing error - sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +#[cfg(not(feature = "std"))] +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Error { + /// Error produce by storage proof logic. + Trie, + /// Error produce by trie manipulation. 
+ Proof, } -fn missing_verify_input() -> sp_std::boxed::Box>> { - // TODO better error in trie db crate eg Packing error - sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +#[cfg(feature = "std")] +impl sp_std::convert::From> for Error { + fn from(e: sp_std::boxed::Box) -> Self { + // currently only trie error is build from box + Error::Trie(format!("{}", e)) + } } -fn impossible_merge_for_proof() -> sp_std::boxed::Box>> { - // TODO better error in trie db crate eg Packing error - sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +#[cfg(feature = "std")] +impl sp_std::fmt::Display for Error { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + match self { + Error::Trie(msg) => write!(f, "Proof error trie: {}", msg), + Error::Proof(msg) => write!(f, "Proof error: {}", msg), + } + } } -fn impossible_backend_build() -> sp_std::boxed::Box>> { - // TODO better error in trie db crate eg Packing error - sp_std::boxed::Box::new(TrieError::>::IncompleteDatabase(Default::default())) +#[cfg(not(feature = "std"))] +impl sp_std::convert::From> for Error { + fn from(_e: sp_std::boxed::Box) -> Self { + Error::Trie + } +} + +#[cfg(feature = "std")] +const fn error(message: &'static str) -> Error { + Error::Proof(message) +} + +#[cfg(not(feature = "std"))] +const fn error(_message: &'static str) -> Error { + Error::Proof +} + +const fn missing_pack_input() -> Error { + error("Packing input missing for proof") +} + +const fn missing_verify_input() -> Error { + error("Input missing for proof verification") +} + +const fn no_partial_db_support() -> Error { + error("Partial db not supported for this proof") } /// Different kind of proof representation are allowed. 
@@ -444,7 +484,7 @@ impl StorageProof { fn trie_skip_unpack( self, - ) -> Result + ) -> Result where H::Out: Codec, { match self { @@ -479,7 +519,7 @@ impl StorageProof { pub fn verify( self, input: &Input, - ) -> Result, H> + ) -> Result> where H::Out: Codec, { match self { @@ -501,12 +541,12 @@ impl StorageProof { return Ok(Some(false)); } } else { - return Err(missing_verify_input::()); + return Err(missing_verify_input()); } } Ok(Some(true)) } else { - Err(missing_verify_input::()) + Err(missing_verify_input()) } }, _ => Ok(None), @@ -518,7 +558,7 @@ impl StorageProof { collected: &ChildrenMap>, kind: StorageProofKind, input: &Input, - ) -> Result + ) -> Result where H::Out: Codec, { Ok(match kind { @@ -554,13 +594,13 @@ impl StorageProof { for (child_info, set) in collected.iter() { let root = roots.get(&child_info.proof_info()) .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .ok_or_else(|| missing_pack_input::())?; + .ok_or_else(|| missing_pack_input())?; let trie_nodes = crate::pack_proof_from_collected::>(&root, set)?; result.insert(child_info.proof_info(), trie_nodes); } StorageProof::TrieSkipHashesFull(result) } else { - return Err(missing_pack_input::()); + return Err(missing_pack_input()); } }, StorageProofKind::TrieSkipHashes => { @@ -569,42 +609,37 @@ impl StorageProof { for (child_info, set) in collected.iter() { let root = roots.get(&child_info.proof_info()) .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .ok_or_else(|| missing_pack_input::())?; + .ok_or_else(|| missing_pack_input())?; let trie_nodes = crate::pack_proof_from_collected::>(&root, set)?; result.push(trie_nodes); } StorageProof::TrieSkipHashes(result) } else { - return Err(missing_pack_input::()); + return Err(missing_pack_input()); } }, StorageProofKind::KnownQueryPlanAndValues => { if let Input::QueryPlan(input_children) = input { let mut result = ChildrenProofMap::default(); - let mut count_input = input_children.len(); let mut root_hash = H::Out::default(); for (child_info, set) 
in collected.iter() { let child_info_proof = child_info.proof_info(); if let Some((root, keys)) = input_children.get(&child_info_proof) { - count_input -= 1; // Layout h is the only supported one at the time being if root.len() != root_hash.as_ref().len() { - return Err(missing_pack_input::()); + return Err(missing_pack_input()); } root_hash.as_mut().copy_from_slice(&root[..]); let trie = >>::new(set, &root_hash)?; let compacted = trie_db::proof::generate_proof(&trie, keys)?; result.insert(child_info_proof, compacted); } else { - return Err(missing_pack_input::()); + return Err(missing_pack_input()); } } - if count_input > 0 { - return Err(missing_collected_input::()); - } StorageProof::KnownQueryPlanAndValues(result) } else { - return Err(missing_pack_input::()); + return Err(missing_pack_input()); } }, }) @@ -615,7 +650,7 @@ impl StorageProof { collected: &RecordMapTrieNodes, kind: StorageProofKind, _input: &Input, - ) -> Result + ) -> Result where H::Out: Codec, { Ok(match kind { @@ -626,7 +661,7 @@ impl StorageProof { .collect(); StorageProof::Flatten(trie_nodes) }, - _ => return Err(impossible_backend_build::()), + _ => return Err(no_partial_db_support()), }) } @@ -638,7 +673,7 @@ impl StorageProof { /// The function cannot pack back proof as it does not have reference to additional information /// needed. So for this the additional information need to be merged separately and the result /// of this merge be packed with it afterward. - pub fn merge(proofs: I) -> Result + pub fn merge(proofs: I) -> Result where I: IntoIterator, H: Hasher, @@ -658,7 +693,7 @@ impl StorageProof { proof = proof.trie_skip_unpack::()?; }, &StorageProof::KnownQueryPlanAndValues(..) => { - return Err(impossible_merge_for_proof::()); + return Err(error("Proof incompatibility for merging")); }, _ => (), } @@ -715,7 +750,7 @@ impl StorageProof { /// Currently child trie are all with same backend /// implementation, therefore using /// `as_partial_flat_db` is prefered. 
- pub fn as_partial_db(self) -> Result>, H> + pub fn as_partial_db(self) -> Result>> where H: Hasher, { @@ -747,14 +782,14 @@ impl StorageProof { result.insert(ChildInfoProof::top_trie(), db); }, StorageProof::KnownQueryPlanAndValues(_children) => { - return Err(impossible_backend_build::()); + return Err(no_partial_db_support()); }, } Ok(result) } /// Create in-memory storage of proof check backend. - pub fn as_partial_flat_db(self) -> Result, H> + pub fn as_partial_flat_db(self) -> Result> where H: Hasher, { @@ -798,7 +833,7 @@ impl StorageProof { } }, StorageProof::KnownQueryPlanAndValues(_children) => { - return Err(impossible_backend_build::()); + return Err(no_partial_db_support()); }, } Ok(db) @@ -833,17 +868,17 @@ impl Iterator for StorageProofNodeIterator { } impl TryInto> for StorageProof { - type Error = sp_std::boxed::Box>>; + type Error = Error; - fn try_into(self) -> Result, H> { + fn try_into(self) -> Result> { self.as_partial_flat_db() } } impl TryInto>> for StorageProof { - type Error = sp_std::boxed::Box>>; + type Error = Error; - fn try_into(self) -> Result>, H> { + fn try_into(self) -> Result>> { self.as_partial_db() } } From c2d1cf10e430a42affb10cb8e86aec7d773197fe Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 11:00:51 +0200 Subject: [PATCH 110/185] switch root registration to encoded data. 
--- primitives/state-machine/src/trie_backend.rs | 8 +++--- .../state-machine/src/trie_backend_essence.rs | 25 ++++++++++--------- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index fed5216195446..400cedc2ae7b9 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -58,10 +58,12 @@ impl, H: Hasher> TrieBackend where H::Out: Codec if let Some(register_roots) = self.essence.register_roots.as_ref() { let mut dest = ChildrenProofMap::default(); dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); - let read_lock = register_roots.read(); - for (child_info, root) in read_lock.iter() { + let roots = { + std::mem::replace(&mut *register_roots.write(), Default::default()) + }; + for (child_info, root) in roots.into_iter() { if let Some(root) = root { - dest.insert(child_info.proof_info(), root.encode()); + dest.insert(child_info.proof_info(), root); } } ProofInput::ChildTrieRoots(dest) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 708596cb9a277..86e271301c3a7 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -43,8 +43,7 @@ pub struct TrieBackendEssence, H: Hasher> { root: H::Out, /// If defined, we store encoded visited roots for top_trie and child trie in this /// map. It also act as a cache. 
- /// TODO EMCH switch to register encoded value (this assumes same hash out between child) - pub register_roots: Option>>>>, + pub register_roots: Option>>>>, } impl, H: Hasher> TrieBackendEssence where H::Out: Decode + Encode { @@ -52,7 +51,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub fn new( storage: S, root: H::Out, - register_roots: Option>>>>, + register_roots: Option>>>>, ) -> Self { TrieBackendEssence { storage, @@ -86,15 +85,14 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub(crate) fn child_root_encoded(&self, child_info: &ChildInfo) -> Result, String> { if let Some(cache) = self.register_roots.as_ref() { if let Some(result) = cache.read().get(child_info) { - return Ok(result.map(|root| root.encode())); + return Ok(result.clone()); } } let root: Option = self.storage(&child_info.prefixed_storage_key()[..])?; if let Some(cache) = self.register_roots.as_ref() { - let root = root.as_ref().and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); - cache.write().insert(child_info.clone(), root); + cache.write().insert(child_info.clone(), root.clone()); } Ok(root) @@ -103,18 +101,21 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Access the root of the child storage in its parent trie fn child_root(&self, child_info: &ChildInfo) -> Result, String> { if let Some(cache) = self.register_roots.as_ref() { - if let Some(result) = cache.read().get(child_info) { - return Ok(result.clone()); + if let Some(root) = cache.read().get(child_info) { + let root = root.as_ref() + .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + return Ok(root); } } - let root: Option = self.storage(&child_info.prefixed_storage_key()[..])? 
- .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); - + let encoded_root = self.storage(&child_info.prefixed_storage_key()[..])?; if let Some(cache) = self.register_roots.as_ref() { - cache.write().insert(child_info.clone(), root); + cache.write().insert(child_info.clone(), encoded_root.clone()); } + let root: Option = encoded_root + .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + Ok(root) } From 1a19852990d4702d27f4ad99e3844f8a24f080ba Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 11:16:09 +0200 Subject: [PATCH 111/185] only collect root when needed. --- client/src/call_executor.rs | 3 +- primitives/state-machine/src/lib.rs | 6 +-- .../state-machine/src/proving_backend.rs | 51 ++++++++++--------- primitives/state-machine/src/trie_backend.rs | 1 - 4 files changed, 31 insertions(+), 30 deletions(-) diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index 1ef4a17f7aeb7..c44726e34792b 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -144,7 +144,7 @@ where let mut state = self.backend.state_at(*at)?; match recorder { - Some((recorder, _target_proof_kind)) => { + Some((recorder, target_proof_kind)) => { let trie_state = state.as_trie_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box @@ -158,6 +158,7 @@ where let backend = sp_state_machine::ProvingBackend::new_with_recorder( trie_state, recorder.clone(), + target_proof_kind, ); let changes = &mut *changes.borrow_mut(); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 75267d77046a0..c138e4069e3d6 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -505,7 +505,7 @@ where { let proving_backend = proving_backend::ProvingBackend::new( trie_backend, - kind.need_register_full(), + kind, ); let mut sm = StateMachine::<_, H, N, Exec>::new( &proving_backend, @@ -699,7 +699,7 @@ where { let 
proving_backend = proving_backend::ProvingBackend::<_, H>::new( trie_backend, - kind.need_register_full(), + kind, ); for key in keys.into_iter() { proving_backend @@ -724,7 +724,7 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind.need_register_full()); + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 62498d715a59c..e0db216055745 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -157,19 +157,20 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> where H::Out: Codec { /// Create new proving backend. - pub fn new(backend: &'a TrieBackend, full: bool) -> Self { - let proof_recorder = if full { + pub fn new(backend: &'a TrieBackend, kind: StorageProofKind) -> Self { + let proof_recorder = if kind.need_register_full() { ProofRecorder::Full(Default::default()) } else { ProofRecorder::Flat(Default::default()) }; - Self::new_with_recorder(backend, proof_recorder) + Self::new_with_recorder(backend, proof_recorder, kind) } /// Create new proving backend with the given recorder. pub fn new_with_recorder( backend: &'a TrieBackend, proof_recorder: ProofRecorder, + proof_kind: StorageProofKind, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); @@ -177,10 +178,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: essence.backend_storage(), proof_recorder, }; - // TODO EMCH registering root can be disabled in most case: - // would simply need target proof as parameter (same thing for new - // function). 
- ProvingBackend(TrieBackend::new_with_roots(recorder, root)) + if let ProofInputKind::ChildTrieRoots = proof_kind.processing_input_kind() { + ProvingBackend(TrieBackend::new_with_roots(recorder, root)) + } else { + ProvingBackend(TrieBackend::new(recorder, root)) + } } /// Extracting the gathered unordered proof. @@ -369,7 +371,7 @@ where let db = proof.as_partial_flat_db() .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.contains(&root, EMPTY_PREFIX) { - Ok(TrieBackend::new_with_roots(db, root)) + Ok(TrieBackend::new(db, root)) } else { Err(Box::new(ExecutionError::InvalidProof)) } @@ -390,7 +392,7 @@ where if db.deref().get(&ChildInfoProof::top_trie()) .map(|db| db.contains(&root, EMPTY_PREFIX)) .unwrap_or(false) { - Ok(TrieBackend::new_with_roots(db, root)) + Ok(TrieBackend::new(db, root)) } else { Err(Box::new(ExecutionError::InvalidProof)) } @@ -407,33 +409,33 @@ mod tests { fn test_proving<'a>( trie_backend: &'a TrieBackend, BlakeTwo256>, - full: bool, + kind: StorageProofKind, ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { - ProvingBackend::new(trie_backend, full) + ProvingBackend::new(trie_backend, kind) } #[test] fn proof_is_empty_until_value_is_read() { let trie_backend = test_trie(); let kind = StorageProofKind::Flatten; - assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind).extract_proof(kind).unwrap().is_empty()); let kind = StorageProofKind::Full; - assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind).extract_proof(kind).unwrap().is_empty()); let kind = StorageProofKind::TrieSkipHashesFull; - assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind).extract_proof(kind).unwrap().is_empty()); let kind = StorageProofKind::TrieSkipHashes; - 
assert!(test_proving(&trie_backend, kind.need_register_full()).extract_proof(kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind).extract_proof(kind).unwrap().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { let trie_backend = test_trie(); let kind = StorageProofKind::Flatten; - let backend = test_proving(&trie_backend, kind.need_register_full()); + let backend = test_proving(&trie_backend, kind); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof(kind).unwrap().is_empty()); let kind = StorageProofKind::Full; - let backend = test_proving(&trie_backend, kind.need_register_full()); + let backend = test_proving(&trie_backend, kind); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof(kind).unwrap().is_empty()); } @@ -450,9 +452,9 @@ mod tests { #[test] fn passes_through_backend_calls() { - let test = |flat| { + let test = |proof_kind| { let trie_backend = test_trie(); - let proving_backend = test_proving(&trie_backend, flat); + let proving_backend = test_proving(&trie_backend, proof_kind); assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); @@ -461,8 +463,8 @@ mod tests { assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); }; - test(true); - test(false); + test(StorageProofKind::Flatten); + test(StorageProofKind::Full); } #[test] @@ -479,7 +481,7 @@ mod tests { (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); let test = |kind: StorageProofKind| { - let proving = ProvingBackend::new(trie, kind.need_register_full()); + let proving = ProvingBackend::new(trie, kind); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof(kind).unwrap(); @@ -534,8 +536,7 @@ mod tests { )); let test = |kind: StorageProofKind| { - let full = 
kind.need_register_full(); - let proving = ProvingBackend::new(trie, full); + let proving = ProvingBackend::new(trie, kind); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof(kind).unwrap(); @@ -550,7 +551,7 @@ mod tests { assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); assert_eq!(proof_check.storage(&[64]).unwrap(), None); - let proving = ProvingBackend::new(trie, full); + let proving = ProvingBackend::new(trie, kind); assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(kind).unwrap(); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 400cedc2ae7b9..09dca0deb6991 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -37,7 +37,6 @@ pub struct TrieBackend, H: Hasher> { impl, H: Hasher> TrieBackend where H::Out: Codec { /// Create new trie-based backend. 
- /// TODO check if still used pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root, None), From 02128d2d6a0cc01ccb9043be931fe233cb865392 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 11:19:40 +0200 Subject: [PATCH 112/185] fix --- client/src/call_executor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index c44726e34792b..a3d9b205c878c 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -158,7 +158,7 @@ where let backend = sp_state_machine::ProvingBackend::new_with_recorder( trie_state, recorder.clone(), - target_proof_kind, + *target_proof_kind, ); let changes = &mut *changes.borrow_mut(); From a9c1e6510774dc2fa87a42aa0563c6bf48368eeb Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 11:24:59 +0200 Subject: [PATCH 113/185] remove old todos --- primitives/state-machine/src/in_memory_backend.rs | 3 --- primitives/state-machine/src/lib.rs | 1 - 2 files changed, 4 deletions(-) diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 299ad20fc4f11..83126abbf78e0 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -324,9 +324,6 @@ impl Backend for InMemory where H::Out: Codec { .collect() } - // TODO instead of changing mutabliity of the returned value, we could wrap the trie - // backend in a new backend that register roots -> would be cleaner and still allow - // caching. 
fn as_trie_backend(&mut self)-> Option<&TrieBackend> { let mut mdb = MemoryDB::default(); let mut new_child_roots = Vec::new(); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index c138e4069e3d6..fd45409c24027 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -577,7 +577,6 @@ where } /// Check execution proof on proving backend, generated by `prove_execution` call. -/// TODO EMCH this is exact copy of non flat version: makes trie_backend a parameter! pub fn execution_flat_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, From 4f1e0acba9ceadc2df69b0336145e22bd481898e Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 15:59:56 +0200 Subject: [PATCH 114/185] Set kind at the consumer level (static in rpc) --- client/api/src/call_executor.rs | 8 +- client/api/src/lib.rs | 3 +- client/api/src/proof_provider.rs | 6 +- client/finality-grandpa/src/finality_proof.rs | 19 +++-- client/network/src/protocol.rs | 21 +++-- .../src/protocol/light_client_handler.rs | 21 ++++- client/network/src/protocol/message.rs | 5 +- client/src/call_executor.rs | 6 +- client/src/client.rs | 16 ++-- client/src/light/call_executor.rs | 85 +++++++++++++------ client/src/light/fetcher.rs | 76 +++++++++++------ primitives/state-machine/src/lib.rs | 3 +- primitives/trie/src/storage_proof.rs | 14 +++ 13 files changed, 193 insertions(+), 90 deletions(-) diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index f3d54986c45ba..2e4a39baa6eda 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -108,14 +108,15 @@ pub trait CallExecutor { mut state: S, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8] + call_data: &[u8], + kind: StorageProofKind, ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { let trie_state = state.as_trie_backend() .ok_or_else(|| 
Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box )?; - self.prove_at_trie_state(trie_state, overlay, method, call_data) + self.prove_at_trie_state(trie_state, overlay, method, call_data, kind) } /// Execute a call to a contract on top of given trie state, gathering execution proof. @@ -126,7 +127,8 @@ pub trait CallExecutor { trie_state: &sp_state_machine::TrieBackend>, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8] + call_data: &[u8], + kind: StorageProofKind, ) -> Result<(Vec, StorageProof), sp_blockchain::Error>; /// Get runtime version if supported. diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index e4080323c188e..ed3041d84649f 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -34,7 +34,8 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{StorageProof, ExecutionStrategy, CloneableSpawn}; +pub use sp_state_machine::{LegacyStorageProof, StorageProof, + StorageProofKind, ExecutionStrategy, CloneableSpawn}; /// Utility methods for the client. pub mod utils { diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 93160855eaebe..edcfe5c926bde 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -18,7 +18,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT}, }; -use crate::{StorageProof, ChangesProof}; +use crate::{StorageProof, ChangesProof, StorageProofKind}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; /// Interface for providing block proving utilities. 
@@ -28,6 +28,7 @@ pub trait ProofProvider { &self, id: &BlockId, keys: &mut dyn Iterator, + kind: StorageProofKind, ) -> sp_blockchain::Result; /// Reads child storage value at a given block + storage_key + key, returning @@ -37,6 +38,7 @@ pub trait ProofProvider { id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, + kind: StorageProofKind, ) -> sp_blockchain::Result; /// Execute a call to a contract on top of state in a block of given hash @@ -48,7 +50,9 @@ pub trait ProofProvider { id: &BlockId, method: &str, call_data: &[u8], + kind: StorageProofKind, ) -> sp_blockchain::Result<(Vec, StorageProof)>; + /// Reads given header and generates CHT-based header proof. fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)>; diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index ed1eb05712f31..f6b55d041be98 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -39,7 +39,7 @@ use log::{trace, warn}; use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sc_client_api::{ - backend::Backend, StorageProof, + backend::Backend, StorageProof, StorageProofKind, LegacyStorageProof, light::{FetchChecker, RemoteReadRequest}, StorageProvider, ProofProvider, }; @@ -95,7 +95,7 @@ impl AuthoritySetForFinalityProver for Arc) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY)) + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::Flatten) } } @@ -226,7 +226,7 @@ struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option, + pub authorities_proof: Option, } /// Proof of finality is the ordered set of finality fragments, where: @@ -331,7 +331,8 @@ pub(crate) fn prove_finality, J>( let new_authorities = authorities_provider.authorities(¤t_id)?; let new_authorities_proof = if current_authorities != new_authorities { current_authorities = new_authorities; - Some(authorities_provider.prove_authorities(¤t_id)?) + Some(authorities_provider.prove_authorities(¤t_id)? + .legacy().expect("Flatten proof used")) } else { None }; @@ -528,7 +529,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - new_authorities_proof, + new_authorities_proof.to_storage_proof(), )?; current_set_id = current_set_id + 1; @@ -866,14 +867,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![vec![50]])), + authorities_proof: Some(LegacyStorageProof::new(vec![vec![50]])), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::Flatten(vec![vec![70]])), + authorities_proof: Some(LegacyStorageProof::new(vec![vec![70]])), }, ]); @@ -948,7 +949,7 @@ pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), + authorities_proof: Some(LegacyStorageProof::new(vec![vec![42]])), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -998,7 +999,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, 
initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), + authorities_proof: Some(LegacyStorageProof::new(vec![vec![42]])), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 84b913b284c62..000e4b028230f 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -51,7 +51,7 @@ use std::fmt::Write; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; use crate::chain::{Client, FinalityProofProvider}; -use sc_client_api::{ChangesProof, StorageProof}; +use sc_client_api::{ChangesProof, StorageProof, StorageProofKind}; use crate::error; use util::LruHashSet; use wasm_timer::Instant; @@ -1342,10 +1342,12 @@ impl Protocol { request.method, request.block ); + // TODO EMCH consider switching this to compact let proof = match self.context_data.chain.execution_proof( &BlockId::Hash(request.block), &request.method, &request.data, + StorageProofKind::Flatten, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1366,7 +1368,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof, + proof: proof.legacy().expect("Flatten was use above"), }), ); } @@ -1471,9 +1473,11 @@ impl Protocol { trace!(target: "sync", "Remote read request {} from {} ({} at {})", request.id, who, keys_str(), request.block); + // TODO EMCH consider switching this to compact let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), - &mut request.keys.iter().map(AsRef::as_ref) + &mut request.keys.iter().map(AsRef::as_ref), + StorageProofKind::Flatten, ) { Ok(proof) => proof, Err(error) => { @@ -1492,7 +1496,7 @@ impl Protocol { None, 
GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.legacy().expect("Flatten was use above"), }), ); } @@ -1525,10 +1529,12 @@ impl Protocol { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; + // TODO EMCH consider switching this to compact let proof = match child_info.and_then(|child_info| self.context_data.chain.read_child_proof( &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), + StorageProofKind::Flatten, )) { Ok(proof) => proof, Err(error) => { @@ -1548,7 +1554,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.legacy().expect("Flatten was use above"), }), ); } @@ -1578,7 +1584,7 @@ impl Protocol { GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof, + proof: proof.legacy().expect("header_proof is a flatten proof"), }), ); } @@ -1641,7 +1647,8 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, + roots_proof: proof.roots_proof.legacy() + .expect("Change roots is flatten"), }), ); } diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index f4e877d675e79..d5fef4ba79255 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -55,7 +55,7 @@ use libp2p::{ use nohash_hasher::IntMap; use prost::Message; use sc_client::light::fetcher; -use sc_client_api::StorageProof; +use sc_client_api::{StorageProof, StorageProofKind}; use sc_peerset::ReputationChange; use sp_core::{ storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, @@ -539,7 +539,13 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match 
self.chain.execution_proof(&BlockId::Hash(block), &request.method, &request.data) { + // TODO EMCH consider new version with compact + let proof = match self.chain.execution_proof( + &BlockId::Hash(block), + &request.method, + &request.data, + StorageProofKind::Flatten, + ) { Ok((_, proof)) => proof, Err(e) => { log::trace!("remote call request from {} ({} at {:?}) failed with: {}", @@ -578,7 +584,12 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match self.chain.read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) { + // TODO EMCH consider new version with compact + let proof = match self.chain.read_proof( + &BlockId::Hash(block), + &mut request.keys.iter().map(AsRef::as_ref), + StorageProofKind::Flatten, + ) { Ok(proof) => proof, Err(error) => { log::trace!("remote read request from {} ({} at {:?}) failed with: {}", @@ -622,10 +633,12 @@ where Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; + // TODO EMCH consider new version with compact let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( &BlockId::Hash(block), &child_info, - &mut request.keys.iter().map(AsRef::as_ref) + &mut request.keys.iter().map(AsRef::as_ref), + StorageProofKind::Flatten, )) { Ok(proof) => proof, Err(error) => { diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 8638e9afc59b9..561f61daca05a 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -26,7 +26,10 @@ pub use self::generic::{ FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; -use sc_client_api::StorageProof; +// TODO EMCH consider breaking change new api with actual new Storage proof. +// would need request or static choice. Or use adapter to put proof in legacy +// format. 
+use sc_client_api::LegacyStorageProof as StorageProof; /// A unique ID of a request. pub type RequestId = u64; diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index a3d9b205c878c..637b70686c462 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -222,9 +222,9 @@ where trie_state: &sp_state_machine::TrieBackend>, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8] + call_data: &[u8], + kind: StorageProofKind, ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - // TODO Should we make proof kind a parameter? sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _>( trie_state, overlay, @@ -232,7 +232,7 @@ where self.spawn_handle.clone(), method, call_data, - StorageProofKind::TrieSkipHashes, + kind, &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, ) .map_err(Into::into) diff --git a/client/src/client.rs b/client/src/client.rs index 76394072d608a..91512d7ddf67f 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -1092,11 +1092,10 @@ impl ProofProvider for Client where &self, id: &BlockId, keys: &mut dyn Iterator, + kind: StorageProofKind, ) -> sp_blockchain::Result { - // TODO keep flatten proof here?? or move choice to caller? 
- // TODO EMCH this should be parametereized fo client self.state_at(id) - .and_then(|state| prove_read(state, keys, StorageProofKind::Flatten) + .and_then(|state| prove_read(state, keys, kind) .map_err(Into::into)) } @@ -1105,10 +1104,10 @@ impl ProofProvider for Client where id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, + kind: StorageProofKind, ) -> sp_blockchain::Result { - // TODO EMCH this should be parametereized fo client self.state_at(id) - .and_then(|state| prove_child_read(state, child_info, keys, StorageProofKind::Flatten) + .and_then(|state| prove_child_read(state, child_info, keys, kind) .map_err(Into::into)) } @@ -1116,7 +1115,8 @@ impl ProofProvider for Client where &self, id: &BlockId, method: &str, - call_data: &[u8] + call_data: &[u8], + kind: StorageProofKind, ) -> sp_blockchain::Result<(Vec, StorageProof)> { // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. @@ -1125,6 +1125,7 @@ impl ProofProvider for Client where let code_proof = self.read_proof( id, &mut [well_known_keys::CODE, well_known_keys::HEAP_PAGES].iter().map(|v| *v), + kind, )?; let state = self.state_at(id)?; @@ -1135,8 +1136,9 @@ impl ProofProvider for Client where &self.executor, method, call_data, + kind, ).and_then(|(r, p)| { - // TODO EMCH using flatten?? 
+ // TODO EMCH kind mappnig for mergeable proof type Ok((r, StorageProof::merge::, _>(vec![p, code_proof]) .map_err(|e| format!("{}", e))?)) }) diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 884153598b85d..24b9e8b678d38 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -159,6 +159,7 @@ impl CallExecutor for _changes: &mut OverlayedChanges, _method: &str, _call_data: &[u8], + _kind: StorageProofKind, ) -> ClientResult<(Vec, StorageProof)> { Err(ClientError::NotAvailableOnLightClient) } @@ -178,6 +179,7 @@ pub fn prove_execution( executor: &E, method: &str, call_data: &[u8], + kind: StorageProofKind, ) -> ClientResult<(Vec, StorageProof)> where Block: BlockT, @@ -197,6 +199,7 @@ pub fn prove_execution( &mut changes, "Core_initialize_block", &header.encode(), + kind, )?; // execute method + record execution proof @@ -205,7 +208,13 @@ pub fn prove_execution( &mut changes, method, call_data, + kind, )?; + // TODO EMCH this is actually not allowing compaction (would need to pack both input): merge + // current approach is probably a bit naive. -> could do if prove_at_trie_state would + // also return proof input (not packing would be good to) -> maybe a test variant with + // input in proof -> that way merge is possible. + // => make a mapping on kind to best possible merge strategy. 
let total_proof = StorageProof::merge::, _>(vec![init_proof, exec_proof]) .map_err(|e| format!("{}", e))?; @@ -364,7 +373,8 @@ mod tests { _trie_state: &sp_state_machine::TrieBackend>, _overlay: &mut OverlayedChanges, _method: &str, - _call_data: &[u8] + _call_data: &[u8], + _kind: StorageProofKind, ) -> Result<(Vec, StorageProof), ClientError> { unreachable!() } @@ -380,7 +390,11 @@ mod tests { #[test] fn execution_proof_is_generated_and_checked() { - fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { + fn execute( + remote_client: &TestClient, + at: u64, method: &'static str, + kind: StorageProofKind, + ) -> (Vec, Vec) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); @@ -388,7 +402,8 @@ mod tests { let (remote_result, remote_execution_proof) = remote_client.execution_proof( &remote_block_id, method, - &[] + &[], + kind, ).unwrap(); // check remote execution proof locally @@ -408,7 +423,12 @@ mod tests { (remote_result, local_result) } - fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { + fn execute_with_proof_failure( + remote_client: &TestClient, + at: u64, + method: &'static str, + kind: StorageProofKind, + ) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); @@ -416,7 +436,8 @@ mod tests { let (_, remote_execution_proof) = remote_client.execution_proof( &remote_block_id, method, - &[] + &[], + kind, ).unwrap(); // check remote execution proof locally @@ -457,28 +478,38 @@ mod tests { ).unwrap(); } - // check method that doesn't requires environment - let (remote, local) = execute(&remote_client, 0, "Core_version"); - assert_eq!(remote, local); - - let (remote, local) = execute(&remote_client, 2, "Core_version"); - assert_eq!(remote, local); - - // check method that requires environment - let (_, block) = execute(&remote_client, 0, 
"BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 1); - - let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 3); - - // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set - execute_with_proof_failure(&remote_client, 2, "Core_version"); - - // check that proof check doesn't panic even if proof is incorrect AND panic handler is set - sp_panic_handler::set("TEST", "1.2.3"); - execute_with_proof_failure(&remote_client, 2, "Core_version"); + let kinds = [ + StorageProofKind::Flatten, + /* TODO EMCH currently remote is static to legacy so flatten only + StorageProofKind::TrieSkipHashes, + StorageProofKind::KnownQueryPlanAndValues, + */ + ]; + + for kind in &kinds { + // check method that doesn't requires environment + let (remote, local) = execute(&remote_client, 0, "Core_version", *kind); + assert_eq!(remote, local); + + let (remote, local) = execute(&remote_client, 2, "Core_version", *kind); + assert_eq!(remote, local); + + // check method that requires environment + let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block", *kind); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 1); + + let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block", *kind); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 3); + + // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set + execute_with_proof_failure(&remote_client, 2, "Core_version", *kind); + + // check that proof check doesn't panic even if proof is incorrect AND panic handler is set + sp_panic_handler::set("TEST", "1.2.3"); + execute_with_proof_failure(&remote_client, 2, "Core_version", *kind); + 
} } #[test] diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 614292ece6cbd..8850b95e52f7d 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -361,11 +361,20 @@ pub mod tests { use sp_core::{blake2_256, ChangesTrieConfiguration, H256}; use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; use sp_runtime::{generic::BlockId, traits::BlakeTwo256}; - use sp_state_machine::Backend; + use sp_state_machine::{Backend, StorageProofKind}; use super::*; use sc_client_api::{StorageProvider, ProofProvider}; use sc_block_builder::BlockBuilderProvider; + // TODO see what can be use in this context + const KINDS: [StorageProofKind; 5] = [ + StorageProofKind::Flatten, + StorageProofKind::Full, + StorageProofKind::TrieSkipHashes, + StorageProofKind::TrieSkipHashesFull, + StorageProofKind::KnownQueryPlanAndValues, + ]; + type TestChecker = LightDataChecker< NativeExecutor, BlakeTwo256, @@ -377,7 +386,8 @@ pub mod tests { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } - fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { + fn prepare_for_read_proof_check(kind: StorageProofKind) + -> (TestChecker, Header, StorageProof, u32) { // prepare remote client let remote_client = substrate_test_runtime_client::new(); let remote_block_id = BlockId::Number(0); @@ -393,6 +403,7 @@ pub mod tests { let remote_read_proof = remote_client.read_proof( &remote_block_id, &mut std::iter::once(well_known_keys::HEAP_PAGES), + kind, ).unwrap(); // check remote read proof locally @@ -412,7 +423,8 @@ pub mod tests { (local_checker, remote_block_header, remote_read_proof, heap_pages) } - fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { + fn prepare_for_read_child_proof_check(kind: StorageProofKind) + -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; let 
child_info = ChildInfo::new_default(b"child1"); @@ -441,6 +453,7 @@ pub mod tests { &remote_block_id, child_info, &mut std::iter::once("key1".as_bytes()), + kind, ).unwrap(); // check locally @@ -502,34 +515,45 @@ pub mod tests { #[test] fn storage_read_proof_is_generated_and_checked() { - let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::
{ - block: remote_block_header.hash(), - header: remote_block_header, - keys: vec![well_known_keys::HEAP_PAGES.to_vec()], - retry_count: None, - }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); + for kind in &KINDS { + let ( + local_checker, + remote_block_header, + remote_read_proof, + heap_pages, + ) = prepare_for_read_proof_check(*kind); + assert_eq!((&local_checker as &dyn FetchChecker) + .check_read_proof(&RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, remote_read_proof).unwrap() + .remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); + } } #[test] fn storage_child_read_proof_is_generated_and_checked() { let child_info = ChildInfo::new_default(&b"child1"[..]); - let ( - local_checker, - remote_block_header, - remote_read_proof, - result, - ) = prepare_for_read_child_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( - &RemoteReadChildRequest::
{ - block: remote_block_header.hash(), - header: remote_block_header, - storage_key: child_info.prefixed_storage_key(), - keys: vec![b"key1".to_vec()], - retry_count: None, - }, - remote_read_proof - ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); + for kind in &KINDS { + let ( + local_checker, + remote_block_header, + remote_read_proof, + result, + ) = prepare_for_read_child_proof_check(*kind); + assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( + &RemoteReadChildRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + storage_key: child_info.prefixed_storage_key(), + keys: vec![b"key1".to_vec()], + retry_count: None, + }, + remote_read_proof + ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); + } } #[test] diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index fd45409c24027..c2a1a19e2a44a 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -43,7 +43,8 @@ mod trie_backend_essence; mod stats; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, - StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind}; + StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, + LegacyStorageProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 905ef26f0dbd7..f4e054ddfbe7f 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -344,6 +344,11 @@ impl LegacyStorageProof { pub fn new(trie_nodes: Vec>) -> Self { LegacyStorageProof { trie_nodes } } + + /// Convert to a `StorageProof`. + pub fn to_storage_proof(self) -> StorageProof { + StorageProof::Flatten(self.trie_nodes) + } } impl Decode for StorageProof { @@ -838,6 +843,15 @@ impl StorageProof { } Ok(db) } + + /// Cast a flatten proof to a legacy one. + pub fn legacy(self) -> Result { + if let StorageProof::Flatten(trie_nodes) = self { + Ok(LegacyStorageProof{ trie_nodes }) + } else { + Err(error("Cannot use as legacy proof")) + } + } } /// An iterator over trie nodes constructed from a storage proof. 
The nodes are not guaranteed to From 869c24fc0d431b75441e804fca92b0aed930654b Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 16:38:10 +0200 Subject: [PATCH 115/185] restore correct legacy encoding of light client --- .../src/protocol/light_client_handler.rs | 39 +++++++++++-------- client/src/light/fetcher.rs | 4 +- primitives/trie/src/storage_proof.rs | 10 +++-- 3 files changed, 31 insertions(+), 22 deletions(-) diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index d5fef4ba79255..6e67956a14f85 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -55,7 +55,7 @@ use libp2p::{ use nohash_hasher::IntMap; use prost::Message; use sc_client::light::fetcher; -use sc_client_api::{StorageProof, StorageProofKind}; +use sc_client_api::{LegacyStorageProof, StorageProof, StorageProofKind}; use sc_peerset::ReputationChange; use sp_core::{ storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, @@ -438,7 +438,8 @@ where match response.response { Some(Response::RemoteCallResponse(response)) => if let Request::Call { request , .. } = request { - let proof = Decode::decode(&mut response.proof.as_ref())?; + let proof = LegacyStorageProof::decode(&mut response.proof.as_ref())? + .to_storage_proof(); let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { @@ -447,12 +448,14 @@ where Some(Response::RemoteReadResponse(response)) => match request { Request::Read { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; + let proof = LegacyStorageProof::decode(&mut response.proof.as_ref())? + .to_storage_proof(); let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } Request::ReadChild { request, .. 
} => { - let proof = Decode::decode(&mut response.proof.as_ref())?; + let proof = LegacyStorageProof::decode(&mut response.proof.as_ref())? + .to_storage_proof(); let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } @@ -461,7 +464,8 @@ where Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. } = request { let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; + let roots_proof = LegacyStorageProof::decode(&mut response.roots_proof.as_ref())? + .to_storage_proof(); let roots = { let mut r = BTreeMap::new(); for pair in response.roots { @@ -489,7 +493,8 @@ where } else { Some(Decode::decode(&mut response.header.as_ref())?) }; - let proof = Decode::decode(&mut response.proof.as_ref())?; + let proof = LegacyStorageProof::decode(&mut response.proof.as_ref())? + .to_storage_proof(); let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { @@ -546,7 +551,7 @@ where &request.data, StorageProofKind::Flatten, ) { - Ok((_, proof)) => proof, + Ok((_, proof)) => proof.legacy().expect("Call in flatten mode"), Err(e) => { log::trace!("remote call request from {} ({} at {:?}) failed with: {}", peer, @@ -554,7 +559,7 @@ where request.block, e, ); - StorageProof::empty() + LegacyStorageProof::empty() } }; @@ -590,14 +595,14 @@ where &mut request.keys.iter().map(AsRef::as_ref), StorageProofKind::Flatten, ) { - Ok(proof) => proof, + Ok(proof) => proof.legacy().expect("Call in flatten mode"), Err(error) => { log::trace!("remote read request from {} ({} at {:?}) failed with: {}", peer, fmt_keys(request.keys.first(), request.keys.last()), request.block, error); - StorageProof::empty() + LegacyStorageProof::empty() } }; @@ -640,7 +645,7 @@ where &mut request.keys.iter().map(AsRef::as_ref), StorageProofKind::Flatten, )) { - Ok(proof) => proof, + Ok(proof) => 
proof.legacy().expect("Flatten proof used"), Err(error) => { log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", peer, @@ -648,7 +653,7 @@ where fmt_keys(request.keys.first(), request.keys.last()), request.block, error); - StorageProof::empty() + LegacyStorageProof::empty() } }; @@ -670,13 +675,13 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let (header, proof) = match self.chain.header_proof(&BlockId::Number(block)) { - Ok((header, proof)) => (header.encode(), proof), + Ok((header, proof)) => (header.encode(), proof.legacy().expect("header is flatten proof")), Err(error) => { log::trace!("remote header proof request from {} ({:?}) failed with: {}", peer, request.block, error); - (Default::default(), StorageProof::empty()) + (Default::default(), LegacyStorageProof::empty()) } }; @@ -741,7 +746,7 @@ where roots: proof.roots.into_iter() .map(|(k, v)| api::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), - roots_proof: proof.roots_proof.encode(), + roots_proof: proof.roots_proof.legacy().expect("change proof is flatten").encode(), }; api::v1::light::response::Response::RemoteChangesResponse(r) }; @@ -1326,7 +1331,7 @@ mod tests { swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, yamux }; - use sc_client_api::StorageProof; + use sc_client_api::{StorageProof, LegacyStorageProof}; use sc_client::light::fetcher; use sp_blockchain::{Error as ClientError}; use sp_core::storage::ChildInfo; @@ -1347,7 +1352,7 @@ mod tests { type Swarm = libp2p::swarm::Swarm; fn empty_proof() -> Vec { - StorageProof::empty().encode() + LegacyStorageProof::empty().encode() } fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 8850b95e52f7d..91d9aec0788ce 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -367,12 +367,12 @@ pub mod tests { use 
sc_block_builder::BlockBuilderProvider; // TODO see what can be use in this context - const KINDS: [StorageProofKind; 5] = [ + const KINDS: [StorageProofKind; 4] = [ StorageProofKind::Flatten, StorageProofKind::Full, StorageProofKind::TrieSkipHashes, StorageProofKind::TrieSkipHashesFull, - StorageProofKind::KnownQueryPlanAndValues, + //StorageProofKind::KnownQueryPlanAndValues, // this is currently unsupported ]; type TestChecker = LightDataChecker< diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index f4e054ddfbe7f..af19e9702995b 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -340,6 +340,11 @@ pub struct LegacyStorageProof { } impl LegacyStorageProof { + /// Create a empty proof. + pub fn empty() -> Self { + LegacyStorageProof { trie_nodes: Default::default() } + } + /// Create a proof from encoded trie nodes. pub fn new(trie_nodes: Vec>) -> Self { LegacyStorageProof { trie_nodes } @@ -452,9 +457,8 @@ impl StorageProof { /// An empty proof is capable of only proving trivial statements (ie. that an empty set of /// key-value pairs exist in storage). pub fn empty() -> Self { - // we default to full as it can be reduce to flatten when reducing - // flatten to full is not possible without making asumption over the content. - Self::empty_for(StorageProofKind::Full) + // we default to flatten for compatibility + Self::empty_for(StorageProofKind::Flatten) } /// Returns a new empty proof of a given kind. 
From d3643813436000fe494bf99284ceb0b87ad19ed1 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 19:00:33 +0200 Subject: [PATCH 116/185] Merge implementation --- client/src/client.rs | 10 +- client/src/light/call_executor.rs | 16 +-- primitives/trie/src/storage_proof.rs | 204 ++++++++++++++++++++++++--- 3 files changed, 198 insertions(+), 32 deletions(-) diff --git a/client/src/client.rs b/client/src/client.rs index 91512d7ddf67f..1a3cdef713de0 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -474,7 +474,7 @@ impl Client where Ok(()) }, ())?; - Ok(StorageProof::merge::, _>(proofs) + Ok(StorageProof::merge::, _>(proofs, false) .map_err(|e| format!("{}", e))?) } @@ -1118,6 +1118,7 @@ impl ProofProvider for Client where call_data: &[u8], kind: StorageProofKind, ) -> sp_blockchain::Result<(Vec, StorageProof)> { + let (merge_kind, prefer_full) = kind.mergeable_kind(); // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. 
// @@ -1125,7 +1126,7 @@ impl ProofProvider for Client where let code_proof = self.read_proof( id, &mut [well_known_keys::CODE, well_known_keys::HEAP_PAGES].iter().map(|v| *v), - kind, + merge_kind, )?; let state = self.state_at(id)?; @@ -1136,10 +1137,9 @@ impl ProofProvider for Client where &self.executor, method, call_data, - kind, + merge_kind, ).and_then(|(r, p)| { - // TODO EMCH kind mappnig for mergeable proof type - Ok((r, StorageProof::merge::, _>(vec![p, code_proof]) + Ok((r, StorageProof::merge::, _>(vec![p, code_proof], prefer_full) .map_err(|e| format!("{}", e))?)) }) } diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 24b9e8b678d38..7c0dd114b5ba4 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -192,6 +192,7 @@ pub fn prove_execution( Box )?; + let (merge_kind, prefer_full) = kind.mergeable_kind(); // prepare execution environment + record preparation proof let mut changes = Default::default(); let (_, init_proof) = executor.prove_at_trie_state( @@ -199,7 +200,7 @@ pub fn prove_execution( &mut changes, "Core_initialize_block", &header.encode(), - kind, + merge_kind, )?; // execute method + record execution proof @@ -208,15 +209,12 @@ pub fn prove_execution( &mut changes, method, call_data, - kind, + merge_kind, )?; - // TODO EMCH this is actually not allowing compaction (would need to pack both input): merge - // current approach is probably a bit naive. -> could do if prove_at_trie_state would - // also return proof input (not packing would be good to) -> maybe a test variant with - // input in proof -> that way merge is possible. - // => make a mapping on kind to best possible merge strategy. 
- let total_proof = StorageProof::merge::, _>(vec![init_proof, exec_proof]) - .map_err(|e| format!("{}", e))?; + let total_proof = StorageProof::merge::, _>( + vec![init_proof, exec_proof], + prefer_full, + ).map_err(|e| format!("{}", e))?; Ok((result, total_proof)) } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index af19e9702995b..42c6dca5334fb 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -18,7 +18,7 @@ use sp_std::collections::btree_map::BTreeMap; use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; use sp_std::convert::TryInto; -use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput}; +use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput, Error as CodecError}; use hash_db::{Hasher, HashDB, HashDBRef, EMPTY_PREFIX}; use crate::{MemoryDB, Layout}; use sp_storage::{ChildInfoProof, ChildType, ChildrenMap}; @@ -54,14 +54,6 @@ pub enum Error { Proof, } -#[cfg(feature = "std")] -impl sp_std::convert::From> for Error { - fn from(e: sp_std::boxed::Box) -> Self { - // currently only trie error is build from box - Error::Trie(format!("{}", e)) - } -} - #[cfg(feature = "std")] impl sp_std::fmt::Display for Error { fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { @@ -72,6 +64,14 @@ impl sp_std::fmt::Display for Error { } } +#[cfg(feature = "std")] +impl sp_std::convert::From> for Error { + fn from(e: sp_std::boxed::Box) -> Self { + // currently only trie error is build from box + Error::Trie(format!("{}", e)) + } +} + #[cfg(not(feature = "std"))] impl sp_std::convert::From> for Error { fn from(_e: sp_std::boxed::Box) -> Self { @@ -79,6 +79,12 @@ impl sp_std::convert::From> for Error { } } +impl sp_std::convert::From for Error { + fn from(e: CodecError) -> Self { + error(e.what()) + } +} + #[cfg(feature = "std")] const fn error(message: &'static str) -> Error { Error::Proof(message) @@ -116,6 
+122,11 @@ pub enum StorageProofKind { /// Kind for `StorageProof::KnownQueryPlanAndValues`. KnownQueryPlanAndValues, + /// Technical only + + /// Kind for `StorageProof::TrieSkipHashesForMerge`. + TrieSkipHashesForMerge = 125, + /// Testing only indices /// Kind for `StorageProof::Full`. @@ -137,6 +148,8 @@ impl StorageProofKind { => StorageProofKind::KnownQueryPlanAndValues, x if x == StorageProofKind::Full as u8 => StorageProofKind::Full, x if x == StorageProofKind::TrieSkipHashesFull as u8 => StorageProofKind::TrieSkipHashesFull, + x if x == StorageProofKind::TrieSkipHashesForMerge as u8 + => StorageProofKind::TrieSkipHashesForMerge, x if x == StorageProofKind::TrieSkipHashesFull as u8 => StorageProofKind::TrieSkipHashesFull, _ => return None, @@ -197,7 +210,8 @@ impl StorageProofKind { pub fn processing_input_kind(&self) -> InputKind { match self { StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlan, - StorageProofKind::TrieSkipHashes + StorageProofKind::TrieSkipHashesForMerge + | StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => InputKind::ChildTrieRoots, StorageProofKind::Full | StorageProofKind::Flatten => InputKind::None, @@ -210,6 +224,7 @@ impl StorageProofKind { StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanWithValues, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull + | StorageProofKind::TrieSkipHashesForMerge | StorageProofKind::Full | StorageProofKind::Flatten => InputKind::None, } @@ -222,6 +237,7 @@ impl StorageProofKind { StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => true, StorageProofKind::Full + | StorageProofKind::TrieSkipHashesForMerge | StorageProofKind::Flatten => false, } } @@ -234,6 +250,7 @@ impl StorageProofKind { StorageProofKind::Full | StorageProofKind::KnownQueryPlanAndValues | StorageProofKind::TrieSkipHashes + | StorageProofKind::TrieSkipHashesForMerge | StorageProofKind::TrieSkipHashesFull => true, } } @@ -245,6 +262,7 @@ 
impl StorageProofKind { StorageProofKind::Flatten | StorageProofKind::TrieSkipHashes => Some(false), StorageProofKind::Full + | StorageProofKind::TrieSkipHashesForMerge | StorageProofKind::TrieSkipHashesFull => Some(true), StorageProofKind::KnownQueryPlanAndValues => None, } @@ -276,6 +294,16 @@ impl StorageProofKind { pub fn can_use_as_flat_partial_db(&self) -> bool { self.can_use_as_partial_db() } + + /// Return the best kind to use for merging later, and + /// wether the merge should produce full proof. + pub fn mergeable_kind(&self) -> (Self, bool) { + match self { + StorageProofKind::TrieSkipHashes => (StorageProofKind::TrieSkipHashesForMerge, false), + StorageProofKind::TrieSkipHashesFull => (StorageProofKind::TrieSkipHashesForMerge, true), + s => (*s, s.use_full_partial_db().unwrap_or(false)) + } + } } /// A collection on encoded trie nodes. @@ -316,6 +344,13 @@ pub enum StorageProof { /// This needs to be check for every children proofs. KnownQueryPlanAndValues(ChildrenProofMap), + // Techincal variant + + /// This is an intermediate representation that keep trace of + /// input, in order merge into a `TrieSkipHashes` or a `TrieSkipHashesFull` + /// proof + TrieSkipHashesForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>), + // Following variants are only for testing, they still can be use but // decoding is not implemented. 
@@ -366,6 +401,8 @@ impl Decode for StorageProof { StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(Decode::decode(value)?), StorageProofKind::Full => StorageProof::Full(Decode::decode(value)?), + StorageProofKind::TrieSkipHashesForMerge + => return Err(codec::Error::from("Invalid storage kind")), StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(Decode::decode(value)?), }) @@ -381,6 +418,7 @@ impl Encode for StorageProof { StorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), StorageProof::Full(p) => p.encode_to(dest), StorageProof::TrieSkipHashesFull(p) => p.encode_to(dest), + StorageProof::TrieSkipHashesForMerge(..) => (), } } } @@ -404,9 +442,9 @@ impl<'a> Encode for LegacyEncodeAdapter<'a> { pub struct LegacyDecodeAdapter(pub StorageProof); /// Allow read ahead on input. -pub struct InputRevertReadAhead<'a, I>(pub &'a mut &'a [u8], pub &'a mut I); +pub struct InputRevertPeek<'a, I>(pub &'a mut &'a [u8], pub &'a mut I); -impl<'a, I: CodecInput> CodecInput for InputRevertReadAhead<'a, I> { +impl<'a, I: CodecInput> CodecInput for InputRevertPeek<'a, I> { fn remaining_len(&mut self) -> CodecResult> { Ok(self.1.remaining_len()?.map(|l| l + self.0.len())) } @@ -445,7 +483,7 @@ impl Decode for LegacyDecodeAdapter { LegacyDecodeAdapter(Decode::decode(value)?) 
} else { let mut legacy = &[legacy][..]; - let mut input = InputRevertReadAhead(&mut legacy, value); + let mut input = InputRevertPeek(&mut legacy, value); LegacyDecodeAdapter(StorageProof::Flatten(Decode::decode(&mut input)?)) }) } @@ -467,6 +505,9 @@ impl StorageProof { StorageProofKind::Flatten => StorageProof::Flatten(Default::default()), StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(ChildrenProofMap::default()), + StorageProofKind::TrieSkipHashesForMerge => StorageProof::TrieSkipHashesForMerge( + ChildrenProofMap::default(), + ), StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(ChildrenProofMap::default()), StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Default::default()), } @@ -480,6 +521,7 @@ impl StorageProof { StorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), StorageProof::TrieSkipHashes(data) => data.is_empty(), StorageProof::TrieSkipHashesFull(data) => data.is_empty(), + StorageProof::TrieSkipHashesForMerge(data) => data.is_empty(), } } @@ -597,6 +639,24 @@ impl StorageProof { } StorageProof::Full(result) }, + StorageProofKind::TrieSkipHashesForMerge => { + if let Input::ChildTrieRoots(roots) = input { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in collected.iter() { + let root = roots.get(&child_info.proof_info()) + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .ok_or_else(|| missing_pack_input())?; + let trie_nodes: HashMap<_, _> = set + .iter() + .filter_map(|(k, v)| v.as_ref().map(|v| (k.encode(), v.to_vec()))) + .collect(); + result.insert(child_info.proof_info(), (ProofMapTrieNodes(trie_nodes), root)); + } + StorageProof::TrieSkipHashesForMerge(result) + } else { + return Err(missing_pack_input()); + } + }, StorageProofKind::TrieSkipHashesFull => { if let Input::ChildTrieRoots(roots) = input { let mut result = ChildrenProofMap::default(); @@ -682,15 
+742,16 @@ impl StorageProof { /// The function cannot pack back proof as it does not have reference to additional information /// needed. So for this the additional information need to be merged separately and the result /// of this merge be packed with it afterward. - pub fn merge(proofs: I) -> Result + pub fn merge(proofs: I, prefer_full: bool) -> Result where I: IntoIterator, H: Hasher, H::Out: Codec, { - let mut do_flatten = false; + let mut do_flatten = !prefer_full; let mut child_sets = ChildrenProofMap::>>::default(); let mut unique_set = BTreeSet::>::default(); + let mut packable_child_sets: Option)>> = None; // lookup for best encoding for mut proof in proofs { // unpack @@ -708,11 +769,31 @@ impl StorageProof { } let proof = proof; match proof { + StorageProof::TrieSkipHashesForMerge(proof) => { + if !child_sets.is_empty() || !unique_set.is_empty() { + return Err(error("Proof incompatibility for merging")); + } + if let Some(p) = packable_child_sets.as_mut() { + for (child_info, (mut proof, root)) in proof.into_iter() { + p.entry(child_info) + .and_modify(|entry| { + debug_assert!(&root == &entry.1); + entry.0.extend(proof.drain()); + }) + .or_insert((proof, root)); + } + } else { + packable_child_sets = Some(proof); + } + }, StorageProof::TrieSkipHashesFull(..) | StorageProof::TrieSkipHashes(..) | StorageProof::KnownQueryPlanAndValues(..) 
=> unreachable!("Unpacked or early return earlier"), StorageProof::Flatten(proof) => { + if packable_child_sets.is_some() { + return Err(error("Proof incompatibility for merging")); + } if !do_flatten { do_flatten = true; for (_, set) in sp_std::mem::replace(&mut child_sets, Default::default()).into_iter() { @@ -722,6 +803,9 @@ impl StorageProof { unique_set.extend(proof); }, StorageProof::Full(children) => { + if packable_child_sets.is_some() { + return Err(error("Proof incompatibility for merging")); + } for (child_info, child) in children.into_iter() { if do_flatten { unique_set.extend(child); @@ -733,6 +817,25 @@ impl StorageProof { }, } } + if let Some(children) = packable_child_sets { + if prefer_full { + let mut result = ChildrenProofMap::default(); + for (child_info, (set, root)) in children.into_iter() { + let root = Decode::decode(&mut &root[..])?; + let trie_nodes = crate::pack_proof_from_collected::>(&root, &set)?; + result.insert(child_info, trie_nodes); + } + return Ok(StorageProof::TrieSkipHashesFull(result)) + } else { + let mut result = Vec::default(); + for (_child_info, (set, root)) in children.iter() { + let root = Decode::decode(&mut &root[..])?; + let trie_nodes = crate::pack_proof_from_collected::>(&root, &*set)?; + result.push(trie_nodes); + } + return Ok(StorageProof::TrieSkipHashes(result)) + } + } Ok(if do_flatten { StorageProof::Flatten(unique_set.into_iter().collect()) } else { @@ -752,6 +855,7 @@ impl StorageProof { StorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, StorageProof::Full(_) => StorageProofKind::Full, StorageProof::TrieSkipHashesFull(_) => StorageProofKind::TrieSkipHashesFull, + StorageProof::TrieSkipHashesForMerge(_) => StorageProofKind::TrieSkipHashesForMerge, } } @@ -762,6 +866,7 @@ impl StorageProof { pub fn as_partial_db(self) -> Result>> where H: Hasher, + H::Out: Decode, { let mut result = ChildrenProofMap::default(); match self { @@ -778,6 +883,16 @@ impl StorageProof { 
result.insert(child_info, db); } }, + StorageProof::TrieSkipHashesForMerge(children) => { + for (child_info, (proof, _root)) in children.into_iter() { + let mut db = MemoryDB::default(); + for (key, value) in proof.0.into_iter() { + let key = Decode::decode(&mut &key[..])?; + db.emplace(key, EMPTY_PREFIX, value); + } + result.insert(child_info, db); + } + }, StorageProof::TrieSkipHashesFull(children) => { for (child_info, proof) in children.into_iter() { // Note that this does check all hashes so using a trie backend @@ -801,6 +916,7 @@ impl StorageProof { pub fn as_partial_flat_db(self) -> Result> where H: Hasher, + H::Out: Decode, { let mut db = MemoryDB::default(); let mut db_empty = true; @@ -817,6 +933,14 @@ impl StorageProof { } } }, + StorageProof::TrieSkipHashesForMerge(children) => { + for (_child_info, (proof, _root)) in children.into_iter() { + for (key, value) in proof.0.into_iter() { + let key = Decode::decode(&mut &key[..])?; + db.emplace(key, EMPTY_PREFIX, value); + } + } + }, StorageProof::TrieSkipHashesFull(children) => { for (_child_info, proof) in children.into_iter() { // Note that this does check all hashes so using a trie backend @@ -885,7 +1009,10 @@ impl Iterator for StorageProofNodeIterator { } } -impl TryInto> for StorageProof { +impl TryInto> for StorageProof + where + H::Out: Decode, +{ type Error = Error; fn try_into(self) -> Result> { @@ -893,7 +1020,10 @@ impl TryInto> for StorageProof { } } -impl TryInto>> for StorageProof { +impl TryInto>> for StorageProof + where + H::Out: Decode, +{ type Error = Error; fn try_into(self) -> Result>> { @@ -940,13 +1070,16 @@ impl IntoIterator for ChildrenProofMap { #[derive(Clone)] pub struct RecordMapTrieNodes(HashMap>); +/// Container recording trie nodes and their encoded hash. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct ProofMapTrieNodes(pub HashMap, DBValue>); + impl sp_std::default::Default for RecordMapTrieNodes { fn default() -> Self { RecordMapTrieNodes(Default::default()) } } - impl sp_std::ops::Deref for RecordMapTrieNodes { type Target = HashMap>; @@ -971,6 +1104,41 @@ impl HashDBRef for RecordMapTrieNodes { } } +impl sp_std::default::Default for ProofMapTrieNodes { + fn default() -> Self { + ProofMapTrieNodes(Default::default()) + } +} + +impl sp_std::ops::Deref for ProofMapTrieNodes { + type Target = HashMap, DBValue>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for ProofMapTrieNodes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl HashDBRef for ProofMapTrieNodes + where + H::Out: Encode, +{ + fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { + let key = key.encode(); + self.0.get(&key).cloned() + } + + fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { + let key = key.encode(); + self.0.contains_key(&key) + } +} + #[test] fn legacy_proof_codec() { // random content for proof, we test serialization From 55a12c86d25a28dbdb44ca0a44ac91b4d4dc9c75 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 19:30:05 +0200 Subject: [PATCH 117/185] Switch to new proof format and use compact on rpc and light (except head and change and cht). 
--- client/api/src/lib.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 17 ++++---- client/network/src/protocol.rs | 20 ++++------ .../src/protocol/light_client_handler.rs | 39 ++++++++----------- client/network/src/protocol/message.rs | 5 +-- client/src/light/call_executor.rs | 4 +- primitives/state-machine/src/lib.rs | 3 +- 7 files changed, 37 insertions(+), 53 deletions(-) diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index ed3041d84649f..c633c360ab353 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -34,7 +34,7 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{LegacyStorageProof, StorageProof, +pub use sp_state_machine::{StorageProof, StorageProofKind, ExecutionStrategy, CloneableSpawn}; /// Utility methods for the client. diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index f6b55d041be98..f200846977192 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -39,7 +39,7 @@ use log::{trace, warn}; use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sc_client_api::{ - backend::Backend, StorageProof, StorageProofKind, LegacyStorageProof, + backend::Backend, StorageProof, StorageProofKind, light::{FetchChecker, RemoteReadRequest}, StorageProvider, ProofProvider, }; @@ -226,7 +226,7 @@ struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option, + pub authorities_proof: Option, } /// Proof of finality is the ordered set of finality fragments, where: @@ -331,8 +331,7 @@ pub(crate) fn prove_finality, J>( let new_authorities = authorities_provider.authorities(¤t_id)?; let new_authorities_proof = if current_authorities != new_authorities { current_authorities = new_authorities; - Some(authorities_provider.prove_authorities(¤t_id)? - .legacy().expect("Flatten proof used")) + Some(authorities_provider.prove_authorities(¤t_id)?) } else { None }; @@ -529,7 +528,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - new_authorities_proof.to_storage_proof(), + new_authorities_proof, )?; current_set_id = current_set_id + 1; @@ -867,14 +866,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(LegacyStorageProof::new(vec![vec![50]])), + authorities_proof: Some(StorageProof::Flatten(vec![vec![50]])), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(LegacyStorageProof::new(vec![vec![70]])), + authorities_proof: Some(StorageProof::Flatten(vec![vec![70]])), }, ]); @@ -949,7 +948,7 @@ pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(LegacyStorageProof::new(vec![vec![42]])), + authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -999,7 +998,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, 
initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(LegacyStorageProof::new(vec![vec![42]])), + authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 000e4b028230f..bc0aa08686707 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1342,12 +1342,11 @@ impl Protocol { request.method, request.block ); - // TODO EMCH consider switching this to compact let proof = match self.context_data.chain.execution_proof( &BlockId::Hash(request.block), &request.method, &request.data, - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1368,7 +1367,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof: proof.legacy().expect("Flatten was use above"), + proof, }), ); } @@ -1473,11 +1472,10 @@ impl Protocol { trace!(target: "sync", "Remote read request {} from {} ({} at {})", request.id, who, keys_str(), request.block); - // TODO EMCH consider switching this to compact let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok(proof) => proof, Err(error) => { @@ -1496,7 +1494,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof: proof.legacy().expect("Flatten was use above"), + proof, }), ); } @@ -1529,12 +1527,11 @@ impl Protocol { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; - // TODO EMCH consider switching this to compact let proof = match 
child_info.and_then(|child_info| self.context_data.chain.read_child_proof( &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, )) { Ok(proof) => proof, Err(error) => { @@ -1554,7 +1551,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof: proof.legacy().expect("Flatten was use above"), + proof, }), ); } @@ -1584,7 +1581,7 @@ impl Protocol { GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof: proof.legacy().expect("header_proof is a flatten proof"), + proof, }), ); } @@ -1647,8 +1644,7 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof.legacy() - .expect("Change roots is flatten"), + roots_proof: proof.roots_proof, }), ); } diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 6e67956a14f85..b478377421cbc 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -55,7 +55,7 @@ use libp2p::{ use nohash_hasher::IntMap; use prost::Message; use sc_client::light::fetcher; -use sc_client_api::{LegacyStorageProof, StorageProof, StorageProofKind}; +use sc_client_api::{StorageProof, StorageProofKind}; use sc_peerset::ReputationChange; use sp_core::{ storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, @@ -438,8 +438,7 @@ where match response.response { Some(Response::RemoteCallResponse(response)) => if let Request::Call { request , .. } = request { - let proof = LegacyStorageProof::decode(&mut response.proof.as_ref())? 
- .to_storage_proof(); + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { @@ -448,14 +447,12 @@ where Some(Response::RemoteReadResponse(response)) => match request { Request::Read { request, .. } => { - let proof = LegacyStorageProof::decode(&mut response.proof.as_ref())? - .to_storage_proof(); + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } Request::ReadChild { request, .. } => { - let proof = LegacyStorageProof::decode(&mut response.proof.as_ref())? - .to_storage_proof(); + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } @@ -464,8 +461,7 @@ where Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. } = request { let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = LegacyStorageProof::decode(&mut response.roots_proof.as_ref())? - .to_storage_proof(); + let roots_proof = StorageProof::decode(&mut response.roots_proof.as_ref())?; let roots = { let mut r = BTreeMap::new(); for pair in response.roots { @@ -493,8 +489,7 @@ where } else { Some(Decode::decode(&mut response.header.as_ref())?) }; - let proof = LegacyStorageProof::decode(&mut response.proof.as_ref())? 
- .to_storage_proof(); + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { @@ -551,7 +546,7 @@ where &request.data, StorageProofKind::Flatten, ) { - Ok((_, proof)) => proof.legacy().expect("Call in flatten mode"), + Ok((_, proof)) => proof, Err(e) => { log::trace!("remote call request from {} ({} at {:?}) failed with: {}", peer, @@ -559,7 +554,7 @@ where request.block, e, ); - LegacyStorageProof::empty() + StorageProof::empty() } }; @@ -595,14 +590,14 @@ where &mut request.keys.iter().map(AsRef::as_ref), StorageProofKind::Flatten, ) { - Ok(proof) => proof.legacy().expect("Call in flatten mode"), + Ok(proof) => proof, Err(error) => { log::trace!("remote read request from {} ({} at {:?}) failed with: {}", peer, fmt_keys(request.keys.first(), request.keys.last()), request.block, error); - LegacyStorageProof::empty() + StorageProof::empty() } }; @@ -645,7 +640,7 @@ where &mut request.keys.iter().map(AsRef::as_ref), StorageProofKind::Flatten, )) { - Ok(proof) => proof.legacy().expect("Flatten proof used"), + Ok(proof) => proof, Err(error) => { log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", peer, @@ -653,7 +648,7 @@ where fmt_keys(request.keys.first(), request.keys.last()), request.block, error); - LegacyStorageProof::empty() + StorageProof::empty() } }; @@ -675,13 +670,13 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let (header, proof) = match self.chain.header_proof(&BlockId::Number(block)) { - Ok((header, proof)) => (header.encode(), proof.legacy().expect("header is flatten proof")), + Ok((header, proof)) => (header.encode(), proof), Err(error) => { log::trace!("remote header proof request from {} ({:?}) failed with: {}", peer, request.block, error); - (Default::default(), LegacyStorageProof::empty()) + (Default::default(), StorageProof::empty()) } }; @@ -746,7 +741,7 @@ where roots: 
proof.roots.into_iter() .map(|(k, v)| api::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), - roots_proof: proof.roots_proof.legacy().expect("change proof is flatten").encode(), + roots_proof: proof.roots_proof.encode(), }; api::v1::light::response::Response::RemoteChangesResponse(r) }; @@ -1331,7 +1326,7 @@ mod tests { swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, yamux }; - use sc_client_api::{StorageProof, LegacyStorageProof}; + use sc_client_api::StorageProof; use sc_client::light::fetcher; use sp_blockchain::{Error as ClientError}; use sp_core::storage::ChildInfo; @@ -1352,7 +1347,7 @@ mod tests { type Swarm = libp2p::swarm::Swarm; fn empty_proof() -> Vec { - LegacyStorageProof::empty().encode() + StorageProof::empty().encode() } fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 561f61daca05a..8638e9afc59b9 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -26,10 +26,7 @@ pub use self::generic::{ FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; -// TODO EMCH consider breaking change new api with actual new Storage proof. -// would need request or static choice. Or use adapter to put proof in legacy -// format. -use sc_client_api::LegacyStorageProof as StorageProof; +use sc_client_api::StorageProof; /// A unique ID of a request. 
pub type RequestId = u64; diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 7c0dd114b5ba4..3dbb429bfd3f7 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -478,10 +478,8 @@ mod tests { let kinds = [ StorageProofKind::Flatten, - /* TODO EMCH currently remote is static to legacy so flatten only StorageProofKind::TrieSkipHashes, - StorageProofKind::KnownQueryPlanAndValues, - */ + //StorageProofKind::KnownQueryPlanAndValues, ]; for kind in &kinds { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index c2a1a19e2a44a..fd45409c24027 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -43,8 +43,7 @@ mod trie_backend_essence; mod stats; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, - StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, - LegacyStorageProof}; + StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; From e5424179a9bd6139fd6902d8ff72c4875ef40e7d Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 21 Apr 2020 19:50:53 +0200 Subject: [PATCH 118/185] missing switches --- client/network/src/protocol/light_client_handler.rs | 9 +++------ client/src/cht.rs | 3 +-- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index b478377421cbc..9991daf5d4d6d 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -539,12 +539,11 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - // TODO EMCH consider new version with compact let proof = match self.chain.execution_proof( &BlockId::Hash(block), &request.method, &request.data, - StorageProofKind::Flatten, + 
StorageProofKind::TrieSkipHashes, ) { Ok((_, proof)) => proof, Err(e) => { @@ -584,11 +583,10 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - // TODO EMCH consider new version with compact let proof = match self.chain.read_proof( &BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok(proof) => proof, Err(error) => { @@ -633,12 +631,11 @@ where Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; - // TODO EMCH consider new version with compact let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, )) { Ok(proof) => proof, Err(error) => { diff --git a/client/src/cht.rs b/client/src/cht.rs index 111070200782d..282cf3cc132b7 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -121,8 +121,7 @@ pub fn build_proof( prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - // TODO consider Flatten compact here? 
- StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ).map_err(ClientError::Execution) } From d3da220f21903718fdba45a53f7e4de3f8818e7a Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 22 Apr 2020 18:20:42 +0200 Subject: [PATCH 119/185] fix merging and into a refactoring --- client/api/src/call_executor.rs | 4 +- client/block-builder/src/lib.rs | 4 +- client/src/call_executor.rs | 50 ++++---- client/src/client.rs | 11 +- client/src/light/call_executor.rs | 9 +- .../api/proc-macro/src/decl_runtime_apis.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 29 +++-- .../proc-macro/src/mock_impl_runtime_apis.rs | 2 +- primitives/api/src/lib.rs | 13 +- primitives/state-machine/src/lib.rs | 44 +++---- .../state-machine/src/proving_backend.rs | 111 +++++++++++------- primitives/trie/src/storage_proof.rs | 63 ++++++++-- 12 files changed, 222 insertions(+), 120 deletions(-) diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 2e4a39baa6eda..7e39198fabfd7 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -29,7 +29,7 @@ use sc_executor::{RuntimeVersion, NativeVersion}; use sp_externalities::Extensions; use sp_core::NativeOrEncoded; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{RuntimeApiProofRecorder, InitializeBlock, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; /// Executor Provider @@ -91,7 +91,7 @@ pub trait CallExecutor { initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, - proof_recorder: &Option<(ProofRecorder, StorageProofKind)>, + proof_recorder: Option<&RefCell>>, extensions: Option, ) -> sp_blockchain::Result> where ExecutionManager: Clone; diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 63b695efe2efa..fc231ce8989a2 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -32,7 +32,7 @@ use 
sp_runtime::{ }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; -use sp_api::{Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof}; +use sp_api::{Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, ProofInput}; use sp_consensus::RecordProof; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; @@ -189,7 +189,7 @@ where ), ); - let proof = self.api.extract_proof(); + let proof = self.api.extract_proof(ProofInput::None); let state = self.backend.state_at(self.block_id)?; let changes_trie_state = backend::changes_tries_state_at_block( diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index 637b70686c462..e1cfba153faf2 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -21,12 +21,12 @@ use sp_runtime::{ }; use sp_state_machine::{ self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::Backend as _, StorageProof, StorageProofKind, + backend::Backend as _, StorageProof, StorageProofKind, ProofInput, }; use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; use sp_core::{NativeOrEncoded, NeverNativeValue, traits::CodeExecutor}; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{RuntimeApiProofRecorder, InitializeBlock, StorageTransactionCache}; use sc_client_api::{backend, call_executor::CallExecutor, CloneableSpawn}; /// Call executor that executes methods locally, querying all required @@ -126,7 +126,7 @@ where initialize_block: InitializeBlock<'a, Block>, execution_manager: ExecutionManager, native_call: Option, - recorder: &Option<(ProofRecorder, StorageProofKind)>, + recorder: Option<&RefCell>>, extensions: Option, ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { match initialize_block { @@ -144,7 +144,8 @@ where let mut state = self.backend.state_at(*at)?; match recorder { - Some((recorder, 
target_proof_kind)) => { + Some(recorder) => { + let RuntimeApiProofRecorder{ recorder, kind, input} = &mut *recorder.borrow_mut(); let trie_state = state.as_trie_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box @@ -155,27 +156,34 @@ where // recorder. let runtime_code = state_runtime_code.runtime_code()?; + let input_backend = std::mem::replace(input, ProofInput::None); let backend = sp_state_machine::ProvingBackend::new_with_recorder( trie_state, recorder.clone(), - *target_proof_kind, + *kind, + input_backend, ); - - let changes = &mut *changes.borrow_mut(); - let mut state_machine = StateMachine::new( - &backend, - changes_trie_state, - changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ); - // TODO: https://github.com/paritytech/substrate/issues/4455 - // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + let result = { + let changes = &mut *changes.borrow_mut(); + let mut state_machine = StateMachine::new( + &backend, + changes_trie_state, + changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ); + // TODO: https://github.com/paritytech/substrate/issues/4455 + // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) + state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + }; + let (recorder_state, input_state) = backend.recording_state()?; + *recorder = recorder_state; + *input = input_state; + result }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); diff --git a/client/src/client.rs b/client/src/client.rs index 1a3cdef713de0..75f709ffd6f53 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -474,7 +474,7 @@ 
impl Client where Ok(()) }, ())?; - Ok(StorageProof::merge::, _>(proofs, false) + Ok(StorageProof::merge::, _>(proofs, false, false) .map_err(|e| format!("{}", e))?) } @@ -1118,7 +1118,7 @@ impl ProofProvider for Client where call_data: &[u8], kind: StorageProofKind, ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let (merge_kind, prefer_full) = kind.mergeable_kind(); + let (merge_kind, prefer_full, recurse) = kind.mergeable_kind(); // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. // @@ -1139,8 +1139,11 @@ impl ProofProvider for Client where call_data, merge_kind, ).and_then(|(r, p)| { - Ok((r, StorageProof::merge::, _>(vec![p, code_proof], prefer_full) - .map_err(|e| format!("{}", e))?)) + Ok((r, StorageProof::merge::, _>( + vec![p, code_proof], + prefer_full, + recurse, + ).map_err(|e| format!("{}", e))?)) }) } diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 3dbb429bfd3f7..10ceb8d6735a7 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -33,7 +33,7 @@ use sp_state_machine::{ }; use hash_db::Hasher; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{RuntimeApiProofRecorder, InitializeBlock, StorageTransactionCache}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -113,7 +113,7 @@ impl CallExecutor for initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, native_call: Option, - recorder: &Option<(ProofRecorder, StorageProofKind)>, + recorder: Option<&RefCell>>, extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { // there's no actual way/need to specify native/wasm execution strategy on light node @@ -192,7 +192,7 @@ pub fn prove_execution( Box )?; - let (merge_kind, prefer_full) = kind.mergeable_kind(); + let (merge_kind, prefer_full, recurse) = kind.mergeable_kind(); // prepare execution environment + record preparation proof let 
mut changes = Default::default(); let (_, init_proof) = executor.prove_at_trie_state( @@ -214,6 +214,7 @@ pub fn prove_execution( let total_proof = StorageProof::merge::, _>( vec![init_proof, exec_proof], prefer_full, + recurse, ).map_err(|e| format!("{}", e))?; Ok((result, total_proof)) @@ -356,7 +357,7 @@ mod tests { _initialize_block: InitializeBlock<'a, Block>, _execution_manager: ExecutionManager, _native_call: Option, - _proof_recorder: &Option<(ProofRecorder, StorageProofKind)>, + _proof_recorder: Option<&RefCell>>, _extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { unreachable!() diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index d310a6b992f7c..02a5bf21dcac7 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -413,7 +413,7 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { initialized_block: &std::cell::RefCell>>, native_call: Option, context: #crate_::ExecutionContext, - recorder: &Option<(#crate_::ProofRecorder, #crate_::StorageProofKind)>, + recorder: Option<&std::cell::RefCell<#crate_::RuntimeApiProofRecorder>>, ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { let version = call_runtime_at.runtime_version_at(at)?; use #crate_::InitializeBlock; diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 915489282f01c..358c3d78dc612 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -210,7 +210,7 @@ fn generate_runtime_api_base_structures() -> Result { storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache >, - recorder: Option<(#crate_::ProofRecorder, #crate_::StorageProofKind)>, + recorder: Option>>, } // `RuntimeApi` itself is not threadsafe. 
However, an instance is only available in a @@ -280,20 +280,25 @@ fn generate_runtime_api_base_structures() -> Result { } fn record_proof(&mut self, kind: #crate_::StorageProofKind) { - if kind.need_register_full() { - self.recorder = Some((#crate_::ProofRecorder::::Full(Default::default()), kind)); + let recorder = if kind.need_register_full() { + #crate_::ProofRecorder::::Full(Default::default()) } else { - self.recorder = Some((#crate_::ProofRecorder::::Flat(Default::default()), kind)); - } + #crate_::ProofRecorder::::Flat(Default::default()) + }; + self.recorder = Some(std::cell::RefCell::new(#crate_::RuntimeApiProofRecorder { + recorder, + kind, + input: #crate_::ProofInput::None, + })) } - fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + fn extract_proof(&mut self, input: #crate_::ProofInput) -> Option<#crate_::StorageProof> { self.recorder .take() - .and_then(|(recorder, kind)| { - // TODO EMCH this will fail for compact as we need the register - // root - recorder.extract_proof(kind, #crate_::ProofInput::None).ok() + .and_then(|recorder| { + let #crate_::RuntimeApiProofRecorder{ recorder, kind, input } = &mut *recorder.borrow_mut(); + let input = std::mem::replace(input, #crate_::ProofInput::None); + recorder.extract_proof(*kind, input).ok() }) } @@ -357,7 +362,7 @@ fn generate_runtime_api_base_structures() -> Result { &std::cell::RefCell<#crate_::OverlayedChanges>, &std::cell::RefCell<#crate_::StorageTransactionCache>, &std::cell::RefCell>>, - &Option<(#crate_::ProofRecorder, #crate_::StorageProofKind)>, + Option<&std::cell::RefCell<#crate_::RuntimeApiProofRecorder>>, ) -> std::result::Result<#crate_::NativeOrEncoded, E>, E, >( @@ -370,7 +375,7 @@ fn generate_runtime_api_base_structures() -> Result { &self.changes, &self.storage_transaction_cache, &self.initialized_block, - &self.recorder, + self.recorder.as_ref(), ); self.commit_on_ok(&res); diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs 
b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index a90e6e1812bb5..b2c7bb88f8eb9 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -98,7 +98,7 @@ fn implement_common_api_traits( unimplemented!("`record_proof` not implemented for runtime api mocks") } - fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + fn extract_proof(&mut self, _input: #crate_::ProofInput) -> Option<#crate_::StorageProof> { unimplemented!("`extract_proof` not implemented for runtime api mocks") } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index c92f368b9f3ac..d1011b7cab600 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -379,7 +379,7 @@ pub trait ApiExt: ApiErrorExt { /// This stops the proof recording. /// /// If `record_proof` was not called before, this will return `None`. - fn extract_proof(&mut self) -> Option; + fn extract_proof(&mut self, input: ProofInput) -> Option; /// Convert the api object into the storage changes that were done while executing runtime /// api functions. @@ -439,7 +439,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend, StorageProofKind)>, + pub recorder: Option<&'a RefCell>>, } /// Something that can call into the an api at a given block. @@ -517,6 +517,15 @@ pub trait RuntimeApiInfo { const VERSION: u32; } +/// Inner struct for storage of proof management. +/// TODO consider renaming to ProofRecorder (if type alias is not use) +#[cfg(feature = "std")] +pub struct RuntimeApiProofRecorder { + pub recorder: ProofRecorder, + pub kind: StorageProofKind, + pub input: ProofInput, +} + /// Extracts the `Api::Error` for a type that provides a runtime api. 
#[cfg(feature = "std")] pub type ApiErrorFor = <>::Api as ApiErrorExt>::Error; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index fd45409c24027..bd8867b8b20e1 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -503,27 +503,29 @@ where Exec: CodeExecutor + 'static + Clone, N: crate::changes_trie::BlockNumber, { - let proving_backend = proving_backend::ProvingBackend::new( + let mut proving_backend = proving_backend::ProvingBackend::new( trie_backend, kind, ); - let mut sm = StateMachine::<_, H, N, Exec>::new( - &proving_backend, - None, - overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); + let result = { + let mut sm = StateMachine::<_, H, N, Exec>::new( + &proving_backend, + None, + overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ); - let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_wasm(), - None, - )?; - let proof = proving_backend.extract_proof(kind) + sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_wasm(), + None, + )? + }; + let proof = proving_backend.extract_proof() .map_err(|e| Box::new(e) as Box)?; Ok((result.into_encoded(), proof)) } @@ -696,7 +698,7 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new( + let mut proving_backend = proving_backend::ProvingBackend::<_, H>::new( trie_backend, kind, ); @@ -705,7 +707,7 @@ where .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof(kind) + Ok(proving_backend.extract_proof() .map_err(|e| Box::new(e) as Box)?) 
} @@ -723,13 +725,13 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind); + let mut proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof(kind) + Ok(proving_backend.extract_proof() .map_err(|e| Box::new(e) as Box)?) } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index e0db216055745..c2d4152b7d76e 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -143,14 +143,16 @@ impl Clone for ProofRecorder { /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ( - TrieBackend, H>, -); +pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { + trie_backend: TrieBackend, H>, + previous_input: ProofInput, // TODO consider &'a mut previous_input + proof_kind: StorageProofKind, +} /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { backend: &'a S, - proof_recorder: ProofRecorder, + proof_recorder: ProofRecorder, // TODO if removing rwlock, consider &'a mut } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> @@ -163,7 +165,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } else { ProofRecorder::Flat(Default::default()) }; - Self::new_with_recorder(backend, proof_recorder, kind) + Self::new_with_recorder(backend, proof_recorder, kind, ProofInput::None) } /// Create new proving backend with the given recorder. 
@@ -171,6 +173,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: &'a TrieBackend, proof_recorder: ProofRecorder, proof_kind: StorageProofKind, + previous_input: ProofInput, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); @@ -178,22 +181,48 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: essence.backend_storage(), proof_recorder, }; - if let ProofInputKind::ChildTrieRoots = proof_kind.processing_input_kind() { - ProvingBackend(TrieBackend::new_with_roots(recorder, root)) + let trie_backend = if let ProofInputKind::ChildTrieRoots = proof_kind.processing_input_kind() { + TrieBackend::new_with_roots(recorder, root) } else { - ProvingBackend(TrieBackend::new(recorder, root)) + TrieBackend::new(recorder, root) + }; + ProvingBackend { + trie_backend, + previous_input, + proof_kind, } } /// Extracting the gathered unordered proof. - pub fn extract_proof(&self, kind: StorageProofKind) -> Result { - let roots = match kind.processing_input_kind() { + /// TODO remove or make it consiming: here it is doable to get + /// intermediate proof, not sure if of any use. 
+ pub fn extract_proof(&mut self) -> Result { + self.update_input()?; + self.trie_backend.essence().backend_storage().proof_recorder + .extract_proof(self.proof_kind, self.previous_input.clone()) + } + + fn update_input(&mut self) -> Result<(), String> { + let input = match self.proof_kind.processing_input_kind() { ProofInputKind::ChildTrieRoots => { - self.0.extract_registered_roots() + self.trie_backend.extract_registered_roots() }, _ => ProofInput::None, }; - self.0.essence().backend_storage().proof_recorder.extract_proof(kind, roots) + if !self.previous_input.consolidate(input) { + Err("Incompatible inputs".to_string()) + } else { + Ok(()) + } + } + + /// Drop the backend, but keep the state to use it again afterward + pub fn recording_state(mut self) -> Result<(ProofRecorder, ProofInput), String> { + self.update_input()?; + Ok(( + self.trie_backend.essence().backend_storage().proof_recorder.clone(), + self.previous_input + )) } } @@ -201,7 +230,7 @@ impl ProofRecorder where H::Out: Codec, { - /// Extracting the gathered unordered proof. + /// Extracts the gathered unordered proof. 
pub fn extract_proof( &self, kind: StorageProofKind, @@ -270,7 +299,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.storage(key) + self.trie_backend.storage(key) } fn child_storage( @@ -278,7 +307,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.child_storage(child_info, key) + self.trie_backend.child_storage(child_info, key) } fn for_keys_in_child_storage( @@ -286,11 +315,11 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, f: F, ) { - self.0.for_keys_in_child_storage(child_info, f) + self.trie_backend.for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.next_storage_key(key) + self.trie_backend.next_storage_key(key) } fn next_child_storage_key( @@ -298,15 +327,15 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.next_child_storage_key(child_info, key) + self.trie_backend.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_keys_with_prefix(prefix, f) + self.trie_backend.for_keys_with_prefix(prefix, f) } fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_key_values_with_prefix(prefix, f) + self.trie_backend.for_key_values_with_prefix(prefix, f) } fn for_child_keys_with_prefix( @@ -315,15 +344,15 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> prefix: &[u8], f: F, ) { - self.0.for_child_keys_with_prefix( child_info, prefix, f) + self.trie_backend.for_child_keys_with_prefix( child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { - self.0.pairs() + self.trie_backend.pairs() } fn keys(&self, prefix: &[u8]) -> Vec> { - self.0.keys(prefix) + self.trie_backend.keys(prefix) } fn child_keys( @@ -331,13 +360,13 @@ impl<'a, S, H> 
Backend for ProvingBackend<'a, S, H> child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.0.child_keys(child_info, prefix) + self.trie_backend.child_keys(child_info, prefix) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)> { - self.0.storage_root(delta) + self.trie_backend.storage_root(delta) } fn child_storage_root( @@ -349,13 +378,13 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> I: IntoIterator, Option>)>, H::Out: Ord { - self.0.child_storage_root(child_info, delta) + self.trie_backend.child_storage_root(child_info, delta) } fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } fn usage_info(&self) -> crate::stats::UsageInfo { - self.0.usage_info() + self.trie_backend.usage_info() } } @@ -418,26 +447,26 @@ mod tests { fn proof_is_empty_until_value_is_read() { let trie_backend = test_trie(); let kind = StorageProofKind::Flatten; - assert!(test_proving(&trie_backend, kind).extract_proof(kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); let kind = StorageProofKind::Full; - assert!(test_proving(&trie_backend, kind).extract_proof(kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); let kind = StorageProofKind::TrieSkipHashesFull; - assert!(test_proving(&trie_backend, kind).extract_proof(kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); let kind = StorageProofKind::TrieSkipHashes; - assert!(test_proving(&trie_backend, kind).extract_proof(kind).unwrap().is_empty()); + assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { let trie_backend = test_trie(); let kind = StorageProofKind::Flatten; - let backend = test_proving(&trie_backend, kind); + let mut backend = test_proving(&trie_backend, kind); 
assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof(kind).unwrap().is_empty()); + assert!(!backend.extract_proof().unwrap().is_empty()); let kind = StorageProofKind::Full; - let backend = test_proving(&trie_backend, kind); + let mut backend = test_proving(&trie_backend, kind); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof(kind).unwrap().is_empty()); + assert!(!backend.extract_proof().unwrap().is_empty()); } #[test] @@ -481,10 +510,10 @@ mod tests { (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); let test = |kind: StorageProofKind| { - let proving = ProvingBackend::new(trie, kind); + let mut proving = ProvingBackend::new(trie, kind); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof(kind).unwrap(); + let proof = proving.extract_proof().unwrap(); let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); @@ -536,10 +565,10 @@ mod tests { )); let test = |kind: StorageProofKind| { - let proving = ProvingBackend::new(trie, kind); + let mut proving = ProvingBackend::new(trie, kind); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof(kind).unwrap(); + let proof = proving.extract_proof().unwrap(); let proof_check = create_proof_check_backend::( in_memory_root.into(), @@ -551,10 +580,10 @@ mod tests { assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); assert_eq!(proof_check.storage(&[64]).unwrap(), None); - let proving = ProvingBackend::new(trie, kind); + let mut proving = ProvingBackend::new(trie, kind); assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); - let proof = proving.extract_proof(kind).unwrap(); + let proof = proving.extract_proof().unwrap(); if kind.use_full_partial_db().unwrap() { let 
proof_check = create_proof_check_backend::( in_memory_root.into(), diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 42c6dca5334fb..eec8ea466a538 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use sp_std::collections::btree_map::BTreeMap; +use sp_std::collections::{btree_map::BTreeMap, btree_map}; use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; use sp_std::convert::TryInto; @@ -170,6 +170,7 @@ impl StorageProofKind { } } +#[derive(Clone)] /// Additional information needed for packing or unpacking. /// These do not need to be part of the proof but are required /// when using the proof. @@ -189,6 +190,46 @@ pub enum Input { QueryPlan(ChildrenProofMap<(Vec, Vec>)>), } +impl Input { + #[must_use] + /// Update input with new content. + /// Return false on failure. + /// Fail when the content differs, except for `None` input + /// that is always reassignable. + /// + /// Not that currently all query plan input are not mergeable + /// even if it could in the future. + pub fn consolidate(&mut self, other: Self) -> bool { + match self { + Input::None => { + *self = other; + }, + Input::ChildTrieRoots(children) => { + match other { + Input::None => (), + Input::ChildTrieRoots(children_other) => { + for (child_info, root) in children_other { + match children.entry(child_info) { + btree_map::Entry::Occupied(v) => if v.get() != &root { + return false; + }, + btree_map::Entry::Vacant(v) => { + v.insert(root); + }, + } + } + }, + Input::QueryPlan(..) => return false, + Input::QueryPlanWithValues(..) => return false, + } + }, + Input::QueryPlan(..) => return false, + Input::QueryPlanWithValues(..) => return false, + } + true + } +} + /// Kind for designing an `Input` variant. pub enum InputKind { /// `Input::None` kind. 
@@ -296,12 +337,14 @@ impl StorageProofKind { } /// Return the best kind to use for merging later, and - /// wether the merge should produce full proof. - pub fn mergeable_kind(&self) -> (Self, bool) { + /// wether the merge should produce full proof, and if + /// we are recursing. + pub fn mergeable_kind(&self) -> (Self, bool, bool) { match self { - StorageProofKind::TrieSkipHashes => (StorageProofKind::TrieSkipHashesForMerge, false), - StorageProofKind::TrieSkipHashesFull => (StorageProofKind::TrieSkipHashesForMerge, true), - s => (*s, s.use_full_partial_db().unwrap_or(false)) + StorageProofKind::TrieSkipHashes => (StorageProofKind::TrieSkipHashesForMerge, false, false), + StorageProofKind::TrieSkipHashesFull => (StorageProofKind::TrieSkipHashesForMerge, true, false), + StorageProofKind::TrieSkipHashesForMerge => (StorageProofKind::TrieSkipHashesForMerge, true, true), + s => (*s, s.use_full_partial_db().unwrap_or(false), false) } } } @@ -644,8 +687,7 @@ impl StorageProof { let mut result = ChildrenProofMap::default(); for (child_info, set) in collected.iter() { let root = roots.get(&child_info.proof_info()) - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .ok_or_else(|| missing_pack_input())?; + .ok_or_else(|| missing_pack_input())?.clone(); let trie_nodes: HashMap<_, _> = set .iter() .filter_map(|(k, v)| v.as_ref().map(|v| (k.encode(), v.to_vec()))) @@ -742,7 +784,7 @@ impl StorageProof { /// The function cannot pack back proof as it does not have reference to additional information /// needed. So for this the additional information need to be merged separately and the result /// of this merge be packed with it afterward. 
- pub fn merge(proofs: I, prefer_full: bool) -> Result + pub fn merge(proofs: I, prefer_full: bool, recurse: bool) -> Result where I: IntoIterator, H: Hasher, @@ -818,6 +860,9 @@ impl StorageProof { } } if let Some(children) = packable_child_sets { + if recurse { + return Ok(StorageProof::TrieSkipHashesForMerge(children)) + } if prefer_full { let mut result = ChildrenProofMap::default(); for (child_info, (set, root)) in children.into_iter() { From d6c82f5e508b083e3dcf843d54e792518907b762 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 22 Apr 2020 18:42:37 +0200 Subject: [PATCH 120/185] remove rwlock for collecting roots --- primitives/state-machine/src/trie_backend.rs | 7 +++---- .../state-machine/src/trie_backend_essence.rs | 14 +++++++------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 09dca0deb6991..18af6aec768cb 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -27,8 +27,7 @@ use crate::{ StorageKey, StorageValue, Backend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, }; -use std::sync::Arc; -use parking_lot::RwLock; +use std::cell::RefCell; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { @@ -46,7 +45,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec /// Activate storage of roots (can be use /// to pack proofs and does small caching of child trie root)). 
pub fn new_with_roots(storage: S, root: H::Out) -> Self { - let register_roots = Some(Arc::new(RwLock::new(Default::default()))); + let register_roots = Some(RefCell::new(Default::default())); TrieBackend { essence: TrieBackendEssence::new(storage, root, register_roots), } @@ -58,7 +57,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec let mut dest = ChildrenProofMap::default(); dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); let roots = { - std::mem::replace(&mut *register_roots.write(), Default::default()) + std::mem::replace(&mut *register_roots.borrow_mut(), Default::default()) }; for (child_info, root) in roots.into_iter() { if let Some(root) = root { diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 86e271301c3a7..07a9d8a776795 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -29,7 +29,7 @@ use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::{Decode, Encode}; -use parking_lot::RwLock; +use std::cell::RefCell; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { @@ -43,7 +43,7 @@ pub struct TrieBackendEssence, H: Hasher> { root: H::Out, /// If defined, we store encoded visited roots for top_trie and child trie in this /// map. It also act as a cache. 
- pub register_roots: Option>>>>, + pub register_roots: Option>>>, } impl, H: Hasher> TrieBackendEssence where H::Out: Decode + Encode { @@ -51,7 +51,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub fn new( storage: S, root: H::Out, - register_roots: Option>>>>, + register_roots: Option>>>, ) -> Self { TrieBackendEssence { storage, @@ -84,7 +84,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Access the root of the child storage in its parent trie pub(crate) fn child_root_encoded(&self, child_info: &ChildInfo) -> Result, String> { if let Some(cache) = self.register_roots.as_ref() { - if let Some(result) = cache.read().get(child_info) { + if let Some(result) = cache.borrow().get(child_info) { return Ok(result.clone()); } } @@ -92,7 +92,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let root: Option = self.storage(&child_info.prefixed_storage_key()[..])?; if let Some(cache) = self.register_roots.as_ref() { - cache.write().insert(child_info.clone(), root.clone()); + cache.borrow_mut().insert(child_info.clone(), root.clone()); } Ok(root) @@ -101,7 +101,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Access the root of the child storage in its parent trie fn child_root(&self, child_info: &ChildInfo) -> Result, String> { if let Some(cache) = self.register_roots.as_ref() { - if let Some(root) = cache.read().get(child_info) { + if let Some(root) = cache.borrow().get(child_info) { let root = root.as_ref() .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); return Ok(root); @@ -110,7 +110,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let encoded_root = self.storage(&child_info.prefixed_storage_key()[..])?; if let Some(cache) = self.register_roots.as_ref() { - cache.write().insert(child_info.clone(), encoded_root.clone()); + cache.borrow_mut().insert(child_info.clone(), encoded_root.clone()); } let root: Option = encoded_root From f0a84552f7aea449a130c9f8b38fdb97b382fe79 Mon Sep 17 00:00:00 2001 From: cheme 
Date: Wed, 22 Apr 2020 20:08:18 +0200 Subject: [PATCH 121/185] triedbmut requiring hashdb block refacto (it should require a variant of hashdb that is not sync and mut). --- client/src/call_executor.rs | 2 +- .../state-machine/src/proving_backend.rs | 43 +++++++++--------- primitives/state-machine/src/trie_backend.rs | 12 +++-- .../state-machine/src/trie_backend_essence.rs | 44 +++++++++---------- 4 files changed, 54 insertions(+), 47 deletions(-) diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index e1cfba153faf2..d3f33bf3ce739 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -225,7 +225,7 @@ where .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } - fn prove_at_trie_state>>( + fn prove_at_trie_state>>( &self, trie_state: &sp_state_machine::TrieBackend>, overlay: &mut OverlayedChanges, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index c2d4152b7d76e..eb373e46f4f52 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -16,11 +16,10 @@ //! Proving state machine backend. -use std::sync::Arc; -use parking_lot::RwLock; +use std::cell::RefCell; use codec::{Decode, Codec}; use log::debug; -use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; +use hash_db::{Hasher, HashDBRef, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys, StorageProofKind, StorageProof, ProofInputKind, ProofInput, @@ -120,9 +119,9 @@ pub enum ProofRecorder { // root of each child is added to be able to pack. /// Proof keep a separation between child trie content, this is usually useless, /// but when we use proof compression we want this separation. - Full(Arc>>>), + Full(RefCell>>), /// Single level of storage for all recoded nodes. 
- Flat(Arc>>), + Flat(RefCell>), } impl Default for ProofRecorder { @@ -131,16 +130,19 @@ impl Default for ProofRecorder { ProofRecorder::Flat(Default::default()) } } - -impl Clone for ProofRecorder { +/* +impl Clone for ProofRecorder + where + H::Out: Clone, +{ fn clone(&self) -> Self { match self { - ProofRecorder::Full(a) => ProofRecorder::Full(a.clone()), - ProofRecorder::Flat(a) => ProofRecorder::Flat(a.clone()), + ProofRecorder::Full(a) => ProofRecorder::Full(RefCell::new((*a.borrow()).clone())), + ProofRecorder::Flat(a) => ProofRecorder::Flat(RefCell::new(a.borrow().clone())), } } } - +*/ /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -219,10 +221,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Drop the backend, but keep the state to use it again afterward pub fn recording_state(mut self) -> Result<(ProofRecorder, ProofInput), String> { self.update_input()?; - Ok(( - self.trie_backend.essence().backend_storage().proof_recorder.clone(), - self.previous_input - )) + let proof_recorder = self.trie_backend + .into_essence() + .into_storage() + .proof_recorder; + Ok((proof_recorder, self.previous_input)) } } @@ -238,12 +241,12 @@ impl ProofRecorder ) -> Result { Ok(match self { ProofRecorder::Flat(rec) => StorageProof::extract_proof_from_flat( - &*rec.read(), + &*rec.borrow(), kind, &input, ).map_err(|e| format!("{}", e))?, ProofRecorder::Full(rec) => StorageProof::extract_proof( - &*rec.read(), + &*rec.borrow(), kind, &input, ).map_err(|e| format!("{}", e))?, @@ -259,19 +262,19 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { match &self.proof_recorder { ProofRecorder::Flat(rec) => { - if let Some(v) = rec.read().get(key) { + 
if let Some(v) = (**rec.borrow()).get(key) { return Ok(v.clone()); } let backend_value = self.backend.get(child_info, key, prefix)?; - rec.write().insert(key.clone(), backend_value.clone()); + rec.borrow_mut().insert(key.clone(), backend_value.clone()); Ok(backend_value) }, ProofRecorder::Full(rec) => { - if let Some(v) = rec.read().get(child_info).and_then(|s| s.get(key)) { + if let Some(v) = rec.borrow().get(child_info).and_then(|s| (**s).get(key)) { return Ok(v.clone()); } let backend_value = self.backend.get(child_info, key, prefix)?; - rec.write().entry(child_info.clone()) + rec.borrow_mut().entry(child_info.clone()) .or_default() .insert(key.clone(), backend_value.clone()); Ok(backend_value) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 18af6aec768cb..d8c5fb9226f79 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -23,10 +23,9 @@ use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_roo use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; -use crate::{ - StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, -}; +use crate::{StorageKey, StorageValue, Backend}; +use crate::trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, + Ephemeral}; use std::cell::RefCell; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. @@ -75,6 +74,11 @@ impl, H: Hasher> TrieBackend where H::Out: Codec &self.essence } + /// Extracte essence + pub fn into_essence(self) -> TrieBackendEssence { + self.essence + } + /// Get backend storage reference. 
pub fn backend_storage(&self) -> &S { self.essence.backend_storage() diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 07a9d8a776795..46bf13180df2c 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -412,28 +412,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { - Some(val) - } else { - let top; - let child_info = if let Some(child_info) = self.child_info { - child_info - } else { - top = ChildInfo::top_trie(); - &top - }; - match self.storage.get(child_info, &key, prefix) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } - } + hash_db::HashDBRef::get(self, key, prefix) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::get(self, key, prefix).is_some() + hash_db::HashDBRef::contains(self, key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { @@ -453,16 +436,33 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) + if let Some(val) = hash_db::HashDBRef::get(self.overlay, key, prefix) { + Some(val) + } else { + let top; + let child_info = if let Some(child_info) = self.child_info { + child_info + } else { + top = ChildInfo::top_trie(); + &top + }; + match self.storage.get(child_info, &key, prefix) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } + } } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) + hash_db::HashDBRef::get(self, key, prefix).is_some() } } /// Key-value pairs storage that is used by trie backend 
essence. -pub trait TrieBackendStorage: Send + Sync { +pub trait TrieBackendStorage { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. From facd6f625644617c5b7623705cfdff8d32332986 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 22 Apr 2020 20:09:19 +0200 Subject: [PATCH 122/185] Revert "triedbmut requiring hashdb block refacto (it should require a variant" This reverts commit f0a84552f7aea449a130c9f8b38fdb97b382fe79. --- client/src/call_executor.rs | 2 +- .../state-machine/src/proving_backend.rs | 43 +++++++++--------- primitives/state-machine/src/trie_backend.rs | 12 ++--- .../state-machine/src/trie_backend_essence.rs | 44 +++++++++---------- 4 files changed, 47 insertions(+), 54 deletions(-) diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index d3f33bf3ce739..e1cfba153faf2 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -225,7 +225,7 @@ where .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } - fn prove_at_trie_state>>( + fn prove_at_trie_state>>( &self, trie_state: &sp_state_machine::TrieBackend>, overlay: &mut OverlayedChanges, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index eb373e46f4f52..c2d4152b7d76e 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -16,10 +16,11 @@ //! Proving state machine backend. 
-use std::cell::RefCell; +use std::sync::Arc; +use parking_lot::RwLock; use codec::{Decode, Codec}; use log::debug; -use hash_db::{Hasher, HashDBRef, EMPTY_PREFIX, Prefix}; +use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys, StorageProofKind, StorageProof, ProofInputKind, ProofInput, @@ -119,9 +120,9 @@ pub enum ProofRecorder { // root of each child is added to be able to pack. /// Proof keep a separation between child trie content, this is usually useless, /// but when we use proof compression we want this separation. - Full(RefCell>>), + Full(Arc>>>), /// Single level of storage for all recoded nodes. - Flat(RefCell>), + Flat(Arc>>), } impl Default for ProofRecorder { @@ -130,19 +131,16 @@ impl Default for ProofRecorder { ProofRecorder::Flat(Default::default()) } } -/* -impl Clone for ProofRecorder - where - H::Out: Clone, -{ + +impl Clone for ProofRecorder { fn clone(&self) -> Self { match self { - ProofRecorder::Full(a) => ProofRecorder::Full(RefCell::new((*a.borrow()).clone())), - ProofRecorder::Flat(a) => ProofRecorder::Flat(RefCell::new(a.borrow().clone())), + ProofRecorder::Full(a) => ProofRecorder::Full(a.clone()), + ProofRecorder::Flat(a) => ProofRecorder::Flat(a.clone()), } } } -*/ + /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. 
pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -221,11 +219,10 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Drop the backend, but keep the state to use it again afterward pub fn recording_state(mut self) -> Result<(ProofRecorder, ProofInput), String> { self.update_input()?; - let proof_recorder = self.trie_backend - .into_essence() - .into_storage() - .proof_recorder; - Ok((proof_recorder, self.previous_input)) + Ok(( + self.trie_backend.essence().backend_storage().proof_recorder.clone(), + self.previous_input + )) } } @@ -241,12 +238,12 @@ impl ProofRecorder ) -> Result { Ok(match self { ProofRecorder::Flat(rec) => StorageProof::extract_proof_from_flat( - &*rec.borrow(), + &*rec.read(), kind, &input, ).map_err(|e| format!("{}", e))?, ProofRecorder::Full(rec) => StorageProof::extract_proof( - &*rec.borrow(), + &*rec.read(), kind, &input, ).map_err(|e| format!("{}", e))?, @@ -262,19 +259,19 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { match &self.proof_recorder { ProofRecorder::Flat(rec) => { - if let Some(v) = (**rec.borrow()).get(key) { + if let Some(v) = rec.read().get(key) { return Ok(v.clone()); } let backend_value = self.backend.get(child_info, key, prefix)?; - rec.borrow_mut().insert(key.clone(), backend_value.clone()); + rec.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) }, ProofRecorder::Full(rec) => { - if let Some(v) = rec.borrow().get(child_info).and_then(|s| (**s).get(key)) { + if let Some(v) = rec.read().get(child_info).and_then(|s| s.get(key)) { return Ok(v.clone()); } let backend_value = self.backend.get(child_info, key, prefix)?; - rec.borrow_mut().entry(child_info.clone()) + rec.write().entry(child_info.clone()) .or_default() .insert(key.clone(), backend_value.clone()); Ok(backend_value) diff --git 
a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index d8c5fb9226f79..18af6aec768cb 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -23,9 +23,10 @@ use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_roo use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; -use crate::{StorageKey, StorageValue, Backend}; -use crate::trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, - Ephemeral}; +use crate::{ + StorageKey, StorageValue, Backend, + trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, +}; use std::cell::RefCell; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. @@ -74,11 +75,6 @@ impl, H: Hasher> TrieBackend where H::Out: Codec &self.essence } - /// Extracte essence - pub fn into_essence(self) -> TrieBackendEssence { - self.essence - } - /// Get backend storage reference. 
pub fn backend_storage(&self) -> &S { self.essence.backend_storage() diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 46bf13180df2c..07a9d8a776795 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -412,11 +412,28 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDBRef::get(self, key, prefix) + if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { + Some(val) + } else { + let top; + let child_info = if let Some(child_info) = self.child_info { + child_info + } else { + top = ChildInfo::top_trie(); + &top + }; + match self.storage.get(child_info, &key, prefix) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } + } } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDBRef::contains(self, key, prefix) + hash_db::HashDB::get(self, key, prefix).is_some() } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { @@ -436,33 +453,16 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if let Some(val) = hash_db::HashDBRef::get(self.overlay, key, prefix) { - Some(val) - } else { - let top; - let child_info = if let Some(child_info) = self.child_info { - child_info - } else { - top = ChildInfo::top_trie(); - &top - }; - match self.storage.get(child_info, &key, prefix) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } - } + hash_db::HashDB::get(self, key, prefix) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDBRef::get(self, key, prefix).is_some() + hash_db::HashDB::contains(self, key, prefix) } } /// Key-value pairs storage that is used by trie backend 
essence. -pub trait TrieBackendStorage { +pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. From 2fdb0f8571b84e73c7014f2c5d659b5ebb675cb8 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 22 Apr 2020 20:21:35 +0200 Subject: [PATCH 123/185] actually need rwlock for client backend --- primitives/state-machine/src/trie_backend.rs | 6 +++--- .../state-machine/src/trie_backend_essence.rs | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 18af6aec768cb..8cb9adeb7fcab 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -27,7 +27,7 @@ use crate::{ StorageKey, StorageValue, Backend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, }; -use std::cell::RefCell; +use parking_lot::RwLock; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { @@ -45,7 +45,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec /// Activate storage of roots (can be use /// to pack proofs and does small caching of child trie root)). 
pub fn new_with_roots(storage: S, root: H::Out) -> Self { - let register_roots = Some(RefCell::new(Default::default())); + let register_roots = Some(RwLock::new(Default::default())); TrieBackend { essence: TrieBackendEssence::new(storage, root, register_roots), } @@ -57,7 +57,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec let mut dest = ChildrenProofMap::default(); dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); let roots = { - std::mem::replace(&mut *register_roots.borrow_mut(), Default::default()) + std::mem::replace(&mut *register_roots.write(), Default::default()) }; for (child_info, root) in roots.into_iter() { if let Some(root) = root { diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 07a9d8a776795..25cb166b588ad 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -29,7 +29,7 @@ use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::{Decode, Encode}; -use std::cell::RefCell; +use parking_lot::RwLock; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { @@ -43,7 +43,7 @@ pub struct TrieBackendEssence, H: Hasher> { root: H::Out, /// If defined, we store encoded visited roots for top_trie and child trie in this /// map. It also act as a cache. 
- pub register_roots: Option>>>, + pub register_roots: Option>>>, } impl, H: Hasher> TrieBackendEssence where H::Out: Decode + Encode { @@ -51,7 +51,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub fn new( storage: S, root: H::Out, - register_roots: Option>>>, + register_roots: Option>>>, ) -> Self { TrieBackendEssence { storage, @@ -84,7 +84,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Access the root of the child storage in its parent trie pub(crate) fn child_root_encoded(&self, child_info: &ChildInfo) -> Result, String> { if let Some(cache) = self.register_roots.as_ref() { - if let Some(result) = cache.borrow().get(child_info) { + if let Some(result) = cache.read().get(child_info) { return Ok(result.clone()); } } @@ -92,7 +92,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let root: Option = self.storage(&child_info.prefixed_storage_key()[..])?; if let Some(cache) = self.register_roots.as_ref() { - cache.borrow_mut().insert(child_info.clone(), root.clone()); + cache.write().insert(child_info.clone(), root.clone()); } Ok(root) @@ -101,7 +101,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Access the root of the child storage in its parent trie fn child_root(&self, child_info: &ChildInfo) -> Result, String> { if let Some(cache) = self.register_roots.as_ref() { - if let Some(root) = cache.borrow().get(child_info) { + if let Some(root) = cache.read().get(child_info) { let root = root.as_ref() .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); return Ok(root); @@ -110,7 +110,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let encoded_root = self.storage(&child_info.prefixed_storage_key()[..])?; if let Some(cache) = self.register_roots.as_ref() { - cache.borrow_mut().insert(child_info.clone(), encoded_root.clone()); + cache.write().insert(child_info.clone(), encoded_root.clone()); } let root: Option = encoded_root From 2e4ed7dca15cd6cf1e2382421d96dd63bd4d26d3 Mon Sep 17 00:00:00 2001 From: cheme 
Date: Thu, 23 Apr 2020 10:02:46 +0200 Subject: [PATCH 124/185] fix doc test --- client/basic-authorship/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 5ec0bc6f9a520..b396d613144a8 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -20,7 +20,7 @@ //! //! ``` //! # use sc_basic_authorship::ProposerFactory; -//! # use sp_consensus::{Environment, Proposer, RecordProof}; +//! # use sp_consensus::{Environment, Proposer, RecordProof, StorageProofKind}; //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; //! # use substrate_test_runtime_client::{self, runtime::{Extrinsic, Transfer}, AccountKeyring}; @@ -44,7 +44,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes, +//! RecordProof::Yes(StorageProofKind::TrieSkipHashes), //! ); //! //! // We wait until the proposition is performed. 
From f465388cef5e35a22a0ef682d38e8f5fd6dd0a8b Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 23 Apr 2020 11:14:43 +0200 Subject: [PATCH 125/185] Complete merge --- client/network/src/protocol.rs | 9 --------- client/network/src/protocol/light_client_handler.rs | 2 +- client/rpc/src/state/state_full.rs | 6 ++++-- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 8326468f7b1b9..b1e368d4e70d3 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -54,13 +54,7 @@ use std::sync::Arc; use std::fmt::Write; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; -<<<<<<< HEAD -use crate::chain::{Client, FinalityProofProvider}; use sc_client_api::{ChangesProof, StorageProof, StorageProofKind}; -use crate::error; -======= -use sc_client_api::{ChangesProof, StorageProof}; ->>>>>>> master use util::LruHashSet; use wasm_timer::Instant; @@ -1577,10 +1571,7 @@ impl Protocol { &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), -<<<<<<< HEAD StorageProofKind::TrieSkipHashes, -======= ->>>>>>> master )) { Ok(proof) => proof, Err(error) => { diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 70b5326e5accf..9991daf5d4d6d 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -634,7 +634,7 @@ where let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( &BlockId::Hash(block), &child_info, - &mut request.keys.iter().map(AsRef::as_ref) + &mut request.keys.iter().map(AsRef::as_ref), StorageProofKind::TrieSkipHashes, )) { Ok(proof) => proof, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 4546692b7bd18..e75e46771ba7c 100644 --- a/client/rpc/src/state/state_full.rs +++ 
b/client/rpc/src/state/state_full.rs @@ -41,7 +41,7 @@ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err}; use std::marker::PhantomData; -use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider, ProofProvider}; +use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider, ProofProvider, StorageProofKind}; /// Ranges to query in state_queryStorage. struct QueryStorageRange { @@ -363,8 +363,10 @@ impl StateBackend for FullState Date: Mon, 4 May 2020 18:24:35 +0200 Subject: [PATCH 126/185] fix merge --- client/service/src/chain_ops.rs | 19 +++--- client/service/src/client/call_executor.rs | 1 + client/service/test/src/client/light.rs | 11 +-- primitives/state-machine/src/lib.rs | 17 ++--- primitives/state-machine/src/trie_backend.rs | 6 +- .../state-machine/src/trie_backend_essence.rs | 67 ++++++++++++++----- primitives/storage/src/lib.rs | 2 +- 7 files changed, 75 insertions(+), 48 deletions(-) diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs index 612e9310d182b..b7fb85490be28 100644 --- a/client/service/src/chain_ops.rs +++ b/client/service/src/chain_ops.rs @@ -33,7 +33,7 @@ use sp_consensus::{ import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, }; use sc_executor::{NativeExecutor, NativeExecutionDispatch}; -use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; +use sp_core::storage::{StorageKey, ChildType, ChildInfo, Storage, StorageChild, StorageMap, PrefixedStorageKey}; use sc_client_api::{StorageProvider, BlockBackend, UsageProvider}; use std::{io::{Read, Write, Seek}, pin::Pin, collections::HashMap}; @@ -311,19 +311,18 @@ impl< let empty_key = StorageKey(Vec::new()); let mut top_storage = self.client.storage_pairs(&block, &empty_key)?; let mut children_default = HashMap::new(); - // Remove all default child storage roots from the top 
storage and collect the child storage // pairs. - while let Some(pos) = top_storage - .iter() - .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { - let (key, _) = top_storage.swap_remove(pos); - - let key = StorageKey( - key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), - ); + while let Some((pos, child_type, unprefixed_key)) = top_storage + .iter().enumerate() + .find_map(|(i, (k, _))| ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(&k.0)) + .map(|(t, k)| (i, t, k))) { + debug_assert!(child_type == ChildType::ParentKeyId); + let key = StorageKey(unprefixed_key.to_vec()); let child_info = ChildInfo::new_default(&key.0); + top_storage.remove(pos); + let keys = self.client.child_storage_keys(&block, &child_info, &empty_key)?; let mut pairs = StorageMap::new(); keys.into_iter().try_for_each(|k| { diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 84bad09833a81..5d6c8c1b89ab7 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -179,6 +179,7 @@ where input_backend, ); let result = { + use std::borrow::BorrowMut; let changes = &mut *changes.borrow_mut(); let mut state_machine = StateMachine::new( &backend, diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index aa14d22cbdf95..053a01fcd512d 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -33,11 +33,11 @@ use parking_lot::Mutex; use substrate_test_runtime_client::{ runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, }; -use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOverlayedChanges}; +use sp_api::{InitializeBlock, StorageTransactionCache, RuntimeApiProofRecorder, OffchainOverlayedChanges}; use sp_consensus::{BlockOrigin}; use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, 
NativeVersion}; use sp_core::{H256, tasks::executor as tasks_executor, NativeOrEncoded}; -use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest}; +use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, StorageProofKind}; use sp_externalities::Extensions; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::{ @@ -53,7 +53,8 @@ use substrate_test_runtime_client::{ AccountKeyring, runtime::{self, Extrinsic}, }; -use sp_core::{blake2_256, ChangesTrieConfiguration, storage::{well_known_keys, StorageKey, ChildInfo}}; +use sp_core::{blake2_256, ChangesTrieConfiguration}; +use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; use sp_state_machine::Backend as _; pub type DummyBlockchain = Blockchain; @@ -222,7 +223,7 @@ impl CallExecutor for DummyCallExecutor { _initialize_block: InitializeBlock<'a, Block>, _execution_manager: ExecutionManager, _native_call: Option, - _proof_recorder: &Option>, + _proof_recorder: Option<&RefCell>>, _extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { unreachable!() @@ -237,7 +238,7 @@ impl CallExecutor for DummyCallExecutor { _trie_state: &sp_state_machine::TrieBackend>, _overlay: &mut OverlayedChanges, _method: &str, - _call_data: &[u8] + _call_data: &[u8], 
_kind: StorageProofKind, ) -> Result<(Vec, StorageProof), ClientError> { unreachable!() diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index f7de726d51ebb..f64e09e6c933b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -512,24 +512,13 @@ where N: crate::changes_trie::BlockNumber, { let mut offchain_overlay = OffchainOverlayedChanges::default(); - let proving_backend = proving_backend::ProvingBackend::new(trie_backend, kind); - let mut sm = StateMachine::<_, H, N, Exec>::new( - &proving_backend, - None, - overlay, - &mut offchain_overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); + let mut proving_backend = proving_backend::ProvingBackend::new(trie_backend, kind); let result = { let mut sm = StateMachine::<_, H, N, Exec>::new( &proving_backend, None, overlay, + &mut offchain_overlay, exec, method, call_data, @@ -612,10 +601,12 @@ where Exec: CodeExecutor + Clone + 'static, N: crate::changes_trie::BlockNumber, { + let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut sm = StateMachine::<_, H, N, Exec>::new( trie_backend, None, overlay, + &mut offchain_overlay, exec, method, call_data, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 067725328b1e2..792e4204c91e1 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -160,7 +160,8 @@ impl, H: Hasher> Backend for TrieBackend where fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { let collect_all = || -> Result<_, Box>> { - let trie = TrieDB::::new(self.essence(), self.essence.root())?; + let backend = self.essence().top_backend(); + let trie = TrieDB::::new(&backend, self.essence.root())?; let mut v = Vec::new(); for x in trie.iter()? 
{ let (key, value) = x?; @@ -181,7 +182,8 @@ impl, H: Hasher> Backend for TrieBackend where fn keys(&self, prefix: &[u8]) -> Vec { let collect_all = || -> Result<_, Box>> { - let trie = TrieDB::::new(self.essence(), self.essence.root())?; + let backend = self.essence().top_backend(); + let trie = TrieDB::::new(&backend, self.essence.root())?; let mut v = Vec::new(); for x in trie.iter()? { let (key, _) = x?; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 26704b34e5878..6a278307934c1 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -21,7 +21,7 @@ use std::ops::Deref; use std::sync::Arc; use std::marker::PhantomData; use log::{debug, warn}; -use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; +use hash_db::{self, Hasher, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, ChildrenProofMap, empty_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; @@ -47,7 +47,29 @@ pub struct TrieBackendEssence, H: Hasher> { pub register_roots: Option>>>, } +/// Patricia trie-based pairs storage essence, with reference to child info. +pub struct ChildTrieBackendEssence<'a, S: TrieBackendStorage, H: Hasher> { + pub essence: &'a TrieBackendEssence, + pub child_info: Option<&'a ChildInfo>, +} + impl, H: Hasher> TrieBackendEssence where H::Out: Decode + Encode { + /// Get trie backend for top trie. + pub fn top_backend(&self) -> ChildTrieBackendEssence { + ChildTrieBackendEssence{ + essence: self, + child_info: None, + } + } + + /// Get trie backend for child trie. + pub fn child_backend<'a>(&'a self, child_info: &'a ChildInfo) -> ChildTrieBackendEssence<'a, S, H> { + ChildTrieBackendEssence{ + essence: self, + child_info: Some(child_info), + } + } + /// Create new trie-based backend. 
pub fn new( storage: S, @@ -155,11 +177,15 @@ impl, H: Hasher> TrieBackendEssence where H::Out: ) -> Result, String> { let dyn_eph: &dyn hash_db::HashDBRef<_, _>; let keyspace_eph; + let top_backend; + let child_backend; if let Some(child_info) = child_info.as_ref() { - keyspace_eph = KeySpacedDB::new(self, child_info.keyspace()); + child_backend = self.child_backend(&child_info); + keyspace_eph = KeySpacedDB::new(&child_backend, child_info.keyspace()); dyn_eph = &keyspace_eph; } else { - dyn_eph = self; + top_backend = self.top_backend(); + dyn_eph = &top_backend; } let trie = TrieDB::::new(dyn_eph, root) @@ -195,7 +221,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: pub fn storage(&self, key: &[u8]) -> Result, String> { let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value::, _>(self, &self.root, key).map_err(map_e) + read_trie_value::, _>(&self.top_backend(), &self.root, key).map_err(map_e) } /// Get the value of child storage at given key. @@ -209,7 +235,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(child_info.keyspace(), self, &root, key) + read_child_trie_value::, _>(child_info.keyspace(), &self.child_backend(child_info), &root, key) .map_err(map_e) } @@ -229,7 +255,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: if let Err(e) = for_keys_in_child_trie::, _, _>( child_info.keyspace(), - self, + &self.child_backend(child_info), &root, f, ) { @@ -283,10 +309,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; let result = if let Some(child_info) = child_info { - let db = KeySpacedDB::new(self, child_info.keyspace()); + let backend = self.child_backend(&child_info); + let db = KeySpacedDB::new(&backend, child_info.keyspace()); iter(&db) } else { - iter(self) + iter(&self.top_backend()) }; if let Err(e) = result { debug!(target: "trie", "Error while iterating by prefix: {}", e); @@ -431,21 +458,28 @@ impl TrieBackendStorage for 
ChildrenProofMap> { } } -impl, H: Hasher> hash_db::AsHashDB - for TrieBackendEssence +impl<'a, S: TrieBackendStorage, H: Hasher> hash_db::AsHashDB + for ChildTrieBackendEssence<'a, S, H> { fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } -impl, H: Hasher> hash_db::HashDB - for TrieBackendEssence +impl<'a, S: TrieBackendStorage, H: Hasher> hash_db::HashDB + for ChildTrieBackendEssence<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if *key == self.empty { + if *key == self.essence.empty { return Some([0u8].to_vec()) } - match self.storage.get(&key, prefix) { + let top; + let child_info = if let Some(child_info) = self.child_info { + child_info + } else { + top = ChildInfo::top_trie(); + &top + }; + match self.essence.storage.get(child_info, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -471,8 +505,8 @@ impl, H: Hasher> hash_db::HashDB } } -impl, H: Hasher> hash_db::HashDBRef - for TrieBackendEssence +impl<'a, S: TrieBackendStorage, H: Hasher> hash_db::HashDBRef + for ChildTrieBackendEssence<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { hash_db::HashDB::get(self, key, prefix) @@ -483,7 +517,6 @@ impl, H: Hasher> hash_db::HashDBRef } } - #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index ea3fd78751399..2d0fa2dea0a03 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -361,7 +361,7 @@ impl ChildType { /// is one. 
pub fn parent_prefix(&self) -> &'static [u8] { match self { - &ChildType::ParentKeyId => well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX, + &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, } } } From cb93d5316f0880274046dc24d2b0bd281756c0de Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 18 May 2020 16:36:28 +0200 Subject: [PATCH 127/185] remove unused parameter, rename api proof recorder --- client/api/src/call_executor.rs | 4 +-- client/block-builder/src/lib.rs | 4 +-- client/service/src/client/call_executor.rs | 6 ++-- .../service/src/client/light/call_executor.rs | 4 +-- client/service/test/src/client/light.rs | 4 +-- .../api/proc-macro/src/decl_runtime_apis.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 19 ++++-------- .../proc-macro/src/mock_impl_runtime_apis.rs | 2 +- primitives/api/src/lib.rs | 30 +++++++++++++------ primitives/trie/src/storage_proof.rs | 6 ++-- 10 files changed, 42 insertions(+), 39 deletions(-) diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 53a457e6abc95..e2a1eb4eac906 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -30,7 +30,7 @@ use sc_executor::{RuntimeVersion, NativeVersion}; use sp_externalities::Extensions; use sp_core::{NativeOrEncoded,offchain::storage::OffchainOverlayedChanges}; -use sp_api::{RuntimeApiProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; /// Executor Provider @@ -93,7 +93,7 @@ pub trait CallExecutor { initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, - proof_recorder: Option<&RefCell>>, + proof_recorder: Option<&RefCell>>, extensions: Option, ) -> sp_blockchain::Result> where ExecutionManager: Clone; diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 1cc25076ee4b8..2bdcd1e15a265 100644 --- 
a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -33,7 +33,7 @@ use sp_runtime::{ }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; -use sp_api::{Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, ProofInput}; +use sp_api::{Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof}; use sp_consensus::RecordProof; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; @@ -190,7 +190,7 @@ where ), ); - let proof = self.api.extract_proof(ProofInput::None); + let proof = self.api.extract_proof(); let state = self.backend.state_at(self.block_id)?; let changes_trie_state = backend::changes_tries_state_at_block( diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index e97b17b61fc56..7f07407610b8d 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -27,7 +27,7 @@ use sp_state_machine::{ use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; use sp_core::{NativeOrEncoded, NeverNativeValue, traits::CodeExecutor, offchain::storage::OffchainOverlayedChanges}; -use sp_api::{RuntimeApiProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use sc_client_api::{backend, call_executor::CallExecutor, CloneableSpawn}; use super::client::ClientConfig; @@ -139,7 +139,7 @@ where initialize_block: InitializeBlock<'a, Block>, execution_manager: ExecutionManager, native_call: Option, - recorder: Option<&RefCell>>, + recorder: Option<&RefCell>>, extensions: Option, ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { match initialize_block { @@ -161,7 +161,7 @@ where match recorder { Some(recorder) => { - let RuntimeApiProofRecorder{ recorder, kind, input} = &mut *recorder.borrow_mut(); + let ProofRecorder{ recorder, kind, input} = &mut 
*recorder.borrow_mut(); let trie_state = state.as_trie_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box diff --git a/client/service/src/client/light/call_executor.rs b/client/service/src/client/light/call_executor.rs index 57afa182f8416..74709f76e5199 100644 --- a/client/service/src/client/light/call_executor.rs +++ b/client/service/src/client/light/call_executor.rs @@ -34,7 +34,7 @@ use sp_state_machine::{ }; use hash_db::Hasher; -use sp_api::{RuntimeApiProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -115,7 +115,7 @@ impl CallExecutor for initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, native_call: Option, - recorder: Option<&RefCell>>, + recorder: Option<&RefCell>>, extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { // there's no actual way/need to specify native/wasm execution strategy on light node diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index ecc3627361216..2e43414e1b7ef 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -36,7 +36,7 @@ use parking_lot::Mutex; use substrate_test_runtime_client::{ runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, }; -use sp_api::{InitializeBlock, StorageTransactionCache, RuntimeApiProofRecorder, OffchainOverlayedChanges}; +use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOverlayedChanges}; use sp_consensus::{BlockOrigin}; use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; use sp_core::{H256, tasks::executor as tasks_executor, NativeOrEncoded}; @@ -226,7 +226,7 @@ impl CallExecutor for DummyCallExecutor { _initialize_block: InitializeBlock<'a, Block>, _execution_manager: ExecutionManager, _native_call: 
Option, - _proof_recorder: Option<&RefCell>>, + _proof_recorder: Option<&RefCell>>, _extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { unreachable!() diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 5a4a37bd77f78..6ceed91cacab8 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -415,7 +415,7 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { initialized_block: &std::cell::RefCell>>, native_call: Option, context: #crate_::ExecutionContext, - recorder: Option<&std::cell::RefCell<#crate_::RuntimeApiProofRecorder>>, + recorder: Option<&std::cell::RefCell<#crate_::ProofRecorder>>, ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { let version = call_runtime_at.runtime_version_at(at)?; use #crate_::InitializeBlock; diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index b975de3272399..8ecc8d6ce4e33 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -212,7 +212,7 @@ fn generate_runtime_api_base_structures() -> Result { storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache >, - recorder: Option>>, + recorder: Option>>, } // `RuntimeApi` itself is not threadsafe. 
However, an instance is only available in a @@ -282,23 +282,14 @@ fn generate_runtime_api_base_structures() -> Result { } fn record_proof(&mut self, kind: #crate_::StorageProofKind) { - let recorder = if kind.need_register_full() { - #crate_::ProofRecorder::::Full(Default::default()) - } else { - #crate_::ProofRecorder::::Flat(Default::default()) - }; - self.recorder = Some(std::cell::RefCell::new(#crate_::RuntimeApiProofRecorder { - recorder, - kind, - input: #crate_::ProofInput::None, - })) + self.recorder = Some(std::cell::RefCell::new(kind.into())); } - fn extract_proof(&mut self, input: #crate_::ProofInput) -> Option<#crate_::StorageProof> { + fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() .and_then(|recorder| { - let #crate_::RuntimeApiProofRecorder{ recorder, kind, input } = &mut *recorder.borrow_mut(); + let #crate_::ProofRecorder{ recorder, kind, input } = &mut *recorder.borrow_mut(); let input = std::mem::replace(input, #crate_::ProofInput::None); recorder.extract_proof(*kind, input).ok() }) @@ -366,7 +357,7 @@ fn generate_runtime_api_base_structures() -> Result { &std::cell::RefCell<#crate_::OffchainOverlayedChanges>, &std::cell::RefCell<#crate_::StorageTransactionCache>, &std::cell::RefCell>>, - Option<&std::cell::RefCell<#crate_::RuntimeApiProofRecorder>>, + Option<&std::cell::RefCell<#crate_::ProofRecorder>>, ) -> std::result::Result<#crate_::NativeOrEncoded, E>, E, >( diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 0d7a4c558ac66..4664a295483bc 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -99,7 +99,7 @@ fn implement_common_api_traits( unimplemented!("`record_proof` not implemented for runtime api mocks") } - fn extract_proof(&mut self, _input: #crate_::ProofInput) -> Option<#crate_::StorageProof> { + fn extract_proof(&mut self) -> 
Option<#crate_::StorageProof> { unimplemented!("`extract_proof` not implemented for runtime api mocks") } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index d90d4bfa38987..561714ef6ba45 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -301,10 +301,6 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// ``` pub use sp_api_proc_macro::mock_impl_runtime_apis; -/// A type that records all accessed trie nodes and generates a proof out of it. -#[cfg(feature = "std")] -pub type ProofRecorder = sp_state_machine::ProofRecorder>; - /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] pub type StorageTransactionCache = @@ -385,7 +381,7 @@ pub trait ApiExt: ApiErrorExt { /// This stops the proof recording. /// /// If `record_proof` was not called before, this will return `None`. - fn extract_proof(&mut self, input: ProofInput) -> Option; + fn extract_proof(&mut self) -> Option; /// Convert the api object into the storage changes that were done while executing runtime /// api functions. @@ -447,7 +443,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>>, + pub recorder: Option<&'a RefCell>>, } /// Something that can call into the an api at a given block. @@ -526,14 +522,30 @@ pub trait RuntimeApiInfo { } /// Inner struct for storage of proof management. 
-/// TODO consider renaming to ProofRecorder (if type alias is not use) #[cfg(feature = "std")] -pub struct RuntimeApiProofRecorder { - pub recorder: ProofRecorder, +pub struct ProofRecorder { + pub recorder: sp_state_machine::ProofRecorder>, pub kind: StorageProofKind, pub input: ProofInput, } +#[cfg(feature = "std")] +impl From for ProofRecorder { + fn from(kind: StorageProofKind) -> Self { + let recorder = if kind.need_register_full() { + sp_state_machine::ProofRecorder::>::Full(Default::default()) + } else { + sp_state_machine::ProofRecorder::>::Flat(Default::default()) + }; + + ProofRecorder { + recorder, + kind, + input: ProofInput::None, + } + } +} + /// Extracts the `Api::Error` for a type that provides a runtime api. #[cfg(feature = "std")] pub type ApiErrorFor = <>::Api as ApiErrorExt>::Error; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index eec8ea466a538..b8dfcaf0f226e 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -28,10 +28,10 @@ use trie_db::DBValue; // no_std use of trie_db. #[cfg(not(feature = "std"))] use hashbrown::HashMap; - + #[cfg(feature = "std")] use std::collections::HashMap; - + type Result = sp_std::result::Result; type CodecResult = sp_std::result::Result; @@ -123,7 +123,7 @@ pub enum StorageProofKind { KnownQueryPlanAndValues, /// Technical only - + /// Kind for `StorageProof::TrieSkipHashesForMerge`. 
TrieSkipHashesForMerge = 125, From be1fc618b57b84e24b39848f3014d1d797f3cb05 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 18 May 2020 17:36:50 +0200 Subject: [PATCH 128/185] restore to default proof --- client/api/src/cht.rs | 2 +- client/api/src/lib.rs | 2 +- client/basic-authorship/src/lib.rs | 2 +- client/network/src/light_client_handler.rs | 22 +++++++++---------- client/network/src/protocol.rs | 6 ++--- client/service/test/src/client/light.rs | 2 -- primitives/state-machine/src/lib.rs | 9 ++++---- .../state-machine/src/proving_backend.rs | 4 ++-- primitives/storage/Cargo.toml | 3 +++ primitives/trie/src/lib.rs | 2 +- primitives/trie/src/storage_proof.rs | 1 + 11 files changed, 29 insertions(+), 26 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 57aa82964b3ff..5c14c172fa00e 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -122,7 +122,7 @@ pub fn build_proof( prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ).map_err(ClientError::Execution) } diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 04cfd5dd6d10a..38fe961ed34ff 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -37,7 +37,7 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{StorageProof, +pub use sp_state_machine::{StorageProof, LegacyDecodeAdapter, LegacyEncodeAdapter, StorageProofKind, ExecutionStrategy, CloneableSpawn}; /// Usage Information Provider interface diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 12e67eb58c724..691dd16c564c6 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -45,7 +45,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes(StorageProofKind::TrieSkipHashes), +//! 
RecordProof::Yes(StorageProofKind::Flatten), //! ); //! //! // We wait until the proposition is performed. diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 5a688d2e7aaee..e3dd866fdc696 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -56,7 +56,7 @@ use libp2p::{ use nohash_hasher::IntMap; use prost::Message; use sc_client_api::{ - StorageProof, StorageProofKind, + StorageProof, StorageProofKind, LegacyDecodeAdapter, LegacyEncodeAdapter, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -444,7 +444,7 @@ where match response.response { Some(Response::RemoteCallResponse(response)) => if let Request::Call { request , .. } = request { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { @@ -453,12 +453,12 @@ where Some(Response::RemoteReadResponse(response)) => match request { Request::Read { request, .. } => { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } Request::ReadChild { request, .. } => { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } @@ -467,7 +467,7 @@ where Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. 
} = request { let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = StorageProof::decode(&mut response.roots_proof.as_ref())?; + let roots_proof = LegacyDecodeAdapter::decode(&mut response.roots_proof.as_ref())?.0; let roots = { let mut r = BTreeMap::new(); for pair in response.roots { @@ -495,7 +495,7 @@ where } else { Some(Decode::decode(&mut response.header.as_ref())?) }; - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { @@ -564,7 +564,7 @@ where }; let response = { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteCallResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteCallResponse(r) }; @@ -606,7 +606,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -656,7 +656,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -684,7 +684,7 @@ where }; let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; + let r = schema::v1::light::RemoteHeaderResponse { header, proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteHeaderResponse(r) }; @@ -744,7 +744,7 @@ where roots: proof.roots.into_iter() .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), - roots_proof: proof.roots_proof.encode(), + 
roots_proof: LegacyEncodeAdapter(&proof.roots_proof).encode(), }; schema::v1::light::response::Response::RemoteChangesResponse(r) }; diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9b7dd31981241..22a7f682d8c20 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1446,7 +1446,7 @@ impl Protocol { &BlockId::Hash(request.block), &request.method, &request.data, - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1593,7 +1593,7 @@ impl Protocol { let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok(proof) => proof, Err(error) => { @@ -1649,7 +1649,7 @@ impl Protocol { &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, )) { Ok(proof) => proof, Err(error) => { diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 2e43414e1b7ef..8b531afd800c0 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -390,7 +390,6 @@ fn execution_proof_is_generated_and_checked() { let kinds = [ StorageProofKind::Flatten, StorageProofKind::TrieSkipHashes, - //StorageProofKind::KnownQueryPlanAndValues, ]; for kind in &kinds { @@ -461,7 +460,6 @@ const KINDS: [StorageProofKind; 4] = [ StorageProofKind::Full, StorageProofKind::TrieSkipHashes, StorageProofKind::TrieSkipHashesFull, - //StorageProofKind::KnownQueryPlanAndValues, // this is currently unsupported ]; type TestChecker = LightDataChecker< diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 710176dfc612e..419bcc4c57a54 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -45,7 +45,8 @@ mod 
trie_backend_essence; mod stats; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, - StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind}; + StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, + LegacyDecodeAdapter, LegacyEncodeAdapter}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; @@ -587,7 +588,7 @@ where } /// Check execution proof on proving backend, generated by `prove_execution` call. -pub fn execution_flat_proof_check_on_trie_backend( +fn execution_flat_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, @@ -853,7 +854,7 @@ where } /// Check child storage read proof on pre-created flat proving backend. -pub fn read_child_proof_check_on_flat_proving_backend( +fn read_child_proof_check_on_flat_proving_backend( proving_backend: &TrieBackend, H>, child_info: &ChildInfo, key: &[u8], @@ -867,7 +868,7 @@ where } /// Check child storage read proof on pre-created proving backend. -pub fn read_child_proof_check_on_proving_backend( +fn read_child_proof_check_on_proving_backend( proving_backend: &TrieBackend>, H>, child_info: &ChildInfo, key: &[u8], diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 1e962e6e81a23..0f082caee1ee9 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -146,14 +146,14 @@ impl Clone for ProofRecorder { /// These can be sent to remote node and used as a proof of execution. pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { trie_backend: TrieBackend, H>, - previous_input: ProofInput, // TODO consider &'a mut previous_input + previous_input: ProofInput, proof_kind: StorageProofKind, } /// Trie backend storage with its proof recorder. 
pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { backend: &'a S, - proof_recorder: ProofRecorder, // TODO if removing rwlock, consider &'a mut + proof_recorder: ProofRecorder, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index e916f1a3dc0a3..57b89419a4ef7 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -9,6 +9,9 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-storage/" +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + [dependencies] codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } sp-std = { version = "2.0.0-dev", default-features = false, path = "../std" } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index d1ec6beb75145..8285bd741b44f 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -38,7 +38,7 @@ pub use trie_stream::TrieStream; pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, LegacyStorageProof, ChildrenProofMap, StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, - RecordMapTrieNodes}; + RecordMapTrieNodes, LegacyDecodeAdapter, LegacyEncodeAdapter}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index b8dfcaf0f226e..076f97eb688ed 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -412,6 +412,7 @@ pub enum StorageProof { /// A legacy encoding of proof, it is the same as the inner encoding /// of `StorageProof::Flatten`. +/// TODO EMCH consider removing, encoding adapter should be enough. 
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct LegacyStorageProof { trie_nodes: Vec>, From 16554e5fb8365fda4d34b417c069f16d8f33566c Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 18 May 2020 18:08:29 +0200 Subject: [PATCH 129/185] removing Legacy storage proof struct, codec adapters are enough --- .../state-machine/src/proving_backend.rs | 2 - primitives/storage/Cargo.toml | 3 -- primitives/trie/src/lib.rs | 2 +- primitives/trie/src/storage_proof.rs | 45 ++----------------- 4 files changed, 4 insertions(+), 48 deletions(-) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 0f082caee1ee9..61ae096a6cdd7 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -195,8 +195,6 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } /// Extracting the gathered unordered proof. - /// TODO remove or make it consiming: here it is doable to get - /// intermediate proof, not sure if of any use. pub fn extract_proof(&mut self) -> Result { self.update_input()?; self.trie_backend.essence().backend_storage().proof_recorder diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 57b89419a4ef7..eebb4cc7c313d 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -23,6 +23,3 @@ sp-debug-derive = { version = "2.0.0-dev", path = "../debug-derive" } [features] default = [ "std" ] std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8285bd741b44f..6551de0278b39 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,7 +36,7 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; -pub use storage_proof::{StorageProof, LegacyStorageProof, ChildrenProofMap, +pub use storage_proof::{StorageProof, ChildrenProofMap, StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, LegacyDecodeAdapter, LegacyEncodeAdapter}; /// Various re-exports from the `trie-db` crate. diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 076f97eb688ed..3c86af00b30d3 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -366,7 +366,6 @@ type ProofCompacted = Vec>; pub enum StorageProof { /// Single flattened proof component, all default child trie are flattened over a same /// container, no child trie information is provided. - /// This is the same representation as the `LegacyStorageProof`. Flatten(ProofNodes), /// This skip encoding of hashes that are @@ -410,31 +409,6 @@ pub enum StorageProof { TrieSkipHashesFull(ChildrenProofMap), } -/// A legacy encoding of proof, it is the same as the inner encoding -/// of `StorageProof::Flatten`. -/// TODO EMCH consider removing, encoding adapter should be enough. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct LegacyStorageProof { - trie_nodes: Vec>, -} - -impl LegacyStorageProof { - /// Create a empty proof. - pub fn empty() -> Self { - LegacyStorageProof { trie_nodes: Default::default() } - } - - /// Create a proof from encoded trie nodes. - pub fn new(trie_nodes: Vec>) -> Self { - LegacyStorageProof { trie_nodes } - } - - /// Convert to a `StorageProof`. - pub fn to_storage_proof(self) -> StorageProof { - StorageProof::Flatten(self.trie_nodes) - } -} - impl Decode for StorageProof { fn decode(value: &mut I) -> CodecResult { let kind = value.read_byte()?; @@ -1017,15 +991,6 @@ impl StorageProof { } Ok(db) } - - /// Cast a flatten proof to a legacy one. 
- pub fn legacy(self) -> Result { - if let StorageProof::Flatten(trie_nodes) = self { - Ok(LegacyStorageProof{ trie_nodes }) - } else { - Err(error("Cannot use as legacy proof")) - } - } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to @@ -1190,20 +1155,16 @@ fn legacy_proof_codec() { // random content for proof, we test serialization let content = vec![b"first".to_vec(), b"second".to_vec()]; - let legacy = LegacyStorageProof::new(content.clone()); - let encoded_legacy = legacy.encode(); let proof = StorageProof::Flatten(content.clone()); let encoded_proof = proof.encode(); - assert_eq!(StorageProof::decode(&mut &encoded_proof[..]).unwrap(), proof); - // test encoded minus first bytes equal to storage proof - assert_eq!(&encoded_legacy[..], &encoded_proof[1..]); - // test adapter let encoded_adapter = LegacyEncodeAdapter(&proof).encode(); + + assert_eq!(StorageProof::decode(&mut &encoded_proof[..]).unwrap(), proof); assert_eq!(encoded_adapter[0], 0); assert_eq!(&encoded_adapter[1..], &encoded_proof[..]); + let adapter_proof = LegacyDecodeAdapter(proof); - assert_eq!(LegacyDecodeAdapter::decode(&mut &encoded_legacy[..]).unwrap(), adapter_proof); assert_eq!(LegacyDecodeAdapter::decode(&mut &encoded_adapter[..]).unwrap(), adapter_proof); } From cb7a0b5e8505cb676998b43ea200bcfac149a88b Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 May 2020 10:01:12 +0200 Subject: [PATCH 130/185] Legacy compatibility. Note that in this state the PR does change light client encoding to byte prefixed one. 
--- client/network/src/light_client_handler.rs | 2 +- primitives/trie/src/storage_proof.rs | 33 +++++++++++++++++++++- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index e3dd866fdc696..d483bd0c4189b 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -1352,7 +1352,7 @@ mod tests { type Swarm = libp2p::swarm::Swarm; fn empty_proof() -> Vec { - StorageProof::empty().encode() + LegacyEncodeAdapter(&StorageProof::empty()).encode() } fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 3c86af00b30d3..91ec415465418 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -411,7 +411,14 @@ pub enum StorageProof { impl Decode for StorageProof { fn decode(value: &mut I) -> CodecResult { - let kind = value.read_byte()?; + let kind = match value.read_byte() { + Ok(kind) => kind, + Err(_) => { + // we allow empty proof to decode to encoded empty proof for + // compatibility with legacy encoding. + return Ok(StorageProof::Flatten(Vec::new())); + }, + }; Ok(match StorageProofKind::read_from_byte(kind) .ok_or_else(|| codec::Error::from("Invalid storage kind"))? 
{ StorageProofKind::Flatten => StorageProof::Flatten(Decode::decode(value)?), @@ -1152,6 +1159,30 @@ impl HashDBRef for ProofMapTrieNodes #[test] fn legacy_proof_codec() { + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + struct OldStorageProof { + trie_nodes: Vec>, + } + + let old_empty = OldStorageProof { + trie_nodes: Default::default(), + }.encode(); + + assert_eq!(&old_empty[..], &[0][..]); + + let adapter_proof = LegacyDecodeAdapter(StorageProof::Flatten(Vec::new())); + assert_eq!(LegacyDecodeAdapter::decode(&mut &old_empty[..]).unwrap(), adapter_proof); + + let old_one = OldStorageProof { + trie_nodes: vec![vec![4u8, 5u8]], + }.encode(); + + assert_eq!(&old_one[..], &[4, 8, 4, 5][..]); + + let adapter_proof = LegacyDecodeAdapter(StorageProof::Flatten(vec![vec![4u8, 5u8]])); + assert_eq!(LegacyDecodeAdapter::decode(&mut &old_one[..]).unwrap(), adapter_proof); + + // random content for proof, we test serialization let content = vec![b"first".to_vec(), b"second".to_vec()]; From 9a9c9828e62c7cc8cf9cb9e3eab1f521af73b61e Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 May 2020 10:42:40 +0200 Subject: [PATCH 131/185] revert to former light client proof encoding --- client/api/src/lib.rs | 2 +- client/network/src/light_client_handler.rs | 9 +++++---- primitives/state-machine/src/lib.rs | 2 +- primitives/trie/src/lib.rs | 2 +- primitives/trie/src/storage_proof.rs | 19 ++++++++++++++++--- 5 files changed, 24 insertions(+), 10 deletions(-) diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 38fe961ed34ff..0eeadce174464 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -38,7 +38,7 @@ pub use notifications::*; pub use proof_provider::*; pub use sp_state_machine::{StorageProof, LegacyDecodeAdapter, LegacyEncodeAdapter, - StorageProofKind, ExecutionStrategy, CloneableSpawn}; + FlattenEncodeAdapter, StorageProofKind, ExecutionStrategy, CloneableSpawn}; /// Usage Information Provider interface /// diff --git 
a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index d483bd0c4189b..1586620a20904 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -56,7 +56,8 @@ use libp2p::{ use nohash_hasher::IntMap; use prost::Message; use sc_client_api::{ - StorageProof, StorageProofKind, LegacyDecodeAdapter, LegacyEncodeAdapter, + StorageProof, StorageProofKind, LegacyDecodeAdapter, + FlattenEncodeAdapter as LegacyEncodeAdapter, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -549,7 +550,7 @@ where &BlockId::Hash(block), &request.method, &request.data, - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok((_, proof)) => proof, Err(e) => { @@ -592,7 +593,7 @@ where let proof = match self.chain.read_proof( &BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok(proof) => proof, Err(error) => { @@ -641,7 +642,7 @@ where &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, )) { Ok(proof) => proof, Err(error) => { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 419bcc4c57a54..8ff1e957751bd 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -46,7 +46,7 @@ mod stats; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, - LegacyDecodeAdapter, LegacyEncodeAdapter}; + LegacyDecodeAdapter, LegacyEncodeAdapter, FlattenEncodeAdapter}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 6551de0278b39..bb2c40c3687dd 100644 --- 
a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -38,7 +38,7 @@ pub use trie_stream::TrieStream; pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, ChildrenProofMap, StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, - RecordMapTrieNodes, LegacyDecodeAdapter, LegacyEncodeAdapter}; + RecordMapTrieNodes, LegacyDecodeAdapter, LegacyEncodeAdapter, FlattenEncodeAdapter}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 91ec415465418..b16a430bf7766 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -114,13 +114,13 @@ const fn no_partial_db_support() -> Error { #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum StorageProofKind { /// Kind for `StorageProof::Flatten`. - Flatten, + Flatten = 1, /// Kind for `StorageProof::TrieSkipHashes`. - TrieSkipHashes, + TrieSkipHashes = 2, /// Kind for `StorageProof::KnownQueryPlanAndValues`. - KnownQueryPlanAndValues, + KnownQueryPlanAndValues = 3, /// Technical only @@ -462,6 +462,19 @@ impl<'a> Encode for LegacyEncodeAdapter<'a> { } } +/// This encodes only if storage proof if it is guarantied +/// to be a flatten proof. +pub struct FlattenEncodeAdapter<'a>(pub &'a StorageProof); + +impl<'a> Encode for FlattenEncodeAdapter<'a> { + fn encode_to(&self, dest: &mut T) { + match self.0 { + StorageProof::Flatten(nodes) => nodes.encode_to(dest), + _ => panic!("Usage of flatten encoder on non flatten proof"), + } + } +} + #[cfg_attr(test, derive(Debug, PartialEq, Eq))] /// Decode variant of `LegacyEncodeAdapter`. pub struct LegacyDecodeAdapter(pub StorageProof); From 109267b42fa9fd6da96f13ef2b4ac0d9d1792dc0 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 May 2020 11:16:41 +0200 Subject: [PATCH 132/185] Revert protocol format to. 
--- client/network/src/protocol.rs | 10 +++++----- client/network/src/protocol/message.rs | 5 ++++- primitives/trie/src/storage_proof.rs | 8 ++++++++ 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 22a7f682d8c20..4aba49a41f6e0 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1467,7 +1467,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1612,7 +1612,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1669,7 +1669,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1699,7 +1699,7 @@ impl Protocol { GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1762,7 +1762,7 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, + roots_proof: proof.roots_proof.expect_flatten_content(), }), ); } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 155e945f4eec5..231bf2013bc79 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -27,7 +27,10 @@ pub use self::generic::{ FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; -use sc_client_api::StorageProof; + +/// Forme storage proof type, to be replace by +/// `use sc_client_api::StorageProof`; +type StorageProof = Vec>; /// A unique ID of a request. 
pub type RequestId = u64; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index b16a430bf7766..f4862324b5cdb 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -1011,6 +1011,14 @@ impl StorageProof { } Ok(db) } + + /// Get flatten content form proof. + pub fn expect_flatten_content(self) -> Vec> { + match self { + StorageProof::Flatten(proof) => proof, + _ => panic!("Flat proof expected"), + } + } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to From 6bd14691538aeded737709df3de9a1f6fed995a0 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 May 2020 11:55:51 +0200 Subject: [PATCH 133/185] also use non breakable finality proof --- client/finality-grandpa/src/finality_proof.rs | 14 +++++++------- client/finality-grandpa/src/light_import.rs | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 2d0e569cf4ca4..03abd42a0e185 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -228,7 +228,7 @@ pub(crate) struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option, + pub authorities_proof: Option>>, } /// Proof of finality is the ordered set of finality fragments, where: @@ -344,7 +344,7 @@ pub(crate) fn prove_finality, J>( block: current, justification, unknown_headers: ::std::mem::take(&mut unknown_headers), - authorities_proof: new_authorities_proof, + authorities_proof: new_authorities_proof.map(StorageProof::expect_flatten_content), }; // append justification to finality proof if required @@ -511,7 +511,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - new_authorities_proof, + StorageProof::Flatten(new_authorities_proof), )?; current_set_id += 1; @@ -853,14 +853,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![vec![50]])), + authorities_proof: Some(vec![vec![50]]), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::Flatten(vec![vec![70]])), + authorities_proof: Some(vec![vec![70]]), }, ]); @@ -935,7 +935,7 @@ pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), + authorities_proof: Some(vec![vec![42]]), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -985,7 +985,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), + authorities_proof: 
Some(vec![vec![42]]), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 41e381a6ce32b..25389f8e2dae3 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -573,7 +573,7 @@ pub mod tests { use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof}; + use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore}; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::{ @@ -867,7 +867,7 @@ pub mod tests { Vec::new(), ).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![])), + authorities_proof: Some(vec![]), }, ].encode(), &mut verifier, From f298e6b1628d0e67db52bab1b8936c29d2a8751c Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 May 2020 14:06:53 +0200 Subject: [PATCH 134/185] Switch to compact proof everywhere --- bin/node/cli/src/service.rs | 2 +- client/api/src/cht.rs | 2 +- client/basic-authorship/src/lib.rs | 2 +- client/block-builder/src/lib.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 16 +++++----- client/finality-grandpa/src/light_import.rs | 4 +-- client/finality-grandpa/src/tests.rs | 2 +- client/network/src/light_client_handler.rs | 31 +++++++++---------- client/network/src/protocol.rs | 16 +++++----- client/network/src/protocol/message.rs | 5 +-- client/rpc/src/state/state_full.rs | 2 +- primitives/trie/src/storage_proof.rs | 8 ----- 12 files changed, 40 insertions(+), 52 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 69bae09f1e6cf..af3c0949ac5d0 100644 --- a/bin/node/cli/src/service.rs 
+++ b/bin/node/cli/src/service.rs @@ -589,7 +589,7 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), - RecordProof::Yes(StorageProofKind::Flatten), + RecordProof::Yes(StorageProofKind::TrieSkipHashes), ).await }).expect("Error making test block").block; diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 5c14c172fa00e..57aa82964b3ff 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -122,7 +122,7 @@ pub fn build_proof( prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ).map_err(ClientError::Execution) } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 691dd16c564c6..12e67eb58c724 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -45,7 +45,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes(StorageProofKind::Flatten), +//! RecordProof::Yes(StorageProofKind::TrieSkipHashes), //! ); //! //! // We wait until the proposition is performed. 
diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 2bdcd1e15a265..723f191530317 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -231,7 +231,7 @@ mod tests { &client, client.info().best_hash, client.info().best_number, - RecordProof::Yes(sp_api::StorageProofKind::Flatten), + RecordProof::Yes(sp_api::StorageProofKind::TrieSkipHashes), Default::default(), &*backend, ).unwrap().build().unwrap(); diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 03abd42a0e185..9609f0090c30b 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -97,7 +97,7 @@ impl AuthoritySetForFinalityProver for Arc) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::Flatten) + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::TrieSkipHashes) } } @@ -228,7 +228,7 @@ pub(crate) struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option>>, + pub authorities_proof: Option, } /// Proof of finality is the ordered set of finality fragments, where: @@ -344,7 +344,7 @@ pub(crate) fn prove_finality, J>( block: current, justification, unknown_headers: ::std::mem::take(&mut unknown_headers), - authorities_proof: new_authorities_proof.map(StorageProof::expect_flatten_content), + authorities_proof: new_authorities_proof, }; // append justification to finality proof if required @@ -511,7 +511,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - StorageProof::Flatten(new_authorities_proof), + new_authorities_proof, )?; current_set_id += 1; @@ -853,14 +853,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(vec![vec![50]]), + authorities_proof: Some(StorageProof::Flatten(vec![vec![50]])), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(vec![vec![70]]), + authorities_proof: Some(StorageProof::Flatten(vec![vec![70]])), }, ]); @@ -935,7 +935,7 @@ pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(vec![vec![42]]), + authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -985,7 +985,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(vec![vec![42]]), + authorities_proof: 
Some(StorageProof::Flatten(vec![vec![42]])), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 25389f8e2dae3..41e381a6ce32b 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -573,7 +573,7 @@ pub mod tests { use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore}; + use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof}; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::{ @@ -867,7 +867,7 @@ pub mod tests { Vec::new(), ).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(vec![]), + authorities_proof: Some(StorageProof::Flatten(vec![])), }, ].encode(), &mut verifier, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 1d36de0fe4281..cb7b58189b7b0 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -248,7 +248,7 @@ impl AuthoritySetForFinalityProver for TestApi { let backend = >>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); - let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::Flatten) + let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::TrieSkipHashes) .expect("failure proving read from in-memory storage backend"); Ok(proof) } diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 1586620a20904..5a688d2e7aaee 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -56,8 +56,7 @@ use libp2p::{ use 
nohash_hasher::IntMap; use prost::Message; use sc_client_api::{ - StorageProof, StorageProofKind, LegacyDecodeAdapter, - FlattenEncodeAdapter as LegacyEncodeAdapter, + StorageProof, StorageProofKind, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -445,7 +444,7 @@ where match response.response { Some(Response::RemoteCallResponse(response)) => if let Request::Call { request , .. } = request { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { @@ -454,12 +453,12 @@ where Some(Response::RemoteReadResponse(response)) => match request { Request::Read { request, .. } => { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } Request::ReadChild { request, .. } => { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } @@ -468,7 +467,7 @@ where Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. } = request { let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = LegacyDecodeAdapter::decode(&mut response.roots_proof.as_ref())?.0; + let roots_proof = StorageProof::decode(&mut response.roots_proof.as_ref())?; let roots = { let mut r = BTreeMap::new(); for pair in response.roots { @@ -496,7 +495,7 @@ where } else { Some(Decode::decode(&mut response.header.as_ref())?) 
}; - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { @@ -550,7 +549,7 @@ where &BlockId::Hash(block), &request.method, &request.data, - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok((_, proof)) => proof, Err(e) => { @@ -565,7 +564,7 @@ where }; let response = { - let r = schema::v1::light::RemoteCallResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteCallResponse(r) }; @@ -593,7 +592,7 @@ where let proof = match self.chain.read_proof( &BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok(proof) => proof, Err(error) => { @@ -607,7 +606,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -642,7 +641,7 @@ where &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, )) { Ok(proof) => proof, Err(error) => { @@ -657,7 +656,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -685,7 +684,7 @@ where }; let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; 
schema::v1::light::response::Response::RemoteHeaderResponse(r) }; @@ -745,7 +744,7 @@ where roots: proof.roots.into_iter() .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), - roots_proof: LegacyEncodeAdapter(&proof.roots_proof).encode(), + roots_proof: proof.roots_proof.encode(), }; schema::v1::light::response::Response::RemoteChangesResponse(r) }; @@ -1353,7 +1352,7 @@ mod tests { type Swarm = libp2p::swarm::Swarm; fn empty_proof() -> Vec { - LegacyEncodeAdapter(&StorageProof::empty()).encode() + StorageProof::empty().encode() } fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 4aba49a41f6e0..9b7dd31981241 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1446,7 +1446,7 @@ impl Protocol { &BlockId::Hash(request.block), &request.method, &request.data, - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1467,7 +1467,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof, }), ); } @@ -1593,7 +1593,7 @@ impl Protocol { let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok(proof) => proof, Err(error) => { @@ -1612,7 +1612,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof, }), ); } @@ -1649,7 +1649,7 @@ impl Protocol { &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, )) { Ok(proof) => proof, Err(error) => { @@ -1669,7 +1669,7 @@ impl Protocol { None, 
GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof, }), ); } @@ -1699,7 +1699,7 @@ impl Protocol { GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof: proof.expect_flatten_content(), + proof, }), ); } @@ -1762,7 +1762,7 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof.expect_flatten_content(), + roots_proof: proof.roots_proof, }), ); } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 231bf2013bc79..155e945f4eec5 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -27,10 +27,7 @@ pub use self::generic::{ FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; - -/// Forme storage proof type, to be replace by -/// `use sc_client_api::StorageProof`; -type StorageProof = Vec>; +use sc_client_api::StorageProof; /// A unique ID of a request. pub type RequestId = u64; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 33e53a9fdcad7..43c54facef783 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -363,7 +363,7 @@ impl StateBackend for FullState Vec> { - match self { - StorageProof::Flatten(proof) => proof, - _ => panic!("Flat proof expected"), - } - } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to From 4d5bfa7ed593ece03bdd377181bbfda628797875 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 May 2020 14:07:15 +0200 Subject: [PATCH 135/185] Revert "Switch to compact proof everywhere" This reverts commit f298e6b1628d0e67db52bab1b8936c29d2a8751c. 
--- bin/node/cli/src/service.rs | 2 +- client/api/src/cht.rs | 2 +- client/basic-authorship/src/lib.rs | 2 +- client/block-builder/src/lib.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 16 +++++----- client/finality-grandpa/src/light_import.rs | 4 +-- client/finality-grandpa/src/tests.rs | 2 +- client/network/src/light_client_handler.rs | 31 ++++++++++--------- client/network/src/protocol.rs | 16 +++++----- client/network/src/protocol/message.rs | 5 ++- client/rpc/src/state/state_full.rs | 2 +- primitives/trie/src/storage_proof.rs | 8 +++++ 12 files changed, 52 insertions(+), 40 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index af3c0949ac5d0..69bae09f1e6cf 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -589,7 +589,7 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), - RecordProof::Yes(StorageProofKind::TrieSkipHashes), + RecordProof::Yes(StorageProofKind::Flatten), ).await }).expect("Error making test block").block; diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 57aa82964b3ff..5c14c172fa00e 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -122,7 +122,7 @@ pub fn build_proof( prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ).map_err(ClientError::Execution) } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 12e67eb58c724..691dd16c564c6 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -45,7 +45,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes(StorageProofKind::TrieSkipHashes), +//! RecordProof::Yes(StorageProofKind::Flatten), //! ); //! //! // We wait until the proposition is performed. 
diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 723f191530317..2bdcd1e15a265 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -231,7 +231,7 @@ mod tests { &client, client.info().best_hash, client.info().best_number, - RecordProof::Yes(sp_api::StorageProofKind::TrieSkipHashes), + RecordProof::Yes(sp_api::StorageProofKind::Flatten), Default::default(), &*backend, ).unwrap().build().unwrap(); diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 9609f0090c30b..03abd42a0e185 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -97,7 +97,7 @@ impl AuthoritySetForFinalityProver for Arc) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::TrieSkipHashes) + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::Flatten) } } @@ -228,7 +228,7 @@ pub(crate) struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option, + pub authorities_proof: Option>>, } /// Proof of finality is the ordered set of finality fragments, where: @@ -344,7 +344,7 @@ pub(crate) fn prove_finality, J>( block: current, justification, unknown_headers: ::std::mem::take(&mut unknown_headers), - authorities_proof: new_authorities_proof, + authorities_proof: new_authorities_proof.map(StorageProof::expect_flatten_content), }; // append justification to finality proof if required @@ -511,7 +511,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - new_authorities_proof, + StorageProof::Flatten(new_authorities_proof), )?; current_set_id += 1; @@ -853,14 +853,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![vec![50]])), + authorities_proof: Some(vec![vec![50]]), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::Flatten(vec![vec![70]])), + authorities_proof: Some(vec![vec![70]]), }, ]); @@ -935,7 +935,7 @@ pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), + authorities_proof: Some(vec![vec![42]]), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -985,7 +985,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), + authorities_proof: 
Some(vec![vec![42]]), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 41e381a6ce32b..25389f8e2dae3 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -573,7 +573,7 @@ pub mod tests { use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof}; + use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore}; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::{ @@ -867,7 +867,7 @@ pub mod tests { Vec::new(), ).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![])), + authorities_proof: Some(vec![]), }, ].encode(), &mut verifier, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index cb7b58189b7b0..1d36de0fe4281 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -248,7 +248,7 @@ impl AuthoritySetForFinalityProver for TestApi { let backend = >>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); - let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::TrieSkipHashes) + let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::Flatten) .expect("failure proving read from in-memory storage backend"); Ok(proof) } diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 5a688d2e7aaee..1586620a20904 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -56,7 +56,8 @@ use libp2p::{ use nohash_hasher::IntMap; 
use prost::Message; use sc_client_api::{ - StorageProof, StorageProofKind, + StorageProof, StorageProofKind, LegacyDecodeAdapter, + FlattenEncodeAdapter as LegacyEncodeAdapter, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -444,7 +445,7 @@ where match response.response { Some(Response::RemoteCallResponse(response)) => if let Request::Call { request , .. } = request { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { @@ -453,12 +454,12 @@ where Some(Response::RemoteReadResponse(response)) => match request { Request::Read { request, .. } => { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } Request::ReadChild { request, .. } => { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } @@ -467,7 +468,7 @@ where Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. } = request { let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = StorageProof::decode(&mut response.roots_proof.as_ref())?; + let roots_proof = LegacyDecodeAdapter::decode(&mut response.roots_proof.as_ref())?.0; let roots = { let mut r = BTreeMap::new(); for pair in response.roots { @@ -495,7 +496,7 @@ where } else { Some(Decode::decode(&mut response.header.as_ref())?) 
}; - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { @@ -549,7 +550,7 @@ where &BlockId::Hash(block), &request.method, &request.data, - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok((_, proof)) => proof, Err(e) => { @@ -564,7 +565,7 @@ where }; let response = { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteCallResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteCallResponse(r) }; @@ -592,7 +593,7 @@ where let proof = match self.chain.read_proof( &BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok(proof) => proof, Err(error) => { @@ -606,7 +607,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -641,7 +642,7 @@ where &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, )) { Ok(proof) => proof, Err(error) => { @@ -656,7 +657,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -684,7 +685,7 @@ where }; let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; + let r = schema::v1::light::RemoteHeaderResponse { header, proof: LegacyEncodeAdapter(&proof).encode() }; 
schema::v1::light::response::Response::RemoteHeaderResponse(r) }; @@ -744,7 +745,7 @@ where roots: proof.roots.into_iter() .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), - roots_proof: proof.roots_proof.encode(), + roots_proof: LegacyEncodeAdapter(&proof.roots_proof).encode(), }; schema::v1::light::response::Response::RemoteChangesResponse(r) }; @@ -1352,7 +1353,7 @@ mod tests { type Swarm = libp2p::swarm::Swarm; fn empty_proof() -> Vec { - StorageProof::empty().encode() + LegacyEncodeAdapter(&StorageProof::empty()).encode() } fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9b7dd31981241..4aba49a41f6e0 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1446,7 +1446,7 @@ impl Protocol { &BlockId::Hash(request.block), &request.method, &request.data, - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1467,7 +1467,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1593,7 +1593,7 @@ impl Protocol { let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok(proof) => proof, Err(error) => { @@ -1612,7 +1612,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1649,7 +1649,7 @@ impl Protocol { &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, )) { Ok(proof) => proof, Err(error) => { @@ -1669,7 +1669,7 @@ impl Protocol { None, 
GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1699,7 +1699,7 @@ impl Protocol { GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1762,7 +1762,7 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, + roots_proof: proof.roots_proof.expect_flatten_content(), }), ); } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 155e945f4eec5..231bf2013bc79 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -27,7 +27,10 @@ pub use self::generic::{ FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; -use sc_client_api::StorageProof; + +/// Forme storage proof type, to be replace by +/// `use sc_client_api::StorageProof`; +type StorageProof = Vec>; /// A unique ID of a request. pub type RequestId = u64; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 43c54facef783..33e53a9fdcad7 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -363,7 +363,7 @@ impl StateBackend for FullState Vec> { + match self { + StorageProof::Flatten(proof) => proof, + _ => panic!("Flat proof expected"), + } + } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to From a2eca57ebd3bb6217b9308c1ba9d2151de6255e5 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 May 2020 17:47:19 +0200 Subject: [PATCH 136/185] Doc improvment, remove dead code, more sensible recurse calls. 
--- client/service/src/client/client.rs | 5 +- .../service/src/client/light/call_executor.rs | 5 +- primitives/api/src/lib.rs | 5 +- .../state-machine/src/proving_backend.rs | 23 +- primitives/state-machine/src/trie_backend.rs | 5 +- primitives/storage/src/lib.rs | 64 ++---- primitives/trie/src/lib.rs | 9 +- primitives/trie/src/storage_proof.rs | 202 ++++++++---------- 8 files changed, 138 insertions(+), 180 deletions(-) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index ec1157a0d5176..9198d6ba55191 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1209,7 +1209,7 @@ impl ProofProvider for Client where call_data: &[u8], kind: StorageProofKind, ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let (merge_kind, prefer_full, recurse) = kind.mergeable_kind(); + let (merge_kind, prefer_full) = kind.mergeable_kind(); // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. 
// @@ -1229,11 +1229,12 @@ impl ProofProvider for Client where method, call_data, merge_kind, + true, ).and_then(|(r, p)| { Ok((r, StorageProof::merge::, _>( vec![p, code_proof], prefer_full, - recurse, + false, ).map_err(|e| format!("{}", e))?)) }) } diff --git a/client/service/src/client/light/call_executor.rs b/client/service/src/client/light/call_executor.rs index 74709f76e5199..33e780901ac5a 100644 --- a/client/service/src/client/light/call_executor.rs +++ b/client/service/src/client/light/call_executor.rs @@ -183,6 +183,7 @@ pub fn prove_execution( method: &str, call_data: &[u8], kind: StorageProofKind, + proof_used_in_other: bool, ) -> ClientResult<(Vec, StorageProof)> where Block: BlockT, @@ -195,7 +196,7 @@ pub fn prove_execution( Box )?; - let (merge_kind, prefer_full, recurse) = kind.mergeable_kind(); + let (merge_kind, prefer_full) = kind.mergeable_kind(); // prepare execution environment + record preparation proof let mut changes = Default::default(); let (_, init_proof) = executor.prove_at_trie_state( @@ -217,7 +218,7 @@ pub fn prove_execution( let total_proof = StorageProof::merge::, _>( vec![init_proof, exec_proof], prefer_full, - recurse, + proof_used_in_other, ).map_err(|e| format!("{}", e))?; Ok((result, total_proof)) diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 561714ef6ba45..ced312ec0dd47 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -521,11 +521,14 @@ pub trait RuntimeApiInfo { const VERSION: u32; } -/// Inner struct for storage of proof management. +/// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] pub struct ProofRecorder { + /// The recorder to use over the db use by trie db. pub recorder: sp_state_machine::ProofRecorder>, + /// The kind of proof to produce. pub kind: StorageProofKind, + /// The additional input needed for the proof. 
pub input: ProofInput, } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 61ae096a6cdd7..d29083663f2eb 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -115,14 +115,16 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> } } -/// Global proof recorder, act as a layer over a hash db for recording queried -/// data. +/// A type that records all accessed trie nodes. pub enum ProofRecorder { - // root of each child is added to be able to pack. - /// Proof keep a separation between child trie content, this is usually useless, - /// but when we use proof compression we want this separation. + /// Records are separated by child trie, this is needed for + /// proof compaction. Full(Arc>>>), - /// Single level of storage for all recoded nodes. + /// Single storage for all recoded nodes (as in + /// state db column). + /// That this variant exists only for performance + /// (on less map access than in `Full`), but is not strictly + /// necessary. 
Flat(Arc>>), } @@ -182,7 +184,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: essence.backend_storage(), proof_recorder, }; - let trie_backend = if let ProofInputKind::ChildTrieRoots = proof_kind.processing_input_kind() { + let trie_backend = if let ProofInputKind::ChildTrieRoots = proof_kind.process_input_kind() { TrieBackend::new_with_roots(recorder, root) } else { TrieBackend::new(recorder, root) @@ -202,7 +204,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } fn update_input(&mut self) -> Result<(), String> { - let input = match self.proof_kind.processing_input_kind() { + let input = match self.proof_kind.process_input_kind() { ProofInputKind::ChildTrieRoots => { self.trie_backend.extract_registered_roots() }, @@ -215,7 +217,8 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } } - /// Drop the backend, but keep the state to use it again afterward + /// Extract current recording state, this allows using the state back when recording + /// multiple operations. pub fn recording_state(mut self) -> Result<(ProofRecorder, ProofInput), String> { self.update_input()?; Ok(( @@ -229,7 +232,7 @@ impl ProofRecorder where H::Out: Codec, { - /// Extracts the gathered unordered proof. + /// Extracts and transform the gathered unordered content. pub fn extract_proof( &self, kind: StorageProofKind, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 8ce08dac87cb3..a2abed87a68ec 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -43,7 +43,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } } - /// Activate storage of roots (can be use + /// Create a trie backend that also record visited trie roots. /// to pack proofs and does small caching of child trie root)). 
pub fn new_with_roots(storage: S, root: H::Out) -> Self { let register_roots = Some(RwLock::new(Default::default())); @@ -52,7 +52,8 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } } - /// Get registered roots + /// Get registered roots. Empty input is returned when the backend is + /// not configured to register roots. pub fn extract_registered_roots(&self) -> ProofInput { if let Some(register_roots) = self.essence.register_roots.as_ref() { let mut dest = ChildrenProofMap::default(); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 4dd340ff03855..8f9f39045e0bb 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -162,16 +162,15 @@ pub mod well_known_keys { /// Child information needed for proof construction. /// -/// It is similar to standard child information but can -/// be a bit more lightweight as long term storage is not -/// needed in proof. +/// It contains `ChildInfo` strictly needed for proofs. /// -/// One can also use this information to use different compaction -/// strategy in a same proof. +/// It could also be use for specific proof usage. #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] pub enum ChildInfoProof { - /// A child using the default trie layout, identified by its - /// unprefixed location in the first level trie. + /// By default a child only need to be defined by its location in + /// the block top trie. + /// This variant is reserved for child trie of `ParentKeyId` type + /// and do not require to store the full parent key. /// Empty location is reserved for the top level trie of the proof. Default(ChildTrieParentKeyId), } @@ -207,14 +206,13 @@ impl ChildInfo { } } - /// Top trie defined as the unique crypto id trie with - /// 0 length unique id. + /// ChildInfo definition for top trie. + /// The top trie is defined as a default trie with an empty key. 
pub fn top_trie() -> Self { Self::new_default(&[]) } - /// Top trie defined as the unique crypto id trie with - /// 0 length unique id. + /// Test if the child info is the block top trie. pub fn is_top_trie(&self) -> bool { match self { ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => data.len() == 0, @@ -271,7 +269,7 @@ impl ChildInfo { } } - /// Get corresponding info for proof definition. + /// Get default corresponding info to use with proof. pub fn proof_info(&self) -> ChildInfoProof { match self { ChildInfo::ParentKeyId(parent) => ChildInfoProof::Default(parent.clone()), @@ -280,14 +278,13 @@ impl ChildInfo { } impl ChildInfoProof { - /// Top trie defined as the unique crypto id trie with - /// 0 length unique id. + /// ChildInfoProof definition for top trie. + /// Same as `ChildInfo::top_trie().proof_info()`. pub fn top_trie() -> Self { ChildInfoProof::Default(ChildTrieParentKeyId { data: Vec::new() }) } - /// Top trie defined as the unique crypto id trie with - /// 0 length unique id. + /// Test if the child info proof is the block top trie. pub fn is_top_trie(&self) -> bool { match self { ChildInfoProof::Default(ChildTrieParentKeyId { data }) => data.len() == 0, @@ -391,39 +388,8 @@ impl ChildTrieParentKeyId { } } -#[derive(Clone, PartialEq, Eq, Debug)] -/// Type for storing a map of child trie related information. -/// A few utilities methods are defined. 
-pub struct ChildrenMap(pub BTreeMap); - -impl sp_std::ops::Deref for ChildrenMap { - type Target = BTreeMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl sp_std::ops::DerefMut for ChildrenMap { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl sp_std::default::Default for ChildrenMap { - fn default() -> Self { - ChildrenMap(BTreeMap::new()) - } -} - -impl IntoIterator for ChildrenMap { - type Item = (ChildInfo, T); - type IntoIter = sp_std::collections::btree_map::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} +/// Map of child trie information stored by `ChildInfo`. +pub type ChildrenMap = BTreeMap; const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index bb2c40c3687dd..a287069184fdf 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -314,7 +314,7 @@ pub fn record_all_keys( Ok(()) } -/// Pack proof from memdb. +/// Pack proof from a collection of encoded node. fn pack_proof_from_collected( root: &TrieHash, input: &dyn hash_db::HashDBRef, @@ -323,7 +323,9 @@ fn pack_proof_from_collected( trie_db::encode_compact(&trie) } -/// Unpack packed proof. +/// Unpack packed proof. Packed proof here is a list of encoded +/// packed node ordered as defined by the compact trie scheme use. +/// Returns a root and a collection on unpacked encoded nodes. fn unpack_proof(input: &[Vec]) -> Result<(TrieHash, Vec>), Box>> { let mut memory_db = MemoryDB::<::Hash>::default(); @@ -332,7 +334,8 @@ fn unpack_proof(input: &[Vec]) } /// Unpack packed proof. -/// This is faster than `unpack_proof`. +/// This is faster than `unpack_proof`, and should be prefered is encoded node +/// will be use in a new memory db. 
fn unpack_proof_to_memdb(input: &[Vec]) -> Result<(TrieHash, MemoryDB::<::Hash>), Box>> { let mut memory_db = MemoryDB::<::Hash>::default(); diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index f4862324b5cdb..62dc99e0928a9 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -23,9 +23,9 @@ use hash_db::{Hasher, HashDB, HashDBRef, EMPTY_PREFIX}; use crate::{MemoryDB, Layout}; use sp_storage::{ChildInfoProof, ChildType, ChildrenMap}; use trie_db::DBValue; -// we are not using std as this use in no_std is -// only allowed here because it is already use in -// no_std use of trie_db. +// We are not including it to sp_std, this hash map +// usage is restricted here to proof. +// In practice it is already use internally by no_std trie_db. #[cfg(not(feature = "std"))] use hashbrown::HashMap; @@ -40,18 +40,18 @@ type CodecResult = sp_std::result::Result; pub enum Error { /// Error produce by storage proof logic. /// It is formatted in std to simplify type. - Trie(String), - /// Error produce by trie manipulation. Proof(&'static str), + /// Error produce by trie manipulation. + Trie(String), } #[cfg(not(feature = "std"))] #[derive(PartialEq, Eq, Clone, Debug)] pub enum Error { /// Error produce by storage proof logic. - Trie, - /// Error produce by trie manipulation. Proof, + /// Error produce by trie manipulation. + Trie, } #[cfg(feature = "std")] @@ -67,7 +67,7 @@ impl sp_std::fmt::Display for Error { #[cfg(feature = "std")] impl sp_std::convert::From> for Error { fn from(e: sp_std::boxed::Box) -> Self { - // currently only trie error is build from box + // Only trie error is build from box so we use a tiny shortcut here. Error::Trie(format!("{}", e)) } } @@ -110,6 +110,8 @@ const fn no_partial_db_support() -> Error { /// Different kind of proof representation are allowed. /// This definition is used as input parameter when producing /// a storage proof. 
+/// Some kind are reserved for test or internal use and will +/// not be usable when decoding proof. #[repr(u8)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum StorageProofKind { @@ -137,8 +139,8 @@ pub enum StorageProofKind { } impl StorageProofKind { - /// Decode a byte value representing the storage byte. - /// Return `None` if value does not exists. + /// Decode a byte value representing the storage kind. + /// Return `None` if the kind does not exists or is not allowed. #[cfg(test)] pub fn read_from_byte(encoded: u8) -> Option { Some(match encoded { @@ -156,8 +158,8 @@ impl StorageProofKind { }) } - /// Decode a byte value representing the storage byte. - /// Return `None` if value does not exists. + /// Decode a byte value representing the storage kind. + /// Return `None` if the kind does not exists or is not allowed. #[cfg(not(test))] pub fn read_from_byte(encoded: u8) -> Option { Some(match encoded { @@ -171,9 +173,9 @@ impl StorageProofKind { } #[derive(Clone)] -/// Additional information needed for packing or unpacking. +/// Additional information needed for packing or unpacking storage proof. /// These do not need to be part of the proof but are required -/// when using the proof. +/// when processing the proof. pub enum Input { /// Proof is self contained. None, @@ -191,14 +193,14 @@ pub enum Input { } impl Input { - #[must_use] /// Update input with new content. /// Return false on failure. - /// Fail when the content differs, except for `None` input + /// Fails when the input type differs, except for `None` input /// that is always reassignable. /// - /// Not that currently all query plan input are not mergeable - /// even if it could in the future. + /// Not that currently query plan inputs are not mergeable + /// even if doable (just unimplemented). + #[must_use] pub fn consolidate(&mut self, other: Self) -> bool { match self { Input::None => { @@ -230,7 +232,7 @@ impl Input { } } -/// Kind for designing an `Input` variant. 
+/// Kind for a `Input` variant. pub enum InputKind { /// `Input::None` kind. None, @@ -246,9 +248,8 @@ pub enum InputKind { } impl StorageProofKind { - /// Some proof variants requires more than just the collected - /// encoded nodes. - pub fn processing_input_kind(&self) -> InputKind { + /// Input kind needed for processing (create) the proof. + pub fn process_input_kind(&self) -> InputKind { match self { StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlan, StorageProofKind::TrieSkipHashesForMerge @@ -259,7 +260,7 @@ impl StorageProofKind { } } - /// Same as `need_additional_info_to_produce` but for reading. + /// Input kind needed for verifying the proof. pub fn verify_input_kind(&self) -> InputKind { match self { StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanWithValues, @@ -271,20 +272,7 @@ impl StorageProofKind { } } - /// Some proof can get unpack into another proof representation. - pub fn can_unpack(&self) -> bool { - match self { - StorageProofKind::KnownQueryPlanAndValues => false, - StorageProofKind::TrieSkipHashes - | StorageProofKind::TrieSkipHashesFull => true, - StorageProofKind::Full - | StorageProofKind::TrieSkipHashesForMerge - | StorageProofKind::Flatten => false, - } - } - - /// Indicates if we need to record proof with splitted child trie information - /// or can simply record on a single collection. + /// Indicates what variant of proof recorder should be use. pub fn need_register_full(&self) -> bool { match self { StorageProofKind::Flatten => false, @@ -311,16 +299,10 @@ impl StorageProofKind { /// Proof that should be use with `verify` method. pub fn can_use_verify(&self) -> bool { - match self { - StorageProofKind::KnownQueryPlanAndValues => true, - _ => false, - } + matches!(self.verify_input_kind(), InputKind::None) } - /// Can be use as a db backend for proof check and - /// result fetch. - /// If false `StorageProof` `as_partial_db` method - /// failure is related to an unsupported capability. 
+ /// Can be use as a trie db backend. pub fn can_use_as_partial_db(&self) -> bool { match self { StorageProofKind::KnownQueryPlanAndValues => false, @@ -328,83 +310,79 @@ impl StorageProofKind { } } - /// Can be use as a db backend without child trie - /// distinction. - /// If false `StorageProof` `as_partial_flat_db` method - /// failure is related to an unsupported capability. - pub fn can_use_as_flat_partial_db(&self) -> bool { - self.can_use_as_partial_db() - } - - /// Return the best kind to use for merging later, and - /// wether the merge should produce full proof, and if - /// we are recursing. - pub fn mergeable_kind(&self) -> (Self, bool, bool) { + /// Return the best kind to use for merging later, + /// a boolean indicationg if merge should produce full proof. + pub fn mergeable_kind(&self) -> (Self, bool) { match self { - StorageProofKind::TrieSkipHashes => (StorageProofKind::TrieSkipHashesForMerge, false, false), - StorageProofKind::TrieSkipHashesFull => (StorageProofKind::TrieSkipHashesForMerge, true, false), - StorageProofKind::TrieSkipHashesForMerge => (StorageProofKind::TrieSkipHashesForMerge, true, true), - s => (*s, s.use_full_partial_db().unwrap_or(false), false) + StorageProofKind::TrieSkipHashes => (StorageProofKind::TrieSkipHashesForMerge, false), + StorageProofKind::TrieSkipHashesFull => (StorageProofKind::TrieSkipHashesForMerge, true), + StorageProofKind::TrieSkipHashesForMerge => (StorageProofKind::TrieSkipHashesForMerge, true), + s => (*s, s.use_full_partial_db().unwrap_or(false)) } } } /// A collection on encoded trie nodes. type ProofNodes = Vec>; -/// A sorted by trie nodes order collection on encoded trie nodes -/// with possibly ommitted content or special compacted encoding. + +/// A collection on encoded and compacted trie nodes. +/// Nodes are sorted by trie node iteration order, and some hash +/// and/or values are ommitted (they can be either calculated from +/// proof content or completed by proof input). 
type ProofCompacted = Vec>; -/// A proof that some set of key-value pairs are included in the storage trie. The proof contains -/// the storage values so that the partial storage backend can be reconstructed by a verifier that -/// does not already have access to the key-value pairs. +/// A proof that some set of key-value pairs are included in the storage state. The proof contains +/// either values so that the partial storage backend can be reconstructed by a verifier that +/// does not already have access to the key-value pairs, or can be verified with `verify` method. +/// +/// For instance for default trie and flatten storage proof kind, the proof component consists of the set of +/// serialized nodes in the storage trie accessed when looking up the keys covered by the proof. +/// Verifying the proof requires constructing the partial trie from the serialized nodes and +/// performing the key lookups. The proof carries additional information (the result of the query). /// -/// For default trie, the proof component consists of the set of serialized nodes in the storage trie -/// accessed when looking up the keys covered by the proof. Verifying the proof requires constructing -/// the partial trie from the serialized nodes and performing the key lookups. +/// For know query plan and value, the proof is simply verified by running verify method since we +/// are not getting additional information from the proof. #[derive(Debug, PartialEq, Eq, Clone)] pub enum StorageProof { /// Single flattened proof component, all default child trie are flattened over a same /// container, no child trie information is provided. Flatten(ProofNodes), - /// This skip encoding of hashes that are - /// calculated when reading the structue - /// of the trie. 
- /// It requires that the proof is collected with - /// child trie separation, will encode to struct that - /// separate child trie but do not keep information about - /// them (for compactness) and will therefore produce a flatten - /// verification backend. + /// This works as `Flatten`, but skips encoding of hashes + /// that can be calculated when reading the child nodes + /// in the proof (nodes ordering hold the trie structure information). + /// This requires that the proof is collected with + /// child trie separation and each child trie roots as additional + /// input. + /// We remove child trie info when encoding because it is not strictly needed + /// when decoding. TrieSkipHashes(Vec), - /// This skip encoding of hashes, but need to know the key - /// values that are targetted by the operation. - /// As `TrieSkipHashes`, it does not pack hash that can be - /// calculated, so it requires a specific call to a custom - /// verify function with additional input. - /// This needs to be check for every children proofs. + /// This skips encoding of hashes in a similar way as `TrieSkipHashes`. + /// This also skips values in the proof, and can therefore only be + /// use to check if there was a change of content. + /// This needs to be check for every children proofs, and needs to keep + /// trace of every child trie origin. KnownQueryPlanAndValues(ChildrenProofMap), - // Techincal variant + // Technical variants /// This is an intermediate representation that keep trace of - /// input, in order merge into a `TrieSkipHashes` or a `TrieSkipHashesFull` - /// proof + /// input and is therefore mergeable into compact representation. + /// Compatible with `TrieSkipHashes` and `TrieSkipHashesFull` proofs. TrieSkipHashesForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>), // Following variants are only for testing, they still can be use but // decoding is not implemented. 
- /// Fully described proof, it includes the child trie individual description and split its - /// content by child trie. - /// Currently Full variant is unused as all our child trie kind can share a same memory db - /// (a bit more compact). - /// This is mainly provided for test purpose and extensibility. + /// Proof with full child trie description. + /// Currently Full variant is unused as all our proof kind can share a same memory db + /// (which is a bit more compact). + /// This is currently mainly provided for test purpose and extensibility. Full(ChildrenProofMap), /// Compact form of proofs split by child trie, this is using the same compaction as - /// `TrieSkipHashes` but do not merge the content in a single memorydb backend. + /// `TrieSkipHashes` but keeps trace of child trie origin. /// This is mainly provided for test purpose and extensibility. TrieSkipHashesFull(ChildrenProofMap), } @@ -443,16 +421,13 @@ impl Encode for StorageProof { StorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), StorageProof::Full(p) => p.encode_to(dest), StorageProof::TrieSkipHashesFull(p) => p.encode_to(dest), - StorageProof::TrieSkipHashesForMerge(..) => (), + StorageProof::TrieSkipHashesForMerge(..) => panic!("merge did not recurse as told"), } } } /// This encodes the full proof capabillity under -/// legacy proof format by disabling the empty proof -/// from it (empty proof should not happen because -/// the empty trie still got a empty node recorded in -/// all its proof). +/// legacy proof format. pub struct LegacyEncodeAdapter<'a>(pub &'a StorageProof); impl<'a> Encode for LegacyEncodeAdapter<'a> { @@ -462,8 +437,9 @@ impl<'a> Encode for LegacyEncodeAdapter<'a> { } } -/// This encodes only if storage proof if it is guarantied -/// to be a flatten proof. +/// This encodes only if storage proof is a flatten proof. +/// It panics otherwise, so it should only be used when we +/// got strong guarantees of the proof kind. 
pub struct FlattenEncodeAdapter<'a>(pub &'a StorageProof); impl<'a> Encode for FlattenEncodeAdapter<'a> { @@ -479,7 +455,7 @@ impl<'a> Encode for FlattenEncodeAdapter<'a> { /// Decode variant of `LegacyEncodeAdapter`. pub struct LegacyDecodeAdapter(pub StorageProof); -/// Allow read ahead on input. +/// Allow read ahead on input by chaining back some already consumed data. pub struct InputRevertPeek<'a, I>(pub &'a mut &'a [u8], pub &'a mut I); impl<'a, I: CodecInput> CodecInput for InputRevertPeek<'a, I> { @@ -551,7 +527,7 @@ impl StorageProof { } } - /// Returns whether this is an empty proof. + /// Check if proof is empty for any kind of proof. pub fn is_empty(&self) -> bool { match self { StorageProof::Flatten(data) => data.is_empty(), @@ -603,7 +579,7 @@ impl StorageProof { } } - /// This run proof validation when the proof allows immediate + /// Run proof validation when the proof allows immediate /// verification (`StorageProofKind::can_use_verify`). pub fn verify( self, @@ -642,7 +618,7 @@ impl StorageProof { } } - /// This produce the proof from collected information. + /// Produces the proof from collected information. pub fn extract_proof( collected: &ChildrenMap>, kind: StorageProofKind, @@ -751,7 +727,7 @@ impl StorageProof { }) } - /// This produce the proof from collected information on a flat backend. + /// Produce the proof from collected information on a flat backend. pub fn extract_proof_from_flat( collected: &RecordMapTrieNodes, kind: StorageProofKind, @@ -771,14 +747,15 @@ impl StorageProof { }) } - /// Merges multiple storage proofs covering potentially different sets of keys into one proof - /// covering all keys. The merged proof output may be smaller than the aggregate size of the input + /// Merges multiple storage proofs. + /// The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. 
- /// Merge to `Flatten` if one of the item is flatten (we cannot unflatten), if not `Flatten` we output to - /// non compact form. + /// Merge results in a `Flatten` storage proof if any of the items is flatten (we cannot unflatten). /// The function cannot pack back proof as it does not have reference to additional information - /// needed. So for this the additional information need to be merged separately and the result - /// of this merge be packed with it afterward. + /// needed. + /// So packing back needs to be done in a next step with aggregated proof inputs. + /// Using a technical mergeable type is also possible (see `StorageProofKind::TrieSkipHashesForMerge` + /// and `mergeable_kind`). pub fn merge(proofs: I, prefer_full: bool, recurse: bool) -> Result where I: IntoIterator, @@ -887,7 +864,7 @@ impl StorageProof { }) } - /// Get kind description for the storage proof variant. + /// Get kind type for the storage proof variant. pub fn kind(&self) -> StorageProofKind { match self { StorageProof::Flatten(_) => StorageProofKind::Flatten, @@ -1013,6 +990,9 @@ impl StorageProof { } /// Get flatten content form proof. + /// This panics on non flatten proof and should only be + /// used when we got strong guarantee the proof is a `Flatten` + /// proof. pub fn expect_flatten_content(self) -> Vec> { match self { StorageProof::Flatten(proof) => proof, From e3d2067dd69798a1ebaf1b3514c03e339be8b34e Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 May 2020 18:07:00 +0200 Subject: [PATCH 137/185] Revert "Revert "Switch to compact proof everywhere"" : uses compact proofs. This reverts commit 4d5bfa7ed593ece03bdd377181bbfda628797875. 
--- bin/node/cli/src/service.rs | 2 +- client/api/src/cht.rs | 2 +- client/basic-authorship/src/lib.rs | 2 +- client/block-builder/src/lib.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 16 +++++----- client/finality-grandpa/src/light_import.rs | 4 +-- client/finality-grandpa/src/tests.rs | 2 +- client/network/src/light_client_handler.rs | 31 +++++++++---------- client/network/src/protocol.rs | 16 +++++----- client/network/src/protocol/message.rs | 5 +-- client/rpc/src/state/state_full.rs | 2 +- 11 files changed, 40 insertions(+), 44 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 69bae09f1e6cf..af3c0949ac5d0 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -589,7 +589,7 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), - RecordProof::Yes(StorageProofKind::Flatten), + RecordProof::Yes(StorageProofKind::TrieSkipHashes), ).await }).expect("Error making test block").block; diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 5c14c172fa00e..57aa82964b3ff 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -122,7 +122,7 @@ pub fn build_proof( prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ).map_err(ClientError::Execution) } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 691dd16c564c6..12e67eb58c724 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -45,7 +45,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes(StorageProofKind::Flatten), +//! RecordProof::Yes(StorageProofKind::TrieSkipHashes), //! ); //! //! // We wait until the proposition is performed. 
diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 2bdcd1e15a265..723f191530317 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -231,7 +231,7 @@ mod tests { &client, client.info().best_hash, client.info().best_number, - RecordProof::Yes(sp_api::StorageProofKind::Flatten), + RecordProof::Yes(sp_api::StorageProofKind::TrieSkipHashes), Default::default(), &*backend, ).unwrap().build().unwrap(); diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 03abd42a0e185..9609f0090c30b 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -97,7 +97,7 @@ impl AuthoritySetForFinalityProver for Arc) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::Flatten) + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::TrieSkipHashes) } } @@ -228,7 +228,7 @@ pub(crate) struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option>>, + pub authorities_proof: Option, } /// Proof of finality is the ordered set of finality fragments, where: @@ -344,7 +344,7 @@ pub(crate) fn prove_finality, J>( block: current, justification, unknown_headers: ::std::mem::take(&mut unknown_headers), - authorities_proof: new_authorities_proof.map(StorageProof::expect_flatten_content), + authorities_proof: new_authorities_proof, }; // append justification to finality proof if required @@ -511,7 +511,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - StorageProof::Flatten(new_authorities_proof), + new_authorities_proof, )?; current_set_id += 1; @@ -853,14 +853,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(vec![vec![50]]), + authorities_proof: Some(StorageProof::Flatten(vec![vec![50]])), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(vec![vec![70]]), + authorities_proof: Some(StorageProof::Flatten(vec![vec![70]])), }, ]); @@ -935,7 +935,7 @@ pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(vec![vec![42]]), + authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -985,7 +985,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(vec![vec![42]]), + authorities_proof: 
Some(StorageProof::Flatten(vec![vec![42]])), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 25389f8e2dae3..41e381a6ce32b 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -573,7 +573,7 @@ pub mod tests { use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore}; + use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof}; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::{ @@ -867,7 +867,7 @@ pub mod tests { Vec::new(), ).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(vec![]), + authorities_proof: Some(StorageProof::Flatten(vec![])), }, ].encode(), &mut verifier, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 1d36de0fe4281..cb7b58189b7b0 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -248,7 +248,7 @@ impl AuthoritySetForFinalityProver for TestApi { let backend = >>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); - let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::Flatten) + let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::TrieSkipHashes) .expect("failure proving read from in-memory storage backend"); Ok(proof) } diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 1586620a20904..5a688d2e7aaee 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -56,8 +56,7 @@ use libp2p::{ use 
nohash_hasher::IntMap; use prost::Message; use sc_client_api::{ - StorageProof, StorageProofKind, LegacyDecodeAdapter, - FlattenEncodeAdapter as LegacyEncodeAdapter, + StorageProof, StorageProofKind, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -445,7 +444,7 @@ where match response.response { Some(Response::RemoteCallResponse(response)) => if let Request::Call { request , .. } = request { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { @@ -454,12 +453,12 @@ where Some(Response::RemoteReadResponse(response)) => match request { Request::Read { request, .. } => { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } Request::ReadChild { request, .. } => { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } @@ -468,7 +467,7 @@ where Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. } = request { let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = LegacyDecodeAdapter::decode(&mut response.roots_proof.as_ref())?.0; + let roots_proof = StorageProof::decode(&mut response.roots_proof.as_ref())?; let roots = { let mut r = BTreeMap::new(); for pair in response.roots { @@ -496,7 +495,7 @@ where } else { Some(Decode::decode(&mut response.header.as_ref())?) 
}; - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = StorageProof::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { @@ -550,7 +549,7 @@ where &BlockId::Hash(block), &request.method, &request.data, - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok((_, proof)) => proof, Err(e) => { @@ -565,7 +564,7 @@ where }; let response = { - let r = schema::v1::light::RemoteCallResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteCallResponse(r) }; @@ -593,7 +592,7 @@ where let proof = match self.chain.read_proof( &BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok(proof) => proof, Err(error) => { @@ -607,7 +606,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -642,7 +641,7 @@ where &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, )) { Ok(proof) => proof, Err(error) => { @@ -657,7 +656,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -685,7 +684,7 @@ where }; let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; 
schema::v1::light::response::Response::RemoteHeaderResponse(r) }; @@ -745,7 +744,7 @@ where roots: proof.roots.into_iter() .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), - roots_proof: LegacyEncodeAdapter(&proof.roots_proof).encode(), + roots_proof: proof.roots_proof.encode(), }; schema::v1::light::response::Response::RemoteChangesResponse(r) }; @@ -1353,7 +1352,7 @@ mod tests { type Swarm = libp2p::swarm::Swarm; fn empty_proof() -> Vec { - LegacyEncodeAdapter(&StorageProof::empty()).encode() + StorageProof::empty().encode() } fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 4aba49a41f6e0..9b7dd31981241 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1446,7 +1446,7 @@ impl Protocol { &BlockId::Hash(request.block), &request.method, &request.data, - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1467,7 +1467,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof, }), ); } @@ -1593,7 +1593,7 @@ impl Protocol { let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, ) { Ok(proof) => proof, Err(error) => { @@ -1612,7 +1612,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof, }), ); } @@ -1649,7 +1649,7 @@ impl Protocol { &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::TrieSkipHashes, )) { Ok(proof) => proof, Err(error) => { @@ -1669,7 +1669,7 @@ impl Protocol { None, 
GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof, }), ); } @@ -1699,7 +1699,7 @@ impl Protocol { GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof: proof.expect_flatten_content(), + proof, }), ); } @@ -1762,7 +1762,7 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof.expect_flatten_content(), + roots_proof: proof.roots_proof, }), ); } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 231bf2013bc79..155e945f4eec5 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -27,10 +27,7 @@ pub use self::generic::{ FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; - -/// Forme storage proof type, to be replace by -/// `use sc_client_api::StorageProof`; -type StorageProof = Vec>; +use sc_client_api::StorageProof; /// A unique ID of a request. pub type RequestId = u64; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 33e53a9fdcad7..43c54facef783 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -363,7 +363,7 @@ impl StateBackend for FullState Date: Tue, 19 May 2020 19:46:59 +0200 Subject: [PATCH 138/185] Revert "Revert "Revert "Switch to compact proof everywhere"" : non breaking" This reverts commit e3d2067dd69798a1ebaf1b3514c03e339be8b34e. 
--- bin/node/cli/src/service.rs | 2 +- client/api/src/cht.rs | 2 +- client/basic-authorship/src/lib.rs | 2 +- client/block-builder/src/lib.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 16 +++++----- client/finality-grandpa/src/light_import.rs | 4 +-- client/finality-grandpa/src/tests.rs | 2 +- client/network/src/light_client_handler.rs | 31 ++++++++++--------- client/network/src/protocol.rs | 16 +++++----- client/network/src/protocol/message.rs | 5 ++- client/rpc/src/state/state_full.rs | 2 +- 11 files changed, 44 insertions(+), 40 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index af3c0949ac5d0..69bae09f1e6cf 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -589,7 +589,7 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), - RecordProof::Yes(StorageProofKind::TrieSkipHashes), + RecordProof::Yes(StorageProofKind::Flatten), ).await }).expect("Error making test block").block; diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 57aa82964b3ff..5c14c172fa00e 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -122,7 +122,7 @@ pub fn build_proof( prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ).map_err(ClientError::Execution) } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 12e67eb58c724..691dd16c564c6 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -45,7 +45,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes(StorageProofKind::TrieSkipHashes), +//! RecordProof::Yes(StorageProofKind::Flatten), //! ); //! //! // We wait until the proposition is performed. 
diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 723f191530317..2bdcd1e15a265 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -231,7 +231,7 @@ mod tests { &client, client.info().best_hash, client.info().best_number, - RecordProof::Yes(sp_api::StorageProofKind::TrieSkipHashes), + RecordProof::Yes(sp_api::StorageProofKind::Flatten), Default::default(), &*backend, ).unwrap().build().unwrap(); diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 9609f0090c30b..03abd42a0e185 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -97,7 +97,7 @@ impl AuthoritySetForFinalityProver for Arc) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::TrieSkipHashes) + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::Flatten) } } @@ -228,7 +228,7 @@ pub(crate) struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option, + pub authorities_proof: Option>>, } /// Proof of finality is the ordered set of finality fragments, where: @@ -344,7 +344,7 @@ pub(crate) fn prove_finality, J>( block: current, justification, unknown_headers: ::std::mem::take(&mut unknown_headers), - authorities_proof: new_authorities_proof, + authorities_proof: new_authorities_proof.map(StorageProof::expect_flatten_content), }; // append justification to finality proof if required @@ -511,7 +511,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - new_authorities_proof, + StorageProof::Flatten(new_authorities_proof), )?; current_set_id += 1; @@ -853,14 +853,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![vec![50]])), + authorities_proof: Some(vec![vec![50]]), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::Flatten(vec![vec![70]])), + authorities_proof: Some(vec![vec![70]]), }, ]); @@ -935,7 +935,7 @@ pub(crate) mod tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), + authorities_proof: Some(vec![vec![42]]), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -985,7 +985,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![vec![42]])), + authorities_proof: 
Some(vec![vec![42]]), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 41e381a6ce32b..25389f8e2dae3 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -573,7 +573,7 @@ pub mod tests { use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, StorageProof}; + use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore}; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::{ @@ -867,7 +867,7 @@ pub mod tests { Vec::new(), ).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::Flatten(vec![])), + authorities_proof: Some(vec![]), }, ].encode(), &mut verifier, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index cb7b58189b7b0..1d36de0fe4281 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -248,7 +248,7 @@ impl AuthoritySetForFinalityProver for TestApi { let backend = >>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); - let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::TrieSkipHashes) + let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::Flatten) .expect("failure proving read from in-memory storage backend"); Ok(proof) } diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 5a688d2e7aaee..1586620a20904 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -56,7 +56,8 @@ use libp2p::{ use nohash_hasher::IntMap; 
use prost::Message; use sc_client_api::{ - StorageProof, StorageProofKind, + StorageProof, StorageProofKind, LegacyDecodeAdapter, + FlattenEncodeAdapter as LegacyEncodeAdapter, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -444,7 +445,7 @@ where match response.response { Some(Response::RemoteCallResponse(response)) => if let Request::Call { request , .. } = request { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { @@ -453,12 +454,12 @@ where Some(Response::RemoteReadResponse(response)) => match request { Request::Read { request, .. } => { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } Request::ReadChild { request, .. } => { - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } @@ -467,7 +468,7 @@ where Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. } = request { let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = StorageProof::decode(&mut response.roots_proof.as_ref())?; + let roots_proof = LegacyDecodeAdapter::decode(&mut response.roots_proof.as_ref())?.0; let roots = { let mut r = BTreeMap::new(); for pair in response.roots { @@ -495,7 +496,7 @@ where } else { Some(Decode::decode(&mut response.header.as_ref())?) 
}; - let proof = StorageProof::decode(&mut response.proof.as_ref())?; + let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { @@ -549,7 +550,7 @@ where &BlockId::Hash(block), &request.method, &request.data, - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok((_, proof)) => proof, Err(e) => { @@ -564,7 +565,7 @@ where }; let response = { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteCallResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteCallResponse(r) }; @@ -592,7 +593,7 @@ where let proof = match self.chain.read_proof( &BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok(proof) => proof, Err(error) => { @@ -606,7 +607,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -641,7 +642,7 @@ where &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, )) { Ok(proof) => proof, Err(error) => { @@ -656,7 +657,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -684,7 +685,7 @@ where }; let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; + let r = schema::v1::light::RemoteHeaderResponse { header, proof: LegacyEncodeAdapter(&proof).encode() }; 
schema::v1::light::response::Response::RemoteHeaderResponse(r) }; @@ -744,7 +745,7 @@ where roots: proof.roots.into_iter() .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), - roots_proof: proof.roots_proof.encode(), + roots_proof: LegacyEncodeAdapter(&proof.roots_proof).encode(), }; schema::v1::light::response::Response::RemoteChangesResponse(r) }; @@ -1352,7 +1353,7 @@ mod tests { type Swarm = libp2p::swarm::Swarm; fn empty_proof() -> Vec { - StorageProof::empty().encode() + LegacyEncodeAdapter(&StorageProof::empty()).encode() } fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9b7dd31981241..4aba49a41f6e0 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1446,7 +1446,7 @@ impl Protocol { &BlockId::Hash(request.block), &request.method, &request.data, - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1467,7 +1467,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1593,7 +1593,7 @@ impl Protocol { let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, ) { Ok(proof) => proof, Err(error) => { @@ -1612,7 +1612,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1649,7 +1649,7 @@ impl Protocol { &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::TrieSkipHashes, + StorageProofKind::Flatten, )) { Ok(proof) => proof, Err(error) => { @@ -1669,7 +1669,7 @@ impl Protocol { None, 
GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1699,7 +1699,7 @@ impl Protocol { GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof, + proof: proof.expect_flatten_content(), }), ); } @@ -1762,7 +1762,7 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, + roots_proof: proof.roots_proof.expect_flatten_content(), }), ); } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 155e945f4eec5..231bf2013bc79 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -27,7 +27,10 @@ pub use self::generic::{ FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, }; -use sc_client_api::StorageProof; + +/// Forme storage proof type, to be replace by +/// `use sc_client_api::StorageProof`; +type StorageProof = Vec>; /// A unique ID of a request. pub type RequestId = u64; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 43c54facef783..33e53a9fdcad7 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -363,7 +363,7 @@ impl StateBackend for FullState Date: Wed, 3 Jun 2020 12:03:20 +0200 Subject: [PATCH 139/185] Pre refacto change from review. 
--- bin/node/cli/src/service.rs | 2 +- client/api/src/cht.rs | 2 +- client/api/src/lib.rs | 2 +- client/basic-authorship/src/lib.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 12 +- client/finality-grandpa/src/tests.rs | 2 +- client/network/src/light_client_handler.rs | 8 +- client/network/src/protocol.rs | 6 +- client/rpc/src/state/state_full.rs | 2 +- client/service/src/client/light/fetcher.rs | 2 +- client/service/test/src/client/light.rs | 4 +- primitives/api/src/lib.rs | 2 +- primitives/consensus/common/src/lib.rs | 2 +- primitives/state-machine/src/lib.rs | 6 +- .../state-machine/src/overlayed_changes.rs | 8 +- .../state-machine/src/proving_backend.rs | 33 ++-- primitives/state-machine/src/trie_backend.rs | 2 +- .../state-machine/src/trie_backend_essence.rs | 11 +- primitives/storage/src/lib.rs | 11 +- primitives/trie/src/lib.rs | 4 +- primitives/trie/src/storage_proof.rs | 183 ++++++++++-------- 21 files changed, 176 insertions(+), 130 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 9c61546f146fa..c82c3cbe1741a 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -591,7 +591,7 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), - RecordProof::Yes(StorageProofKind::Flatten), + RecordProof::Yes(StorageProofKind::Flat), ).await }).expect("Error making test block").block; diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 20ab5dc5e6f34..49adc6e1d3e31 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -122,7 +122,7 @@ pub fn build_proof( prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - StorageProofKind::Flatten, + StorageProofKind::Flat, ).map_err(ClientError::Execution) } diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 0eeadce174464..e9c93b17eea02 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -38,7 +38,7 @@ pub use notifications::*; pub use 
proof_provider::*; pub use sp_state_machine::{StorageProof, LegacyDecodeAdapter, LegacyEncodeAdapter, - FlattenEncodeAdapter, StorageProofKind, ExecutionStrategy, CloneableSpawn}; + FlatEncodeAdapter, StorageProofKind, ExecutionStrategy, CloneableSpawn, ProofNodes}; /// Usage Information Provider interface /// diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 3f35b0f7c8e1f..8e9bfb39ac9fc 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -45,7 +45,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes(StorageProofKind::Flatten), +//! RecordProof::Yes(StorageProofKind::Flat), //! ); //! //! // We wait until the proposition is performed. diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index f4405c2153596..04e487fac88b6 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -42,7 +42,7 @@ use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result a use sc_client_api::{ backend::Backend, StorageProof, StorageProofKind, light::{FetchChecker, RemoteReadRequest}, - StorageProvider, ProofProvider, + StorageProvider, ProofProvider, ProofNodes, }; use parity_scale_codec::{Encode, Decode}; use finality_grandpa::BlockNumberOps; @@ -97,7 +97,7 @@ impl AuthoritySetForFinalityProver for Arc) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::Flatten) + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::Flat) } } @@ -228,7 +228,7 @@ pub(crate) struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec
, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option>>, + pub authorities_proof: Option, } /// Proof of finality is the ordered set of finality fragments, where: @@ -511,7 +511,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - StorageProof::Flatten(new_authorities_proof), + StorageProof::Flat(new_authorities_proof), )?; current_set_id += 1; @@ -836,8 +836,8 @@ pub(crate) mod tests { _ => unreachable!("no other authorities should be fetched: {:?}", block_id), }, |block_id| match block_id { - BlockId::Number(5) => Ok(StorageProof::Flatten(vec![vec![50]])), - BlockId::Number(7) => Ok(StorageProof::Flatten(vec![vec![70]])), + BlockId::Number(5) => Ok(StorageProof::Flat(vec![vec![50]])), + BlockId::Number(7) => Ok(StorageProof::Flat(vec![vec![70]])), _ => unreachable!("no other authorities should be proved: {:?}", block_id), }, ), diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 609da5cf7891b..ba35c4f581717 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -248,7 +248,7 @@ impl AuthoritySetForFinalityProver for TestApi { let backend = >>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); - let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::Flatten) + let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::Flat) .expect("failure proving read from in-memory storage backend"); Ok(proof) } diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 1586620a20904..e37b9c2e63beb 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -57,7 +57,7 @@ use nohash_hasher::IntMap; use prost::Message; use sc_client_api::{ StorageProof, StorageProofKind, LegacyDecodeAdapter, - 
FlattenEncodeAdapter as LegacyEncodeAdapter, + FlatEncodeAdapter as LegacyEncodeAdapter, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -550,7 +550,7 @@ where &BlockId::Hash(block), &request.method, &request.data, - StorageProofKind::Flatten, + StorageProofKind::Flat, ) { Ok((_, proof)) => proof, Err(e) => { @@ -593,7 +593,7 @@ where let proof = match self.chain.read_proof( &BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::Flat, ) { Ok(proof) => proof, Err(error) => { @@ -642,7 +642,7 @@ where &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::Flat, )) { Ok(proof) => proof, Err(error) => { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 1a97d33ffa3d7..a1cb63a627d47 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1459,7 +1459,7 @@ impl Protocol { &BlockId::Hash(request.block), &request.method, &request.data, - StorageProofKind::Flatten, + StorageProofKind::Flat, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1607,7 +1607,7 @@ impl Protocol { let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::Flat, ) { Ok(proof) => proof, Err(error) => { @@ -1663,7 +1663,7 @@ impl Protocol { &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flatten, + StorageProofKind::Flat, )) { Ok(proof) => proof, Err(error) => { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 33e53a9fdcad7..caf44bfb9d590 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -363,7 +363,7 @@ impl StateBackend for FullState> LightDataChecker { H::Out: 
Ord + codec::Codec, { // all the checks are sharing the same storage - let storage = remote_roots_proof.as_partial_flat_db::() + let storage = remote_roots_proof.into_partial_flat_db::() .map_err(|e| format!("{}", e))?; // remote_roots.keys() are sorted => we can use this to group changes tries roots diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 4895129b343cc..59fc233874447 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -388,7 +388,7 @@ fn execution_proof_is_generated_and_checked() { } let kinds = [ - StorageProofKind::Flatten, + StorageProofKind::Flat, StorageProofKind::TrieSkipHashes, ]; @@ -456,7 +456,7 @@ fn code_is_executed_at_genesis_only() { } const KINDS: [StorageProofKind; 4] = [ - StorageProofKind::Flatten, + StorageProofKind::Flat, StorageProofKind::Full, StorageProofKind::TrieSkipHashes, StorageProofKind::TrieSkipHashesFull, diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index ced312ec0dd47..1c9c9d0c8ebf6 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -535,7 +535,7 @@ pub struct ProofRecorder { #[cfg(feature = "std")] impl From for ProofRecorder { fn from(kind: StorageProofKind) -> Self { - let recorder = if kind.need_register_full() { + let recorder = if kind.is_full_proof_recorder_needed() { sp_state_machine::ProofRecorder::>::Full(Default::default()) } else { sp_state_machine::ProofRecorder::>::Flat(Default::default()) diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index e0d80390052eb..c2d37a98343e9 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -150,7 +150,7 @@ impl From for RecordProof { fn from(val: bool) -> Self { if val { // default to a flatten proof. 
- Self::Yes(StorageProofKind::Flatten) + Self::Yes(StorageProofKind::Flat) } else { Self::No } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index d77f1b924aafc..2f3bf202c6ce9 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -45,7 +45,7 @@ mod stats; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, - LegacyDecodeAdapter, LegacyEncodeAdapter, FlattenEncodeAdapter}; + LegacyDecodeAdapter, LegacyEncodeAdapter, FlatEncodeAdapter, ProofNodes}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use ext::Ext; @@ -1055,7 +1055,7 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { - prove_execution_and_proof_check_works_inner(StorageProofKind::Flatten); + prove_execution_and_proof_check_works_inner(StorageProofKind::Flat); prove_execution_and_proof_check_works_inner(StorageProofKind::Full); prove_execution_and_proof_check_works_inner(StorageProofKind::TrieSkipHashesFull); prove_execution_and_proof_check_works_inner(StorageProofKind::TrieSkipHashes); @@ -1355,7 +1355,7 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { prove_read_and_proof_check_works_inner(StorageProofKind::Full); - prove_read_and_proof_check_works_inner(StorageProofKind::Flatten); + prove_read_and_proof_check_works_inner(StorageProofKind::Flat); prove_read_and_proof_check_works_inner(StorageProofKind::TrieSkipHashesFull); prove_read_and_proof_check_works_inner(StorageProofKind::TrieSkipHashes); } diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 6efa88ba6d1a0..f0a2df49538f7 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -29,7 +29,7 @@ use crate::{ #[cfg(test)] use std::iter::FromIterator; use std::collections::{HashMap, 
BTreeMap, BTreeSet}; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, Codec}; use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, ChildType}; use sp_core::offchain::storage::OffchainOverlayedChanges; use std::{mem, ops}; @@ -560,7 +560,7 @@ impl OverlayedChanges { mut cache: StorageTransactionCache, ) -> Result, String> where - H::Out: Ord + Decode + Encode + 'static + H::Out: Ord + Codec + 'static { self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) } @@ -574,7 +574,7 @@ impl OverlayedChanges { mut cache: &mut StorageTransactionCache, ) -> Result, String> where - H::Out: Ord + Decode + Encode + 'static + H::Out: Ord + Codec + 'static { // If the transaction does not exist, we generate it. if cache.transaction.is_none() { @@ -691,7 +691,7 @@ impl OverlayedChanges { parent_hash: H::Out, panic_on_storage_error: bool, cache: &mut StorageTransactionCache, - ) -> Result, ()> where H::Out: Ord + Decode + Encode + 'static { + ) -> Result, ()> where H::Out: Ord + Codec + 'static { build_changes_trie::<_, H, N>( backend, changes_trie_state, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index d29083663f2eb..f7bd733c01a25 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -163,7 +163,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> { /// Create new proving backend. 
pub fn new(backend: &'a TrieBackend, kind: StorageProofKind) -> Self { - let proof_recorder = if kind.need_register_full() { + let proof_recorder = if kind.is_full_proof_recorder_needed() { ProofRecorder::Full(Default::default()) } else { ProofRecorder::Flat(Default::default()) @@ -184,7 +184,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: essence.backend_storage(), proof_recorder, }; - let trie_backend = if let ProofInputKind::ChildTrieRoots = proof_kind.process_input_kind() { + let trie_backend = if let ProofInputKind::ChildTrieRoots = proof_kind.input_kind_for_processing() { TrieBackend::new_with_roots(recorder, root) } else { TrieBackend::new(recorder, root) @@ -204,14 +204,19 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } fn update_input(&mut self) -> Result<(), String> { - let input = match self.proof_kind.process_input_kind() { + let input = match self.proof_kind.input_kind_for_processing() { ProofInputKind::ChildTrieRoots => { self.trie_backend.extract_registered_roots() }, _ => ProofInput::None, }; - if !self.previous_input.consolidate(input) { - Err("Incompatible inputs".to_string()) + if let Err(e) = self.previous_input.consolidate(input) { + Err(format!( + "{:?} for inputs kind {:?}, {:?}", + e, + self.previous_input.kind(), + ProofInputKind::ChildTrieRoots, + )) } else { Ok(()) } @@ -232,7 +237,9 @@ impl ProofRecorder where H::Out: Codec, { - /// Extracts and transform the gathered unordered content. + /// Extracts the gathered unordered encoded trie nodes. + /// Depending on `kind`, encoded trie nodes can change + /// (usually to compact the proof). 
pub fn extract_proof( &self, kind: StorageProofKind, @@ -399,7 +406,7 @@ where H: Hasher, H::Out: Codec, { - let db = proof.as_partial_flat_db() + let db = proof.into_partial_flat_db() .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.contains(&root, EMPTY_PREFIX) { Ok(TrieBackend::new(db, root)) @@ -418,7 +425,7 @@ where H::Out: Codec, { use std::ops::Deref; - let db = proof.as_partial_db() + let db = proof.into_partial_db() .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.deref().get(&ChildInfoProof::top_trie()) .map(|db| db.contains(&root, EMPTY_PREFIX)) @@ -448,7 +455,7 @@ mod tests { #[test] fn proof_is_empty_until_value_is_read() { let trie_backend = test_trie(); - let kind = StorageProofKind::Flatten; + let kind = StorageProofKind::Flat; assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); let kind = StorageProofKind::Full; assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); @@ -461,7 +468,7 @@ mod tests { #[test] fn proof_is_non_empty_after_value_is_read() { let trie_backend = test_trie(); - let kind = StorageProofKind::Flatten; + let kind = StorageProofKind::Flat; let mut backend = test_proving(&trie_backend, kind); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof().unwrap().is_empty()); @@ -494,7 +501,7 @@ mod tests { assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); }; - test(StorageProofKind::Flatten); + test(StorageProofKind::Flat); test(StorageProofKind::Full); } @@ -520,7 +527,7 @@ mod tests { let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); }; - test(StorageProofKind::Flatten); + test(StorageProofKind::Flat); test(StorageProofKind::Full); test(StorageProofKind::TrieSkipHashesFull); test(StorageProofKind::TrieSkipHashes); @@ -609,7 +616,7 @@ mod tests { ); } }; - 
test(StorageProofKind::Flatten); + test(StorageProofKind::Flat); test(StorageProofKind::Full); test(StorageProofKind::TrieSkipHashesFull); test(StorageProofKind::TrieSkipHashes); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index a2abed87a68ec..59ace143c6a9b 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -55,7 +55,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec /// Get registered roots. Empty input is returned when the backend is /// not configured to register roots. pub fn extract_registered_roots(&self) -> ProofInput { - if let Some(register_roots) = self.essence.register_roots.as_ref() { + if let Some(register_roots) = self.essence.register_roots() { let mut dest = ChildrenProofMap::default(); dest.insert(ChildInfoProof::top_trie(), self.essence.root().encode()); let roots = { diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index e44d0d0fa173b..0308d0a842089 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -45,12 +45,16 @@ pub struct TrieBackendEssence, H: Hasher> { empty: H::Out, /// If defined, we store encoded visited roots for top_trie and child trie in this /// map. It also act as a cache. - pub register_roots: Option>>>, + register_roots: Option>>>, } /// Patricia trie-based pairs storage essence, with reference to child info. pub struct ChildTrieBackendEssence<'a, S: TrieBackendStorage, H: Hasher> { + /// Trie backend to use. + /// For the default child trie it is the top trie one. pub essence: &'a TrieBackendEssence, + /// Definition of the child trie, this is use to be able to pass + /// child_info information when registering proof. 
pub child_info: Option<&'a ChildInfo>, } @@ -95,6 +99,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &mut self.storage } + /// Get register root reference. + pub fn register_roots(&self) -> Option<&RwLock>>> { + self.register_roots.as_ref() + } + /// Get trie root. pub fn root(&self) -> &H::Out { &self.root diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 8f9f39045e0bb..be3350be71311 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -150,6 +150,11 @@ pub mod well_known_keys { /// Prefix of child storage keys. pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; + /// Prefix of child storage keys of default type. + /// Most of the time using `ChildInfo::from_prefixed_key` is preferable to using + /// this constant. + pub const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; + /// Whether a key is a child storage key. /// /// This is convenience function which basically checks if the given `key` starts @@ -358,7 +363,7 @@ impl ChildType { /// is one. pub fn parent_prefix(&self) -> &'static [u8] { match self { - &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, + &ChildType::ParentKeyId => well_known_keys::DEFAULT_CHILD_TYPE_PARENT_PREFIX, } } } @@ -391,8 +396,6 @@ impl ChildTrieParentKeyId { /// Map of child trie information stored by `ChildInfo`. 
pub type ChildrenMap = BTreeMap; -const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; - #[cfg(test)] mod tests { use super::*; @@ -402,6 +405,6 @@ mod tests { let child_info = ChildInfo::new_default(b"any key"); let prefix = child_info.child_type().parent_prefix(); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); - assert!(prefix.starts_with(DEFAULT_CHILD_TYPE_PARENT_PREFIX)); + assert!(prefix.starts_with(well_known_keys::DEFAULT_CHILD_TYPE_PARENT_PREFIX)); } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index a287069184fdf..91628f13f45a5 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,9 +36,9 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -pub use storage_proof::{StorageProof, ChildrenProofMap, +pub use storage_proof::{StorageProof, ChildrenProofMap, ProofNodes, StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, - RecordMapTrieNodes, LegacyDecodeAdapter, LegacyEncodeAdapter, FlattenEncodeAdapter}; + RecordMapTrieNodes, LegacyDecodeAdapter, LegacyEncodeAdapter, FlatEncodeAdapter}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 62dc99e0928a9..8a37ad3ea4fdb 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -115,8 +115,8 @@ const fn no_partial_db_support() -> Error { #[repr(u8)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum StorageProofKind { - /// Kind for `StorageProof::Flatten`. - Flatten = 1, + /// Kind for `StorageProof::Flat`. + Flat = 1, /// Kind for `StorageProof::TrieSkipHashes`. 
TrieSkipHashes = 2, @@ -142,9 +142,9 @@ impl StorageProofKind { /// Decode a byte value representing the storage kind. /// Return `None` if the kind does not exists or is not allowed. #[cfg(test)] - pub fn read_from_byte(encoded: u8) -> Option { + pub fn from_byte(encoded: u8) -> Option { Some(match encoded { - x if x == StorageProofKind::Flatten as u8 => StorageProofKind::Flatten, + x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, x if x == StorageProofKind::KnownQueryPlanAndValues as u8 => StorageProofKind::KnownQueryPlanAndValues, @@ -161,9 +161,9 @@ impl StorageProofKind { /// Decode a byte value representing the storage kind. /// Return `None` if the kind does not exists or is not allowed. #[cfg(not(test))] - pub fn read_from_byte(encoded: u8) -> Option { + pub fn from_byte(encoded: u8) -> Option { Some(match encoded { - x if x == StorageProofKind::Flatten as u8 => StorageProofKind::Flatten, + x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, x if x == StorageProofKind::KnownQueryPlanAndValues as u8 => StorageProofKind::KnownQueryPlanAndValues, @@ -193,15 +193,26 @@ pub enum Input { } impl Input { - /// Update input with new content. + /// Get input kind for a given input. + pub fn kind(&self) -> InputKind { + match self { + Input::ChildTrieRoots(..) => InputKind::ChildTrieRoots, + Input::None => InputKind::None, + Input::QueryPlan(..) => InputKind::QueryPlan, + Input::QueryPlanWithValues(..) => InputKind::QueryPlanWithValues, + } + } + + /// Updates input with new content. /// Return false on failure. /// Fails when the input type differs, except for `None` input /// that is always reassignable. /// - /// Not that currently query plan inputs are not mergeable - /// even if doable (just unimplemented). 
+ /// Merging query plan inputs is not allowed (unimplemented), + /// but could be. #[must_use] - pub fn consolidate(&mut self, other: Self) -> bool { + pub fn consolidate(&mut self, other: Self) -> Result<()> { + let incompatible_types = || Err(error("Incompatible types for consolidating proofs")); match self { Input::None => { *self = other; @@ -213,7 +224,7 @@ impl Input { for (child_info, root) in children_other { match children.entry(child_info) { btree_map::Entry::Occupied(v) => if v.get() != &root { - return false; + return Err(error("Incompatible children root when consolidating proofs")); }, btree_map::Entry::Vacant(v) => { v.insert(root); @@ -221,61 +232,59 @@ impl Input { } } }, - Input::QueryPlan(..) => return false, - Input::QueryPlanWithValues(..) => return false, + Input::QueryPlan(..) => return incompatible_types(), + Input::QueryPlanWithValues(..) => return incompatible_types(), } }, - Input::QueryPlan(..) => return false, - Input::QueryPlanWithValues(..) => return false, + Input::QueryPlan(..) => return incompatible_types(), + Input::QueryPlanWithValues(..) => return incompatible_types(), } - true + Ok(()) } } /// Kind for a `Input` variant. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum InputKind { /// `Input::None` kind. None, - /// `Input::ChildTrieRoots` kind. ChildTrieRoots, - /// `Input::QueryPlan` kind. QueryPlan, - /// `Input::QueryPlanWithValues` kind. QueryPlanWithValues, } impl StorageProofKind { /// Input kind needed for processing (create) the proof. 
- pub fn process_input_kind(&self) -> InputKind { + pub fn input_kind_for_processing(&self) -> InputKind { match self { StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlan, StorageProofKind::TrieSkipHashesForMerge | StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull => InputKind::ChildTrieRoots, StorageProofKind::Full - | StorageProofKind::Flatten => InputKind::None, + | StorageProofKind::Flat => InputKind::None, } } /// Input kind needed for verifying the proof. - pub fn verify_input_kind(&self) -> InputKind { + pub fn input_kind_for_checking(&self) -> InputKind { match self { StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanWithValues, StorageProofKind::TrieSkipHashes | StorageProofKind::TrieSkipHashesFull | StorageProofKind::TrieSkipHashesForMerge | StorageProofKind::Full - | StorageProofKind::Flatten => InputKind::None, + | StorageProofKind::Flat => InputKind::None, } } /// Indicates what variant of proof recorder should be use. - pub fn need_register_full(&self) -> bool { + pub fn is_full_proof_recorder_needed(&self) -> bool { match self { - StorageProofKind::Flatten => false, + StorageProofKind::Flat => false, StorageProofKind::Full | StorageProofKind::KnownQueryPlanAndValues | StorageProofKind::TrieSkipHashes @@ -288,7 +297,7 @@ impl StorageProofKind { /// and if so, if the backend need to be full. pub fn use_full_partial_db(&self) -> Option { match self { - StorageProofKind::Flatten + StorageProofKind::Flat | StorageProofKind::TrieSkipHashes => Some(false), StorageProofKind::Full | StorageProofKind::TrieSkipHashesForMerge @@ -299,7 +308,7 @@ impl StorageProofKind { /// Proof that should be use with `verify` method. pub fn can_use_verify(&self) -> bool { - matches!(self.verify_input_kind(), InputKind::None) + matches!(self.input_kind_for_checking(), InputKind::None) } /// Can be use as a trie db backend. @@ -323,7 +332,7 @@ impl StorageProofKind { } /// A collection on encoded trie nodes. 
-type ProofNodes = Vec>; +pub type ProofNodes = Vec>; /// A collection on encoded and compacted trie nodes. /// Nodes are sorted by trie node iteration order, and some hash @@ -346,9 +355,11 @@ type ProofCompacted = Vec>; pub enum StorageProof { /// Single flattened proof component, all default child trie are flattened over a same /// container, no child trie information is provided. - Flatten(ProofNodes), + Flat(ProofNodes), - /// This works as `Flatten`, but skips encoding of hashes + /// Compacted flat proof. + /// + /// This works as `Flat`, but skips encoding of hashes /// that can be calculated when reading the child nodes /// in the proof (nodes ordering hold the trie structure information). /// This requires that the proof is collected with @@ -358,6 +369,8 @@ pub enum StorageProof { /// when decoding. TrieSkipHashes(Vec), + /// Proof for a known key value content. + /// /// This skips encoding of hashes in a similar way as `TrieSkipHashes`. /// This also skips values in the proof, and can therefore only be /// use to check if there was a change of content. @@ -369,6 +382,7 @@ pub enum StorageProof { /// This is an intermediate representation that keep trace of /// input and is therefore mergeable into compact representation. + /// /// Compatible with `TrieSkipHashes` and `TrieSkipHashesFull` proofs. TrieSkipHashesForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>), @@ -376,12 +390,15 @@ pub enum StorageProof { // decoding is not implemented. /// Proof with full child trie description. + /// /// Currently Full variant is unused as all our proof kind can share a same memory db /// (which is a bit more compact). /// This currently mainly provided for test purpose and extensibility. Full(ChildrenProofMap), - /// Compact form of proofs split by child trie, this is using the same compaction as + /// Compact form of proofs split by child trie. + /// + /// This is using the same compaction as /// `TrieSkipHashes` but keep trace of child trie origin. 
/// This is mainly provided for test purpose and extensibility. TrieSkipHashesFull(ChildrenProofMap), @@ -394,12 +411,12 @@ impl Decode for StorageProof { Err(_) => { // we allow empty proof to decode to encoded empty proof for // compatibility with legacy encoding. - return Ok(StorageProof::Flatten(Vec::new())); + return Ok(StorageProof::Flat(Vec::new())); }, }; - Ok(match StorageProofKind::read_from_byte(kind) + Ok(match StorageProofKind::from_byte(kind) .ok_or_else(|| codec::Error::from("Invalid storage kind"))? { - StorageProofKind::Flatten => StorageProof::Flatten(Decode::decode(value)?), + StorageProofKind::Flat => StorageProof::Flat(Decode::decode(value)?), StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Decode::decode(value)?), StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(Decode::decode(value)?), @@ -416,11 +433,12 @@ impl Encode for StorageProof { fn encode_to(&self, dest: &mut T) { (self.kind() as u8).encode_to(dest); match self { - StorageProof::Flatten(p) => p.encode_to(dest), + StorageProof::Flat(p) => p.encode_to(dest), StorageProof::TrieSkipHashes(p) => p.encode_to(dest), StorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), StorageProof::Full(p) => p.encode_to(dest), StorageProof::TrieSkipHashesFull(p) => p.encode_to(dest), + // TODO no error in encode: this should be ok after refactoring StorageProof::TrieSkipHashesForMerge(..) => panic!("merge did not recurse as told"), } } @@ -440,12 +458,12 @@ impl<'a> Encode for LegacyEncodeAdapter<'a> { /// This encodes only if storage proof is a flatten proof. /// It panics otherwhise, so it should only be use when we /// got strong guaranties of the proof kind. 
-pub struct FlattenEncodeAdapter<'a>(pub &'a StorageProof); +pub struct FlatEncodeAdapter<'a>(pub &'a StorageProof); -impl<'a> Encode for FlattenEncodeAdapter<'a> { +impl<'a> Encode for FlatEncodeAdapter<'a> { fn encode_to(&self, dest: &mut T) { match self.0 { - StorageProof::Flatten(nodes) => nodes.encode_to(dest), + StorageProof::Flat(nodes) => nodes.encode_to(dest), _ => panic!("Usage of flatten encoder on non flatten proof"), } } @@ -471,9 +489,9 @@ impl<'a, I: CodecInput> CodecInput for InputRevertPeek<'a, I> { *self.0 = &self.0[into.len()..]; return Ok(()); } else { - into[..self.0.len()].copy_from_slice(&self.0[..]); - *self.0 = &[][..]; offset = self.0.len(); + into[..offset].copy_from_slice(&self.0[..]); + *self.0 = &[][..]; } } self.1.read(&mut into[offset..]) @@ -498,7 +516,7 @@ impl Decode for LegacyDecodeAdapter { } else { let mut legacy = &[legacy][..]; let mut input = InputRevertPeek(&mut legacy, value); - LegacyDecodeAdapter(StorageProof::Flatten(Decode::decode(&mut input)?)) + LegacyDecodeAdapter(StorageProof::Flat(Decode::decode(&mut input)?)) }) } } @@ -510,13 +528,13 @@ impl StorageProof { /// key-value pairs exist in storage). pub fn empty() -> Self { // we default to flatten for compatibility - Self::empty_for(StorageProofKind::Flatten) + Self::empty_for(StorageProofKind::Flat) } /// Returns a new empty proof of a given kind. pub fn empty_for(kind: StorageProofKind) -> Self { match kind { - StorageProofKind::Flatten => StorageProof::Flatten(Default::default()), + StorageProofKind::Flat => StorageProof::Flat(Default::default()), StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(ChildrenProofMap::default()), StorageProofKind::TrieSkipHashesForMerge => StorageProof::TrieSkipHashesForMerge( @@ -530,7 +548,7 @@ impl StorageProof { /// Check if proof is empty for any kind of proof. 
pub fn is_empty(&self) -> bool { match self { - StorageProof::Flatten(data) => data.is_empty(), + StorageProof::Flat(data) => data.is_empty(), StorageProof::Full(data) => data.is_empty(), StorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), StorageProof::TrieSkipHashes(data) => data.is_empty(), @@ -541,7 +559,7 @@ impl StorageProof { /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed /// to be traversed in any particular order. - /// This iterator is only for `Flatten` proofs, other kind of proof will return an iterator with + /// This iterator is only for `Flat` proofs, other kind of proof will return an iterator with /// no content. pub fn iter_nodes_flatten(self) -> StorageProofNodeIterator { StorageProofNodeIterator::new(self) @@ -573,7 +591,7 @@ impl StorageProof { result.extend(unpacked_proof); } - Ok(StorageProof::Flatten(result)) + Ok(StorageProof::Flat(result)) }, s => Ok(s), } @@ -627,7 +645,7 @@ impl StorageProof { where H::Out: Codec, { Ok(match kind { - StorageProofKind::Flatten => { + StorageProofKind::Flat => { let mut result = Vec::new(); collected.iter().for_each(|(child_info, proof)| { match child_info.child_type() { @@ -640,7 +658,7 @@ impl StorageProof { } } }); - StorageProof::Flatten(result) + StorageProof::Flat(result) }, StorageProofKind::Full => { let mut result = ChildrenProofMap::default(); @@ -736,12 +754,12 @@ impl StorageProof { where H::Out: Codec, { Ok(match kind { - StorageProofKind::Flatten => { + StorageProofKind::Flat => { let trie_nodes = collected .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) .collect(); - StorageProof::Flatten(trie_nodes) + StorageProof::Flat(trie_nodes) }, _ => return Err(no_partial_db_support()), }) @@ -750,7 +768,7 @@ impl StorageProof { /// Merges multiple storage proofs. /// The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. 
- /// Merge result in a `Flatten` storage proof if any of the item is flatten (we cannot unflatten). + /// Merge result in a `Flat` storage proof if any of the item is flatten (we cannot unflatten). /// The function cannot pack back proof as it does not have reference to additional information /// needed. /// So packing back need to be done in a next step with aggregated proof inputs. @@ -804,7 +822,7 @@ impl StorageProof { | StorageProof::TrieSkipHashes(..) | StorageProof::KnownQueryPlanAndValues(..) => unreachable!("Unpacked or early return earlier"), - StorageProof::Flatten(proof) => { + StorageProof::Flat(proof) => { if packable_child_sets.is_some() { return Err(error("Proof incompatibility for merging")); } @@ -854,7 +872,7 @@ impl StorageProof { } } Ok(if do_flatten { - StorageProof::Flatten(unique_set.into_iter().collect()) + StorageProof::Flat(unique_set.into_iter().collect()) } else { let mut result = ChildrenProofMap::default(); for (child_info, set) in child_sets.into_iter() { @@ -867,7 +885,7 @@ impl StorageProof { /// Get kind type for the storage proof variant. pub fn kind(&self) -> StorageProofKind { match self { - StorageProof::Flatten(_) => StorageProofKind::Flatten, + StorageProof::Flat(_) => StorageProofKind::Flat, StorageProof::TrieSkipHashes(_) => StorageProofKind::TrieSkipHashes, StorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, StorageProof::Full(_) => StorageProofKind::Full, @@ -877,18 +895,22 @@ impl StorageProof { } /// Create in-memory storage of proof check backend. + /// /// Currently child trie are all with same backend - /// implementation, therefore using - /// `as_partial_flat_db` is prefered. - pub fn as_partial_db(self) -> Result>> + /// implementation, therefore using `into_partial_flat_db` is prefered. + /// + /// Fail when proof do not support producing a partial db `can_use_as_partial_db` + /// returns false. + /// Can also fail on invalid compact proof. 
+ pub fn into_partial_db(self) -> Result>> where H: Hasher, H::Out: Decode, { let mut result = ChildrenProofMap::default(); match self { - s@StorageProof::Flatten(..) => { - let db = s.as_partial_flat_db::()?; + s@StorageProof::Flat(..) => { + let db = s.into_partial_flat_db::()?; result.insert(ChildInfoProof::top_trie(), db); }, StorageProof::Full(children) => { @@ -919,7 +941,7 @@ impl StorageProof { } }, s@StorageProof::TrieSkipHashes(..) => { - let db = s.as_partial_flat_db::()?; + let db = s.into_partial_flat_db::()?; result.insert(ChildInfoProof::top_trie(), db); }, StorageProof::KnownQueryPlanAndValues(_children) => { @@ -930,7 +952,9 @@ impl StorageProof { } /// Create in-memory storage of proof check backend. - pub fn as_partial_flat_db(self) -> Result> + /// + /// Behave similarily to `into_partial_db`. + pub fn into_partial_flat_db(self) -> Result> where H: Hasher, H::Out: Decode, @@ -938,7 +962,7 @@ impl StorageProof { let mut db = MemoryDB::default(); let mut db_empty = true; match self { - s@StorageProof::Flatten(..) => { + s@StorageProof::Flat(..) => { for item in s.iter_nodes_flatten() { db.insert(EMPTY_PREFIX, &item[..]); } @@ -991,11 +1015,12 @@ impl StorageProof { /// Get flatten content form proof. /// This panic on non flatten proof and should only be - /// use when we got strong guarantie the proof is a `Flatten` + /// use when we got strong guarantee the proof is a `Flat` /// proof. 
- pub fn expect_flatten_content(self) -> Vec> { + /// TODO change result to Option (will be refactor in theory) + pub fn expect_flatten_content(self) -> ProofNodes { match self { - StorageProof::Flatten(proof) => proof, + StorageProof::Flat(proof) => proof, _ => panic!("Flat proof expected"), } } @@ -1010,7 +1035,7 @@ pub struct StorageProofNodeIterator { impl StorageProofNodeIterator { fn new(proof: StorageProof) -> Self { match proof { - StorageProof::Flatten(data) => StorageProofNodeIterator { + StorageProof::Flat(data) => StorageProofNodeIterator { inner: data.into_iter(), }, _ => StorageProofNodeIterator { @@ -1035,7 +1060,7 @@ impl TryInto> for StorageProof type Error = Error; fn try_into(self) -> Result> { - self.as_partial_flat_db() + self.into_partial_flat_db() } } @@ -1046,13 +1071,13 @@ impl TryInto>> for StorageProof type Error = Error; fn try_into(self) -> Result>> { - self.as_partial_db() + self.into_partial_db() } } -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] /// Type for storing a map of child trie proof related information. /// A few utilities methods are defined. +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] pub struct ChildrenProofMap(pub BTreeMap); impl sp_std::ops::Deref for ChildrenProofMap { @@ -1089,10 +1114,6 @@ impl IntoIterator for ChildrenProofMap { #[derive(Clone)] pub struct RecordMapTrieNodes(HashMap>); -/// Container recording trie nodes and their encoded hash. -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct ProofMapTrieNodes(pub HashMap, DBValue>); - impl sp_std::default::Default for RecordMapTrieNodes { fn default() -> Self { RecordMapTrieNodes(Default::default()) @@ -1123,6 +1144,10 @@ impl HashDBRef for RecordMapTrieNodes { } } +/// Container recording trie nodes and their encoded hash. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct ProofMapTrieNodes(pub HashMap, DBValue>); + impl sp_std::default::Default for ProofMapTrieNodes { fn default() -> Self { ProofMapTrieNodes(Default::default()) @@ -1159,7 +1184,7 @@ impl HashDBRef for ProofMapTrieNodes } #[test] -fn legacy_proof_codec() { +fn legacy_proof_compatibility() { #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] struct OldStorageProof { trie_nodes: Vec>, @@ -1171,7 +1196,7 @@ fn legacy_proof_codec() { assert_eq!(&old_empty[..], &[0][..]); - let adapter_proof = LegacyDecodeAdapter(StorageProof::Flatten(Vec::new())); + let adapter_proof = LegacyDecodeAdapter(StorageProof::Flat(Vec::new())); assert_eq!(LegacyDecodeAdapter::decode(&mut &old_empty[..]).unwrap(), adapter_proof); let old_one = OldStorageProof { @@ -1180,14 +1205,16 @@ fn legacy_proof_codec() { assert_eq!(&old_one[..], &[4, 8, 4, 5][..]); - let adapter_proof = LegacyDecodeAdapter(StorageProof::Flatten(vec![vec![4u8, 5u8]])); + let adapter_proof = LegacyDecodeAdapter(StorageProof::Flat(vec![vec![4u8, 5u8]])); assert_eq!(LegacyDecodeAdapter::decode(&mut &old_one[..]).unwrap(), adapter_proof); +} - +#[test] +fn legacy_proof_codec() { // random content for proof, we test serialization let content = vec![b"first".to_vec(), b"second".to_vec()]; - let proof = StorageProof::Flatten(content.clone()); + let proof = StorageProof::Flat(content.clone()); let encoded_proof = proof.encode(); // test adapter From 830dbf960cceedefe60cdc89a8943c0f11a24f32 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 3 Jun 2020 19:08:08 +0200 Subject: [PATCH 140/185] Refactoring in progress, base traits. 
--- Cargo.lock | 1 + primitives/state-machine/src/backend.rs | 73 +- primitives/trie/Cargo.toml | 2 + primitives/trie/src/lib.rs | 5 +- primitives/trie/src/storage_proof.rs | 1229 ----------------- primitives/trie/src/storage_proof/compact.rs | 17 + primitives/trie/src/storage_proof/mod.rs | 435 ++++++ .../trie/src/storage_proof/query_plan.rs | 13 + primitives/trie/src/storage_proof/simple.rs | 131 ++ 9 files changed, 670 insertions(+), 1236 deletions(-) delete mode 100644 primitives/trie/src/storage_proof.rs create mode 100644 primitives/trie/src/storage_proof/compact.rs create mode 100644 primitives/trie/src/storage_proof/mod.rs create mode 100644 primitives/trie/src/storage_proof/query_plan.rs create mode 100644 primitives/trie/src/storage_proof/simple.rs diff --git a/Cargo.lock b/Cargo.lock index 060bbff1a0a6e..740e73bf77054 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7900,6 +7900,7 @@ dependencies = [ "hex-literal", "memory-db", "parity-scale-codec", + "parking_lot 0.10.2", "sp-core", "sp-runtime", "sp-std", diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 20a3ab7500a98..1a6bae4f47d43 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -37,8 +37,18 @@ pub trait Backend: std::fmt::Debug { /// Storage changes to be applied if committing type Transaction: Consolidate + Default + Send; - /// Type of trie backend storage. - type TrieBackendStorage: TrieBackendStorage; + /// The proof format use while registering proof. + type StorageProofReg: sp_trie::RegStorageProof + Into; + + /// The actual proof produced. + type StorageProof: sp_trie::BackendStorageProof + + sp_trie::WithRegStorageProof; + + /// Type of proof backend. + type ProofRegBackend: ProofRegBackend; + + /// Type of proof backend. + type ProofCheckBackend: ProofCheckBackend; /// Get keyed storage or None if there is nothing associated. 
fn storage(&self, key: &[u8]) -> Result, Self::Error>; @@ -217,10 +227,65 @@ pub trait Backend: std::fmt::Debug { } } -impl<'a, T: Backend, H: Hasher> Backend for &'a T { +/// Backend that can be instantiated from its state. +pub trait InstantiableStateBackend: Backend + where + H: Hasher, +{ + /// Storage to use to instantiate. + type Storage; + + /// Instantiation method. + fn new(storage: Self::Storage, state: H::Out) -> Self; + + /// Extract state out of the backend. + fn extract_state(self) -> (Self::Storage, H::Out); +} + +/// Backend that can be instantiated from intital content. +pub trait GenesisStateBackend: Backend + where + H: Hasher, +{ + /// Instantiation method. + fn new(storage: sp_core::storage::Storage) -> Self; +} + +/// Backend used to register a proof record. +pub trait ProofRegBackend: crate::backend::Backend + where + H: Hasher, +{ + /// State of a backend. + type State: Default + Send + Sync + Clone; + + /// Extract proof when run. + fn extract_proof(&self) -> Self::StorageProof; +} + +/// Backend used to produce proof. +pub trait ProofCheckBackend: Sized + crate::backend::Backend + where + H: Hasher, +{ + /// Instantiate backend from proof. 
+ fn create_proof_check_backend( + root: H::Out, + proof: Self::StorageProof, + ) -> Result>; +} + + +impl<'a, T, H> Backend for &'a T + where + H: Hasher, + T: Backend, +{ type Error = T::Error; type Transaction = T::Transaction; - type TrieBackendStorage = T::TrieBackendStorage; + type StorageProof = T::StorageProof; + type ProofRegBackend = T::ProofRegBackend; + type ProofCheckBackend = T::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result, Self::Error> { (*self).storage(key) diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index abca6927e4ff4..db7abda66cef6 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -26,6 +26,7 @@ trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.20.0", default-features = false } sp-core = { version = "2.0.0-rc2", default-features = false, path = "../core" } hashbrown = { version = "0.6.3", default-features = false, features = [ "ahash" ] } +parking_lot = { version = "0.10.0", optional = true } # TODO EMCH remove if changing trait [dev-dependencies] trie-bench = "0.21.0" @@ -37,6 +38,7 @@ sp-runtime = { version = "2.0.0-rc2", path = "../runtime" } [features] default = ["std"] std = [ + "parking_lot", "sp-std/std", "codec/std", "hash-db/std", diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 4a132840dc875..a3ff95d00c514 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -34,9 +34,8 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; -pub use storage_proof::{StorageProof, ChildrenProofMap, ProofNodes, - StorageProofKind, Input as ProofInput, InputKind as ProofInputKind, - RecordMapTrieNodes, LegacyDecodeAdapter, LegacyEncodeAdapter, FlatEncodeAdapter}; +pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, + Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs deleted file mode 100644 index 8a37ad3ea4fdb..0000000000000 --- a/primitives/trie/src/storage_proof.rs +++ /dev/null @@ -1,1229 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use sp_std::collections::{btree_map::BTreeMap, btree_map}; -use sp_std::collections::btree_set::BTreeSet; -use sp_std::vec::Vec; -use sp_std::convert::TryInto; -use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput, Error as CodecError}; -use hash_db::{Hasher, HashDB, HashDBRef, EMPTY_PREFIX}; -use crate::{MemoryDB, Layout}; -use sp_storage::{ChildInfoProof, ChildType, ChildrenMap}; -use trie_db::DBValue; -// We are not including it to sp_std, this hash map -// usage is restricted here to proof. 
-// In practice it is already use internally by no_std trie_db. -#[cfg(not(feature = "std"))] -use hashbrown::HashMap; - -#[cfg(feature = "std")] -use std::collections::HashMap; - -type Result = sp_std::result::Result; -type CodecResult = sp_std::result::Result; - -#[cfg(feature = "std")] -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum Error { - /// Error produce by storage proof logic. - /// It is formatted in std to simplify type. - Proof(&'static str), - /// Error produce by trie manipulation. - Trie(String), -} - -#[cfg(not(feature = "std"))] -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum Error { - /// Error produce by storage proof logic. - Proof, - /// Error produce by trie manipulation. - Trie, -} - -#[cfg(feature = "std")] -impl sp_std::fmt::Display for Error { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - match self { - Error::Trie(msg) => write!(f, "Proof error trie: {}", msg), - Error::Proof(msg) => write!(f, "Proof error: {}", msg), - } - } -} - -#[cfg(feature = "std")] -impl sp_std::convert::From> for Error { - fn from(e: sp_std::boxed::Box) -> Self { - // Only trie error is build from box so we use a tiny shortcut here. 
- Error::Trie(format!("{}", e)) - } -} - -#[cfg(not(feature = "std"))] -impl sp_std::convert::From> for Error { - fn from(_e: sp_std::boxed::Box) -> Self { - Error::Trie - } -} - -impl sp_std::convert::From for Error { - fn from(e: CodecError) -> Self { - error(e.what()) - } -} - -#[cfg(feature = "std")] -const fn error(message: &'static str) -> Error { - Error::Proof(message) -} - -#[cfg(not(feature = "std"))] -const fn error(_message: &'static str) -> Error { - Error::Proof -} - -const fn missing_pack_input() -> Error { - error("Packing input missing for proof") -} - -const fn missing_verify_input() -> Error { - error("Input missing for proof verification") -} - -const fn no_partial_db_support() -> Error { - error("Partial db not supported for this proof") -} - -/// Different kind of proof representation are allowed. -/// This definition is used as input parameter when producing -/// a storage proof. -/// Some kind are reserved for test or internal use and will -/// not be usable when decoding proof. -#[repr(u8)] -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum StorageProofKind { - /// Kind for `StorageProof::Flat`. - Flat = 1, - - /// Kind for `StorageProof::TrieSkipHashes`. - TrieSkipHashes = 2, - - /// Kind for `StorageProof::KnownQueryPlanAndValues`. - KnownQueryPlanAndValues = 3, - - /// Technical only - - /// Kind for `StorageProof::TrieSkipHashesForMerge`. - TrieSkipHashesForMerge = 125, - - /// Testing only indices - - /// Kind for `StorageProof::Full`. - Full = 126, - - /// Kind for `StorageProof::TrieSkipHashesFull`. - TrieSkipHashesFull = 127, -} - -impl StorageProofKind { - /// Decode a byte value representing the storage kind. - /// Return `None` if the kind does not exists or is not allowed. 
- #[cfg(test)] - pub fn from_byte(encoded: u8) -> Option { - Some(match encoded { - x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, - x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, - x if x == StorageProofKind::KnownQueryPlanAndValues as u8 - => StorageProofKind::KnownQueryPlanAndValues, - x if x == StorageProofKind::Full as u8 => StorageProofKind::Full, - x if x == StorageProofKind::TrieSkipHashesFull as u8 => StorageProofKind::TrieSkipHashesFull, - x if x == StorageProofKind::TrieSkipHashesForMerge as u8 - => StorageProofKind::TrieSkipHashesForMerge, - x if x == StorageProofKind::TrieSkipHashesFull as u8 - => StorageProofKind::TrieSkipHashesFull, - _ => return None, - }) - } - - /// Decode a byte value representing the storage kind. - /// Return `None` if the kind does not exists or is not allowed. - #[cfg(not(test))] - pub fn from_byte(encoded: u8) -> Option { - Some(match encoded { - x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, - x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, - x if x == StorageProofKind::KnownQueryPlanAndValues as u8 - => StorageProofKind::KnownQueryPlanAndValues, - _ => return None, - }) - } -} - -#[derive(Clone)] -/// Additional information needed for packing or unpacking storage proof. -/// These do not need to be part of the proof but are required -/// when processing the proof. -pub enum Input { - /// Proof is self contained. - None, - - /// Contains trie roots used during proof processing. - ChildTrieRoots(ChildrenProofMap>), - - /// Contains trie roots used during proof processing. - /// Contains key and values queried during the proof processing. - QueryPlanWithValues(ChildrenProofMap<(Vec, Vec<(Vec, Option>)>)>), - - /// Contains trie roots used during proof processing. - /// Contains keys queried during the proof processing. 
- QueryPlan(ChildrenProofMap<(Vec, Vec>)>), -} - -impl Input { - /// Get input kind for a given input. - pub fn kind(&self) -> InputKind { - match self { - Input::ChildTrieRoots(..) => InputKind::ChildTrieRoots, - Input::None => InputKind::None, - Input::QueryPlan(..) => InputKind::QueryPlan, - Input::QueryPlanWithValues(..) => InputKind::QueryPlanWithValues, - } - } - - /// Updates input with new content. - /// Return false on failure. - /// Fails when the input type differs, except for `None` input - /// that is always reassignable. - /// - /// Merging query plan inputs is not allowed (unimplemented), - /// but could be. - #[must_use] - pub fn consolidate(&mut self, other: Self) -> Result<()> { - let incompatible_types = || Err(error("Incompatible types for consolidating proofs")); - match self { - Input::None => { - *self = other; - }, - Input::ChildTrieRoots(children) => { - match other { - Input::None => (), - Input::ChildTrieRoots(children_other) => { - for (child_info, root) in children_other { - match children.entry(child_info) { - btree_map::Entry::Occupied(v) => if v.get() != &root { - return Err(error("Incompatible children root when consolidating proofs")); - }, - btree_map::Entry::Vacant(v) => { - v.insert(root); - }, - } - } - }, - Input::QueryPlan(..) => return incompatible_types(), - Input::QueryPlanWithValues(..) => return incompatible_types(), - } - }, - Input::QueryPlan(..) => return incompatible_types(), - Input::QueryPlanWithValues(..) => return incompatible_types(), - } - Ok(()) - } -} - -/// Kind for a `Input` variant. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum InputKind { - /// `Input::None` kind. - None, - /// `Input::ChildTrieRoots` kind. - ChildTrieRoots, - /// `Input::QueryPlan` kind. - QueryPlan, - /// `Input::QueryPlanWithValues` kind. - QueryPlanWithValues, -} - -impl StorageProofKind { - /// Input kind needed for processing (create) the proof. 
- pub fn input_kind_for_processing(&self) -> InputKind { - match self { - StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlan, - StorageProofKind::TrieSkipHashesForMerge - | StorageProofKind::TrieSkipHashes - | StorageProofKind::TrieSkipHashesFull => InputKind::ChildTrieRoots, - StorageProofKind::Full - | StorageProofKind::Flat => InputKind::None, - } - } - - /// Input kind needed for verifying the proof. - pub fn input_kind_for_checking(&self) -> InputKind { - match self { - StorageProofKind::KnownQueryPlanAndValues => InputKind::QueryPlanWithValues, - StorageProofKind::TrieSkipHashes - | StorageProofKind::TrieSkipHashesFull - | StorageProofKind::TrieSkipHashesForMerge - | StorageProofKind::Full - | StorageProofKind::Flat => InputKind::None, - } - } - - /// Indicates what variant of proof recorder should be use. - pub fn is_full_proof_recorder_needed(&self) -> bool { - match self { - StorageProofKind::Flat => false, - StorageProofKind::Full - | StorageProofKind::KnownQueryPlanAndValues - | StorageProofKind::TrieSkipHashes - | StorageProofKind::TrieSkipHashesForMerge - | StorageProofKind::TrieSkipHashesFull => true, - } - } - - /// Indicates if we should execute proof over a backend, - /// and if so, if the backend need to be full. - pub fn use_full_partial_db(&self) -> Option { - match self { - StorageProofKind::Flat - | StorageProofKind::TrieSkipHashes => Some(false), - StorageProofKind::Full - | StorageProofKind::TrieSkipHashesForMerge - | StorageProofKind::TrieSkipHashesFull => Some(true), - StorageProofKind::KnownQueryPlanAndValues => None, - } - } - - /// Proof that should be use with `verify` method. - pub fn can_use_verify(&self) -> bool { - matches!(self.input_kind_for_checking(), InputKind::None) - } - - /// Can be use as a trie db backend. 
- pub fn can_use_as_partial_db(&self) -> bool { - match self { - StorageProofKind::KnownQueryPlanAndValues => false, - _ => true, - } - } - - /// Return the best kind to use for merging later, - /// a boolean indicationg if merge should produce full proof. - pub fn mergeable_kind(&self) -> (Self, bool) { - match self { - StorageProofKind::TrieSkipHashes => (StorageProofKind::TrieSkipHashesForMerge, false), - StorageProofKind::TrieSkipHashesFull => (StorageProofKind::TrieSkipHashesForMerge, true), - StorageProofKind::TrieSkipHashesForMerge => (StorageProofKind::TrieSkipHashesForMerge, true), - s => (*s, s.use_full_partial_db().unwrap_or(false)) - } - } -} - -/// A collection on encoded trie nodes. -pub type ProofNodes = Vec>; - -/// A collection on encoded and compacted trie nodes. -/// Nodes are sorted by trie node iteration order, and some hash -/// and/or values are ommitted (they can be either calculated from -/// proof content or completed by proof input). -type ProofCompacted = Vec>; - -/// A proof that some set of key-value pairs are included in the storage state. The proof contains -/// either values so that the partial storage backend can be reconstructed by a verifier that -/// does not already have access to the key-value pairs, or can be verified with `verify` method. -/// -/// For instance for default trie and flatten storage proof kind, the proof component consists of the set of -/// serialized nodes in the storage trie accessed when looking up the keys covered by the proof. -/// Verifying the proof requires constructing the partial trie from the serialized nodes and -/// performing the key lookups. The proof carries additional information (the result of the query). -/// -/// For know query plan and value, the proof is simply verified by running verify method since we -/// are not getting additional information from the proof. 
-#[derive(Debug, PartialEq, Eq, Clone)] -pub enum StorageProof { - /// Single flattened proof component, all default child trie are flattened over a same - /// container, no child trie information is provided. - Flat(ProofNodes), - - /// Compacted flat proof. - /// - /// This works as `Flat`, but skips encoding of hashes - /// that can be calculated when reading the child nodes - /// in the proof (nodes ordering hold the trie structure information). - /// This requires that the proof is collected with - /// child trie separation and each child trie roots as additional - /// input. - /// We remove child trie info when encoding because it is not strictly needed - /// when decoding. - TrieSkipHashes(Vec), - - /// Proof for a known key value content. - /// - /// This skips encoding of hashes in a similar way as `TrieSkipHashes`. - /// This also skips values in the proof, and can therefore only be - /// use to check if there was a change of content. - /// This needs to be check for every children proofs, and needs to keep - /// trace of every child trie origin. - KnownQueryPlanAndValues(ChildrenProofMap), - - // Technical variants - - /// This is an intermediate representation that keep trace of - /// input and is therefore mergeable into compact representation. - /// - /// Compatible with `TrieSkipHashes` and `TrieSkipHashesFull` proofs. - TrieSkipHashesForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>), - - // Following variants are only for testing, they still can be use but - // decoding is not implemented. - - /// Proof with full child trie description. - /// - /// Currently Full variant is unused as all our proof kind can share a same memory db - /// (which is a bit more compact). - /// This currently mainly provided for test purpose and extensibility. - Full(ChildrenProofMap), - - /// Compact form of proofs split by child trie. - /// - /// This is using the same compaction as - /// `TrieSkipHashes` but keep trace of child trie origin. 
- /// This is mainly provided for test purpose and extensibility. - TrieSkipHashesFull(ChildrenProofMap), -} - -impl Decode for StorageProof { - fn decode(value: &mut I) -> CodecResult { - let kind = match value.read_byte() { - Ok(kind) => kind, - Err(_) => { - // we allow empty proof to decode to encoded empty proof for - // compatibility with legacy encoding. - return Ok(StorageProof::Flat(Vec::new())); - }, - }; - Ok(match StorageProofKind::from_byte(kind) - .ok_or_else(|| codec::Error::from("Invalid storage kind"))? { - StorageProofKind::Flat => StorageProof::Flat(Decode::decode(value)?), - StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Decode::decode(value)?), - StorageProofKind::KnownQueryPlanAndValues - => StorageProof::KnownQueryPlanAndValues(Decode::decode(value)?), - StorageProofKind::Full => StorageProof::Full(Decode::decode(value)?), - StorageProofKind::TrieSkipHashesForMerge - => return Err(codec::Error::from("Invalid storage kind")), - StorageProofKind::TrieSkipHashesFull - => StorageProof::TrieSkipHashesFull(Decode::decode(value)?), - }) - } -} - -impl Encode for StorageProof { - fn encode_to(&self, dest: &mut T) { - (self.kind() as u8).encode_to(dest); - match self { - StorageProof::Flat(p) => p.encode_to(dest), - StorageProof::TrieSkipHashes(p) => p.encode_to(dest), - StorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), - StorageProof::Full(p) => p.encode_to(dest), - StorageProof::TrieSkipHashesFull(p) => p.encode_to(dest), - // TODO no error in encode: this should be ok after refactoring - StorageProof::TrieSkipHashesForMerge(..) => panic!("merge did not recurse as told"), - } - } -} - -/// This encodes the full proof capabillity under -/// legacy proof format. -pub struct LegacyEncodeAdapter<'a>(pub &'a StorageProof); - -impl<'a> Encode for LegacyEncodeAdapter<'a> { - fn encode_to(&self, dest: &mut T) { - 0u8.encode_to(dest); - self.0.encode_to(dest); - } -} - -/// This encodes only if storage proof is a flatten proof. 
-/// It panics otherwhise, so it should only be use when we -/// got strong guaranties of the proof kind. -pub struct FlatEncodeAdapter<'a>(pub &'a StorageProof); - -impl<'a> Encode for FlatEncodeAdapter<'a> { - fn encode_to(&self, dest: &mut T) { - match self.0 { - StorageProof::Flat(nodes) => nodes.encode_to(dest), - _ => panic!("Usage of flatten encoder on non flatten proof"), - } - } -} - -#[cfg_attr(test, derive(Debug, PartialEq, Eq))] -/// Decode variant of `LegacyEncodeAdapter`. -pub struct LegacyDecodeAdapter(pub StorageProof); - -/// Allow read ahead on input by chaining back some already consumed data. -pub struct InputRevertPeek<'a, I>(pub &'a mut &'a [u8], pub &'a mut I); - -impl<'a, I: CodecInput> CodecInput for InputRevertPeek<'a, I> { - fn remaining_len(&mut self) -> CodecResult> { - Ok(self.1.remaining_len()?.map(|l| l + self.0.len())) - } - - fn read(&mut self, into: &mut [u8]) -> CodecResult<()> { - let mut offset = 0; - if self.0.len() > 0 { - if self.0.len() > into.len() { - into.copy_from_slice(&self.0[..into.len()]); - *self.0 = &self.0[into.len()..]; - return Ok(()); - } else { - offset = self.0.len(); - into[..offset].copy_from_slice(&self.0[..]); - *self.0 = &[][..]; - } - } - self.1.read(&mut into[offset..]) - } - - fn read_byte(&mut self) -> CodecResult { - if self.0.len() > 0 { - let result = self.0[0]; - *self.0 = &self.0[1..]; - Ok(result) - } else { - self.1.read_byte() - } - } -} - -impl Decode for LegacyDecodeAdapter { - fn decode(value: &mut I) -> CodecResult { - let legacy = value.read_byte()?; - Ok(if legacy == 0 { - LegacyDecodeAdapter(Decode::decode(value)?) - } else { - let mut legacy = &[legacy][..]; - let mut input = InputRevertPeek(&mut legacy, value); - LegacyDecodeAdapter(StorageProof::Flat(Decode::decode(&mut input)?)) - }) - } -} - -impl StorageProof { - /// Returns a new empty proof. - /// - /// An empty proof is capable of only proving trivial statements (ie. 
that an empty set of - /// key-value pairs exist in storage). - pub fn empty() -> Self { - // we default to flatten for compatibility - Self::empty_for(StorageProofKind::Flat) - } - - /// Returns a new empty proof of a given kind. - pub fn empty_for(kind: StorageProofKind) -> Self { - match kind { - StorageProofKind::Flat => StorageProof::Flat(Default::default()), - StorageProofKind::Full => StorageProof::Full(ChildrenProofMap::default()), - StorageProofKind::TrieSkipHashesFull => StorageProof::TrieSkipHashesFull(ChildrenProofMap::default()), - StorageProofKind::TrieSkipHashesForMerge => StorageProof::TrieSkipHashesForMerge( - ChildrenProofMap::default(), - ), - StorageProofKind::KnownQueryPlanAndValues => StorageProof::KnownQueryPlanAndValues(ChildrenProofMap::default()), - StorageProofKind::TrieSkipHashes => StorageProof::TrieSkipHashes(Default::default()), - } - } - - /// Check if proof is empty for any kind of proof. - pub fn is_empty(&self) -> bool { - match self { - StorageProof::Flat(data) => data.is_empty(), - StorageProof::Full(data) => data.is_empty(), - StorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), - StorageProof::TrieSkipHashes(data) => data.is_empty(), - StorageProof::TrieSkipHashesFull(data) => data.is_empty(), - StorageProof::TrieSkipHashesForMerge(data) => data.is_empty(), - } - } - - /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed - /// to be traversed in any particular order. - /// This iterator is only for `Flat` proofs, other kind of proof will return an iterator with - /// no content. 
- pub fn iter_nodes_flatten(self) -> StorageProofNodeIterator { - StorageProofNodeIterator::new(self) - } - - fn trie_skip_unpack( - self, - ) -> Result - where H::Out: Codec, - { - match self { - StorageProof::TrieSkipHashesFull(children) => { - let mut result = ChildrenProofMap::default(); - for (child_info, proof) in children { - match child_info.child_type() { - ChildType::ParentKeyId => { - // Note that we could return roots from unpacking. - let (_root, unpacked_proof) = crate::unpack_proof::>(proof.as_slice())?; - result.insert(child_info, unpacked_proof); - } - } - } - Ok(StorageProof::Full(result)) - }, - StorageProof::TrieSkipHashes(children) => { - let mut result = ProofNodes::default(); - for proof in children { - let (_root, unpacked_proof) = crate::unpack_proof::>(proof.as_slice())?; - result.extend(unpacked_proof); - } - - Ok(StorageProof::Flat(result)) - }, - s => Ok(s), - } - } - - /// Run proof validation when the proof allows immediate - /// verification (`StorageProofKind::can_use_verify`). - pub fn verify( - self, - input: &Input, - ) -> Result> - where H::Out: Codec, - { - match self { - StorageProof::KnownQueryPlanAndValues(proof_children) => { - if let Input::QueryPlanWithValues(input_children) = input { - let mut root_hash = H::Out::default(); - for (child_info, nodes) in proof_children.iter() { - if let Some((root, input)) = input_children.get(child_info) { - // Layout h is the only supported one at the time being - if root.len() != root_hash.as_ref().len() { - return Ok(Some(false)); - } - root_hash.as_mut().copy_from_slice(&root[..]); - if let Err(_) = trie_db::proof::verify_proof::, _, _, _>( - &root_hash, - &nodes[..], - input.iter(), - ) { - return Ok(Some(false)); - } - } else { - return Err(missing_verify_input()); - } - } - Ok(Some(true)) - } else { - Err(missing_verify_input()) - } - }, - _ => Ok(None), - } - } - - /// Produces the proof from collected information. 
- pub fn extract_proof( - collected: &ChildrenMap>, - kind: StorageProofKind, - input: &Input, - ) -> Result - where H::Out: Codec, - { - Ok(match kind { - StorageProofKind::Flat => { - let mut result = Vec::new(); - collected.iter().for_each(|(child_info, proof)| { - match child_info.child_type() { - ChildType::ParentKeyId => { - // this can get merged with top, we do not use key prefix - result.extend(proof.0.clone() - .drain() - .filter_map(|(_k, v)| v) - ); - } - } - }); - StorageProof::Flat(result) - }, - StorageProofKind::Full => { - let mut result = ChildrenProofMap::default(); - for (child_info, set) in collected.iter() { - let trie_nodes: Vec> = set - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - result.insert(child_info.proof_info(), trie_nodes); - } - StorageProof::Full(result) - }, - StorageProofKind::TrieSkipHashesForMerge => { - if let Input::ChildTrieRoots(roots) = input { - let mut result = ChildrenProofMap::default(); - for (child_info, set) in collected.iter() { - let root = roots.get(&child_info.proof_info()) - .ok_or_else(|| missing_pack_input())?.clone(); - let trie_nodes: HashMap<_, _> = set - .iter() - .filter_map(|(k, v)| v.as_ref().map(|v| (k.encode(), v.to_vec()))) - .collect(); - result.insert(child_info.proof_info(), (ProofMapTrieNodes(trie_nodes), root)); - } - StorageProof::TrieSkipHashesForMerge(result) - } else { - return Err(missing_pack_input()); - } - }, - StorageProofKind::TrieSkipHashesFull => { - if let Input::ChildTrieRoots(roots) = input { - let mut result = ChildrenProofMap::default(); - for (child_info, set) in collected.iter() { - let root = roots.get(&child_info.proof_info()) - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .ok_or_else(|| missing_pack_input())?; - let trie_nodes = crate::pack_proof_from_collected::>(&root, set)?; - result.insert(child_info.proof_info(), trie_nodes); - } - StorageProof::TrieSkipHashesFull(result) - } else { - return Err(missing_pack_input()); - } - }, - 
StorageProofKind::TrieSkipHashes => { - if let Input::ChildTrieRoots(roots) = input { - let mut result = Vec::default(); - for (child_info, set) in collected.iter() { - let root = roots.get(&child_info.proof_info()) - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .ok_or_else(|| missing_pack_input())?; - let trie_nodes = crate::pack_proof_from_collected::>(&root, set)?; - result.push(trie_nodes); - } - StorageProof::TrieSkipHashes(result) - } else { - return Err(missing_pack_input()); - } - }, - StorageProofKind::KnownQueryPlanAndValues => { - if let Input::QueryPlan(input_children) = input { - let mut result = ChildrenProofMap::default(); - let mut root_hash = H::Out::default(); - for (child_info, set) in collected.iter() { - let child_info_proof = child_info.proof_info(); - if let Some((root, keys)) = input_children.get(&child_info_proof) { - // Layout h is the only supported one at the time being - if root.len() != root_hash.as_ref().len() { - return Err(missing_pack_input()); - } - root_hash.as_mut().copy_from_slice(&root[..]); - let trie = >>::new(set, &root_hash)?; - let compacted = trie_db::proof::generate_proof(&trie, keys)?; - result.insert(child_info_proof, compacted); - } else { - return Err(missing_pack_input()); - } - } - StorageProof::KnownQueryPlanAndValues(result) - } else { - return Err(missing_pack_input()); - } - }, - }) - } - - /// Produce the proof from collected information on a flat backend. - pub fn extract_proof_from_flat( - collected: &RecordMapTrieNodes, - kind: StorageProofKind, - _input: &Input, - ) -> Result - where H::Out: Codec, - { - Ok(match kind { - StorageProofKind::Flat => { - let trie_nodes = collected - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - StorageProof::Flat(trie_nodes) - }, - _ => return Err(no_partial_db_support()), - }) - } - - /// Merges multiple storage proofs. 
- /// The merged proof output may be smaller than the aggregate size of the input - /// proofs due to deduplication of trie nodes. - /// Merge result in a `Flat` storage proof if any of the item is flatten (we cannot unflatten). - /// The function cannot pack back proof as it does not have reference to additional information - /// needed. - /// So packing back need to be done in a next step with aggregated proof inputs. - /// Using a technical mergeable type is also possible (see `StorageProofKind::TrieSkipHashesForMerge` - /// and `mergeable_kind`). - pub fn merge(proofs: I, prefer_full: bool, recurse: bool) -> Result - where - I: IntoIterator, - H: Hasher, - H::Out: Codec, - { - let mut do_flatten = !prefer_full; - let mut child_sets = ChildrenProofMap::>>::default(); - let mut unique_set = BTreeSet::>::default(); - let mut packable_child_sets: Option)>> = None; - // lookup for best encoding - for mut proof in proofs { - // unpack - match &proof { - &StorageProof::TrieSkipHashesFull(..) => { - proof = proof.trie_skip_unpack::()?; - }, - &StorageProof::TrieSkipHashes(..) => { - proof = proof.trie_skip_unpack::()?; - }, - &StorageProof::KnownQueryPlanAndValues(..) => { - return Err(error("Proof incompatibility for merging")); - }, - _ => (), - } - let proof = proof; - match proof { - StorageProof::TrieSkipHashesForMerge(proof) => { - if !child_sets.is_empty() || !unique_set.is_empty() { - return Err(error("Proof incompatibility for merging")); - } - if let Some(p) = packable_child_sets.as_mut() { - for (child_info, (mut proof, root)) in proof.into_iter() { - p.entry(child_info) - .and_modify(|entry| { - debug_assert!(&root == &entry.1); - entry.0.extend(proof.drain()); - }) - .or_insert((proof, root)); - } - } else { - packable_child_sets = Some(proof); - } - }, - StorageProof::TrieSkipHashesFull(..) - | StorageProof::TrieSkipHashes(..) - | StorageProof::KnownQueryPlanAndValues(..) 
- => unreachable!("Unpacked or early return earlier"), - StorageProof::Flat(proof) => { - if packable_child_sets.is_some() { - return Err(error("Proof incompatibility for merging")); - } - if !do_flatten { - do_flatten = true; - for (_, set) in sp_std::mem::replace(&mut child_sets, Default::default()).into_iter() { - unique_set.extend(set); - } - } - unique_set.extend(proof); - }, - StorageProof::Full(children) => { - if packable_child_sets.is_some() { - return Err(error("Proof incompatibility for merging")); - } - for (child_info, child) in children.into_iter() { - if do_flatten { - unique_set.extend(child); - } else { - let set = child_sets.entry(child_info).or_default(); - set.extend(child); - } - } - }, - } - } - if let Some(children) = packable_child_sets { - if recurse { - return Ok(StorageProof::TrieSkipHashesForMerge(children)) - } - if prefer_full { - let mut result = ChildrenProofMap::default(); - for (child_info, (set, root)) in children.into_iter() { - let root = Decode::decode(&mut &root[..])?; - let trie_nodes = crate::pack_proof_from_collected::>(&root, &set)?; - result.insert(child_info, trie_nodes); - } - return Ok(StorageProof::TrieSkipHashesFull(result)) - } else { - let mut result = Vec::default(); - for (_child_info, (set, root)) in children.iter() { - let root = Decode::decode(&mut &root[..])?; - let trie_nodes = crate::pack_proof_from_collected::>(&root, &*set)?; - result.push(trie_nodes); - } - return Ok(StorageProof::TrieSkipHashes(result)) - } - } - Ok(if do_flatten { - StorageProof::Flat(unique_set.into_iter().collect()) - } else { - let mut result = ChildrenProofMap::default(); - for (child_info, set) in child_sets.into_iter() { - result.insert(child_info, set.into_iter().collect()); - } - StorageProof::Full(result) - }) - } - - /// Get kind type for the storage proof variant. 
- pub fn kind(&self) -> StorageProofKind { - match self { - StorageProof::Flat(_) => StorageProofKind::Flat, - StorageProof::TrieSkipHashes(_) => StorageProofKind::TrieSkipHashes, - StorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, - StorageProof::Full(_) => StorageProofKind::Full, - StorageProof::TrieSkipHashesFull(_) => StorageProofKind::TrieSkipHashesFull, - StorageProof::TrieSkipHashesForMerge(_) => StorageProofKind::TrieSkipHashesForMerge, - } - } - - /// Create in-memory storage of proof check backend. - /// - /// Currently child trie are all with same backend - /// implementation, therefore using `into_partial_flat_db` is prefered. - /// - /// Fail when proof do not support producing a partial db `can_use_as_partial_db` - /// returns false. - /// Can also fail on invalid compact proof. - pub fn into_partial_db(self) -> Result>> - where - H: Hasher, - H::Out: Decode, - { - let mut result = ChildrenProofMap::default(); - match self { - s@StorageProof::Flat(..) => { - let db = s.into_partial_flat_db::()?; - result.insert(ChildInfoProof::top_trie(), db); - }, - StorageProof::Full(children) => { - for (child_info, proof) in children.into_iter() { - let mut db = MemoryDB::default(); - for item in proof.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } - result.insert(child_info, db); - } - }, - StorageProof::TrieSkipHashesForMerge(children) => { - for (child_info, (proof, _root)) in children.into_iter() { - let mut db = MemoryDB::default(); - for (key, value) in proof.0.into_iter() { - let key = Decode::decode(&mut &key[..])?; - db.emplace(key, EMPTY_PREFIX, value); - } - result.insert(child_info, db); - } - }, - StorageProof::TrieSkipHashesFull(children) => { - for (child_info, proof) in children.into_iter() { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). 
- let (_root, db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - result.insert(child_info, db); - } - }, - s@StorageProof::TrieSkipHashes(..) => { - let db = s.into_partial_flat_db::()?; - result.insert(ChildInfoProof::top_trie(), db); - }, - StorageProof::KnownQueryPlanAndValues(_children) => { - return Err(no_partial_db_support()); - }, - } - Ok(result) - } - - /// Create in-memory storage of proof check backend. - /// - /// Behave similarily to `into_partial_db`. - pub fn into_partial_flat_db(self) -> Result> - where - H: Hasher, - H::Out: Decode, - { - let mut db = MemoryDB::default(); - let mut db_empty = true; - match self { - s@StorageProof::Flat(..) => { - for item in s.iter_nodes_flatten() { - db.insert(EMPTY_PREFIX, &item[..]); - } - }, - StorageProof::Full(children) => { - for (_child_info, proof) in children.into_iter() { - for item in proof.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } - } - }, - StorageProof::TrieSkipHashesForMerge(children) => { - for (_child_info, (proof, _root)) in children.into_iter() { - for (key, value) in proof.0.into_iter() { - let key = Decode::decode(&mut &key[..])?; - db.emplace(key, EMPTY_PREFIX, value); - } - } - }, - StorageProof::TrieSkipHashesFull(children) => { - for (_child_info, proof) in children.into_iter() { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). 
- let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - if db_empty { - db_empty = false; - db = child_db; - } else { - db.consolidate(child_db); - } - } - }, - StorageProof::TrieSkipHashes(children) => { - for proof in children.into_iter() { - let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - if db_empty { - db_empty = false; - db = child_db; - } else { - db.consolidate(child_db); - } - } - }, - StorageProof::KnownQueryPlanAndValues(_children) => { - return Err(no_partial_db_support()); - }, - } - Ok(db) - } - - /// Get flatten content form proof. - /// This panic on non flatten proof and should only be - /// use when we got strong guarantee the proof is a `Flat` - /// proof. - /// TODO change result to Option (will be refactor in theory) - pub fn expect_flatten_content(self) -> ProofNodes { - match self { - StorageProof::Flat(proof) => proof, - _ => panic!("Flat proof expected"), - } - } -} - -/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to -/// be traversed in any particular order. -pub struct StorageProofNodeIterator { - inner: > as IntoIterator>::IntoIter, -} - -impl StorageProofNodeIterator { - fn new(proof: StorageProof) -> Self { - match proof { - StorageProof::Flat(data) => StorageProofNodeIterator { - inner: data.into_iter(), - }, - _ => StorageProofNodeIterator { - inner: Vec::new().into_iter(), - }, - } - } -} - -impl Iterator for StorageProofNodeIterator { - type Item = Vec; - - fn next(&mut self) -> Option { - self.inner.next() - } -} - -impl TryInto> for StorageProof - where - H::Out: Decode, -{ - type Error = Error; - - fn try_into(self) -> Result> { - self.into_partial_flat_db() - } -} - -impl TryInto>> for StorageProof - where - H::Out: Decode, -{ - type Error = Error; - - fn try_into(self) -> Result>> { - self.into_partial_db() - } -} - -/// Type for storing a map of child trie proof related information. -/// A few utilities methods are defined. 
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -pub struct ChildrenProofMap(pub BTreeMap); - -impl sp_std::ops::Deref for ChildrenProofMap { - type Target = BTreeMap; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl sp_std::ops::DerefMut for ChildrenProofMap { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl sp_std::default::Default for ChildrenProofMap { - fn default() -> Self { - ChildrenProofMap(BTreeMap::new()) - } -} - -impl IntoIterator for ChildrenProofMap { - type Item = (ChildInfoProof, T); - type IntoIter = sp_std::collections::btree_map::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} - - -/// Container recording trie nodes. -#[derive(Clone)] -pub struct RecordMapTrieNodes(HashMap>); - -impl sp_std::default::Default for RecordMapTrieNodes { - fn default() -> Self { - RecordMapTrieNodes(Default::default()) - } -} - -impl sp_std::ops::Deref for RecordMapTrieNodes { - type Target = HashMap>; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl sp_std::ops::DerefMut for RecordMapTrieNodes { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl HashDBRef for RecordMapTrieNodes { - fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { - self.0.get(key).and_then(Clone::clone) - } - - fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { - self.0.get(key).map(Option::is_some).unwrap_or(false) - } -} - -/// Container recording trie nodes and their encoded hash. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub struct ProofMapTrieNodes(pub HashMap, DBValue>); - -impl sp_std::default::Default for ProofMapTrieNodes { - fn default() -> Self { - ProofMapTrieNodes(Default::default()) - } -} - -impl sp_std::ops::Deref for ProofMapTrieNodes { - type Target = HashMap, DBValue>; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl sp_std::ops::DerefMut for ProofMapTrieNodes { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl HashDBRef for ProofMapTrieNodes - where - H::Out: Encode, -{ - fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { - let key = key.encode(); - self.0.get(&key).cloned() - } - - fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { - let key = key.encode(); - self.0.contains_key(&key) - } -} - -#[test] -fn legacy_proof_compatibility() { - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - struct OldStorageProof { - trie_nodes: Vec>, - } - - let old_empty = OldStorageProof { - trie_nodes: Default::default(), - }.encode(); - - assert_eq!(&old_empty[..], &[0][..]); - - let adapter_proof = LegacyDecodeAdapter(StorageProof::Flat(Vec::new())); - assert_eq!(LegacyDecodeAdapter::decode(&mut &old_empty[..]).unwrap(), adapter_proof); - - let old_one = OldStorageProof { - trie_nodes: vec![vec![4u8, 5u8]], - }.encode(); - - assert_eq!(&old_one[..], &[4, 8, 4, 5][..]); - - let adapter_proof = LegacyDecodeAdapter(StorageProof::Flat(vec![vec![4u8, 5u8]])); - assert_eq!(LegacyDecodeAdapter::decode(&mut &old_one[..]).unwrap(), adapter_proof); -} - -#[test] -fn legacy_proof_codec() { - // random content for proof, we test serialization - let content = vec![b"first".to_vec(), b"second".to_vec()]; - - let proof = StorageProof::Flat(content.clone()); - let encoded_proof = proof.encode(); - - // test adapter - let encoded_adapter = LegacyEncodeAdapter(&proof).encode(); - - assert_eq!(StorageProof::decode(&mut &encoded_proof[..]).unwrap(), proof); - 
assert_eq!(encoded_adapter[0], 0); - assert_eq!(&encoded_adapter[1..], &encoded_proof[..]); - - let adapter_proof = LegacyDecodeAdapter(proof); - assert_eq!(LegacyDecodeAdapter::decode(&mut &encoded_adapter[..]).unwrap(), adapter_proof); -} diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs new file mode 100644 index 0000000000000..89b93ef7d0867 --- /dev/null +++ b/primitives/trie/src/storage_proof/compact.rs @@ -0,0 +1,17 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ! Trie storage proofs that are a compacted collection of encoded nodes. + +/// A collection on encoded and compacted trie nodes. +/// Nodes are sorted by trie node iteration order, and some hash +/// and/or values are ommitted (they can be either calculated from +/// proof content or completed by proof input). +pub type ProofCompacted = Vec>; + + diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs new file mode 100644 index 0000000000000..1607e1e5e22ce --- /dev/null +++ b/primitives/trie/src/storage_proof/mod.rs @@ -0,0 +1,435 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use sp_std::collections::{btree_map::BTreeMap, btree_map}; +use sp_std::collections::btree_set::BTreeSet; +use sp_std::vec::Vec; +use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput, Error as CodecError}; +use hash_db::{Hasher, HashDB, HashDBRef, EMPTY_PREFIX, Prefix}; +use crate::{MemoryDB, Layout}; +use sp_storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap}; +use trie_db::DBValue; + +#[cfg(feature = "std")] +use std::sync::Arc; +#[cfg(feature = "std")] +use parking_lot::RwLock; + +pub mod simple; +pub mod compact; +pub mod query_plan; +pub mod multiple; + +// We are not including it to sp_std, this hash map +// usage is restricted here to proof. +// In practice it is already use internally by no_std trie_db. +#[cfg(not(feature = "std"))] +use hashbrown::HashMap; + +#[cfg(feature = "std")] +use std::collections::HashMap; + +type Result = sp_std::result::Result; +type CodecResult = sp_std::result::Result; + +#[cfg(feature = "std")] +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Error { + /// Error produce by storage proof logic. + /// It is formatted in std to simplify type. + Proof(&'static str), + /// Error produce by trie manipulation. + Trie(String), +} + +#[cfg(not(feature = "std"))] +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum Error { + /// Error produce by storage proof logic. + Proof, + /// Error produce by trie manipulation. + Trie, +} + +#[cfg(feature = "std")] +impl sp_std::fmt::Display for Error { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + match self { + Error::Trie(msg) => write!(f, "Proof error trie: {}", msg), + Error::Proof(msg) => write!(f, "Proof error: {}", msg), + } + } +} + +#[cfg(feature = "std")] +impl sp_std::convert::From> for Error { + fn from(e: sp_std::boxed::Box) -> Self { + // Only trie error is build from box so we use a tiny shortcut here. 
+ Error::Trie(format!("{}", e)) + } +} + +#[cfg(not(feature = "std"))] +impl sp_std::convert::From> for Error { + fn from(_e: sp_std::boxed::Box) -> Self { + Error::Trie + } +} + +impl sp_std::convert::From for Error { + fn from(e: CodecError) -> Self { + error(e.what()) + } +} + +#[cfg(feature = "std")] +const fn error(message: &'static str) -> Error { + Error::Proof(message) +} + +#[cfg(not(feature = "std"))] +const fn error(_message: &'static str) -> Error { + Error::Proof +} + +const fn missing_pack_input() -> Error { + error("Packing input missing for proof") +} + +const fn missing_verify_input() -> Error { + error("Input missing for proof verification") +} + +const fn no_partial_db_support() -> Error { + error("Partial db not supported for this proof") +} + +#[derive(Clone)] +/// Additional information needed for packing or unpacking storage proof. +/// These do not need to be part of the proof but are required +/// when processing the proof. +pub enum Input { + /// Proof is self contained. + None, + + /// Contains trie roots used during proof processing. + ChildTrieRoots(ChildrenProofMap>), + + /// Contains trie roots used during proof processing. + /// Contains key and values queried during the proof processing. + QueryPlanWithValues(ChildrenProofMap<(Vec, Vec<(Vec, Option>)>)>), + + /// Contains trie roots used during proof processing. + /// Contains keys queried during the proof processing. + QueryPlan(ChildrenProofMap<(Vec, Vec>)>), +} + +impl Input { + /// Get input kind for a given input. + pub fn kind(&self) -> InputKind { + match self { + Input::ChildTrieRoots(..) => InputKind::ChildTrieRoots, + Input::QueryPlan(..) => InputKind::QueryPlan, + Input::QueryPlanWithValues(..) => InputKind::QueryPlanWithValues, + Input::None => InputKind::None, + } + } + + /// Updates input with new content. + /// Return false on failure. + /// Fails when the input type differs, except for `None` input + /// that is always reassignable. 
+ /// + /// Merging query plan inputs is not allowed (unimplemented), + /// but could be. + #[must_use] + pub fn consolidate(&mut self, other: Self) -> Result<()> { + let incompatible_types = || Err(error("Incompatible types for consolidating proofs")); + match self { + Input::None => { + *self = other; + }, + Input::ChildTrieRoots(children) => { + match other { + Input::None => (), + Input::ChildTrieRoots(children_other) => { + for (child_info, root) in children_other { + match children.entry(child_info) { + btree_map::Entry::Occupied(v) => if v.get() != &root { + return Err(error("Incompatible children root when consolidating proofs")); + }, + btree_map::Entry::Vacant(v) => { + v.insert(root); + }, + } + } + }, + Input::QueryPlan(..) => return incompatible_types(), + Input::QueryPlanWithValues(..) => return incompatible_types(), + } + }, + Input::QueryPlan(..) => return incompatible_types(), + Input::QueryPlanWithValues(..) => return incompatible_types(), + } + Ok(()) + } +} + +/// Kind for a `Input` variant. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum InputKind { + /// `Input::None` kind. + None, + /// `Input::ChildTrieRoots` kind. + ChildTrieRoots, + /// `Input::QueryPlan` kind. + QueryPlan, + /// `Input::QueryPlanWithValues` kind. + QueryPlanWithValues, +} + +/// Trait for proofs that can be use as a partial backend for verification. +pub trait StorageProof: sp_std::fmt::Debug + Sized + 'static { + /// Returns a new empty proof. + /// + /// An empty proof is capable of only proving trivial statements (ie. that an empty set of + /// key-value pairs exist in storage). + fn empty() -> Self; + + /// Returns whether this is an empty proof. + fn is_empty(&self) -> bool; +} + +/// Trait for proofs that can be merged. +pub trait MergeableStorageProof: StorageProof { + /// Merges multiple storage proofs covering potentially different sets of keys into one proof + /// covering all keys. 
The merged proof output may be smaller than the aggregate size of the input + /// proofs due to deduplication of trie nodes. + fn merge(proofs: I) -> Self where I: IntoIterator; +} + +/// Trait for proofs that can be recorded against a trie backend. +pub trait RegStorageProof: MergeableStorageProof { + /// Variant of enum input to use. + const INPUT_KIND: InputKind; + + /// The data structure for recording proof entries. + type RecordBackend: RecordBackend; + + /// Extracts the gathered unordered encoded trie nodes. + /// Depending on `kind`, encoded trie nodes can change + /// (usually to compact the proof). + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result; +} + +/// Associate a different proof kind for recording proof. +/// The recorded proof will need to be convertible to this type. +/// +/// This trait is not strictly needed but ensure simple proof construction +/// rules (a single possible registration proof). +/// +/// TODO EMCH really consider removing. +pub trait WithRegStorageProof: Sized { + /// Associated proof to register. + type RegStorageProof: Into + RegStorageProof; +} + +pub trait BackendStorageProof: Codec + StorageProof {} + +/// Trait for proofs that can use to create a partial trie backend. +pub trait CheckableStorageProof: Codec + StorageProof { + /// Run proof validation when the proof allows immediate + /// verification. + fn verify(self, input: &Input) -> Result>; +} + +/// Trie encoded node recorder. +/// TODO EMCH consider using &mut and change reg storage (consume) proof +/// to implement without rc & sync, and encapsulate from calling +/// code. +pub trait RecordBackend: Clone + Default { + /// Access recorded value, allow using the backend as a cache. + fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option>; + /// Record the actual value. 
+ fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option); +} + +#[cfg(feature = "std")] +#[derive(Clone, Default)] +/// Records are separated by child trie, this is needed for +/// proof compaction. +pub struct FullSyncRecorder(Arc>>>); + +#[cfg(feature = "std")] +#[derive(Clone, Default)] +/// Single storage for all recoded nodes (as in +/// state db column). +/// That this variant exists only for performance +/// (on less map access than in `Full`), but is not strictly +/// necessary. +pub struct FlatSyncRecorder(Arc>>); + +#[cfg(feature = "std")] +impl RecordBackend for FullSyncRecorder { + fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option> { + self.0.read().get(child_info).and_then(|s| (**s).get(&key).cloned()) + } + + fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option) { + self.0.write().entry(child_info.clone()) + .or_default() + .insert(key.clone(), value.clone()); + } +} + +#[cfg(feature = "std")] +impl RecordBackend for FlatSyncRecorder { + fn get(&self, _child_info: &ChildInfo, key: &Hash) -> Option> { + (**self.0.read()).get(&key).cloned() + } + + fn record(&self, _child_info: &ChildInfo, key: &Hash, value: Option) { + self.0.write().insert(key.clone(), value.clone()); + } +} + +/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to +/// be traversed in any particular order. 
+pub struct StorageProofNodeIterator { + inner: > as IntoIterator>::IntoIter, +} + +impl StorageProofNodeIterator { + // TODO EMCH looks very useless + fn new(proof: multiple::MultipleStorageProof) -> Self { +/* match proof { + multiple::MultipleStorageProof::Flat(data) => StorageProofNodeIterator { + inner: data.0.into_iter(), + }, + _ => StorageProofNodeIterator { + inner: Vec::new().into_iter(), + }, + }*/ + unimplemented!() + } +} + +impl Iterator for StorageProofNodeIterator { + type Item = Vec; + + fn next(&mut self) -> Option { + self.inner.next() + } +} + +/// Type for storing a map of child trie proof related information. +/// A few utilities methods are defined. +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +pub struct ChildrenProofMap(pub BTreeMap); + +impl sp_std::ops::Deref for ChildrenProofMap { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for ChildrenProofMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl sp_std::default::Default for ChildrenProofMap { + fn default() -> Self { + ChildrenProofMap(BTreeMap::new()) + } +} + +impl IntoIterator for ChildrenProofMap { + type Item = (ChildInfoProof, T); + type IntoIter = sp_std::collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +/// Container recording trie nodes. 
+#[derive(Clone)] +pub struct RecordMapTrieNodes(HashMap>); + +impl sp_std::default::Default for RecordMapTrieNodes { + fn default() -> Self { + RecordMapTrieNodes(Default::default()) + } +} + +impl sp_std::ops::Deref for RecordMapTrieNodes { + type Target = HashMap>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for RecordMapTrieNodes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl HashDBRef for RecordMapTrieNodes { + fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { + self.0.get(key).and_then(Clone::clone) + } + + fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { + self.0.get(key).map(Option::is_some).unwrap_or(false) + } +} + +/// Container recording trie nodes and their encoded hash. +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct ProofMapTrieNodes(pub HashMap, DBValue>); + +impl sp_std::default::Default for ProofMapTrieNodes { + fn default() -> Self { + ProofMapTrieNodes(Default::default()) + } +} + +impl sp_std::ops::Deref for ProofMapTrieNodes { + type Target = HashMap, DBValue>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for ProofMapTrieNodes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl HashDBRef for ProofMapTrieNodes + where + H::Out: Encode, +{ + fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { + let key = key.encode(); + self.0.get(&key).cloned() + } + + fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { + let key = key.encode(); + self.0.contains_key(&key) + } +} diff --git a/primitives/trie/src/storage_proof/query_plan.rs b/primitives/trie/src/storage_proof/query_plan.rs new file mode 100644 index 0000000000000..acced580c2f30 --- /dev/null +++ b/primitives/trie/src/storage_proof/query_plan.rs @@ -0,0 +1,13 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ! Trie storage proofs that are only verifying state for a given +// key value query plan. + +use super::compact::ProofCompacted; + diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs new file mode 100644 index 0000000000000..b3c48ec80faec --- /dev/null +++ b/primitives/trie/src/storage_proof/simple.rs @@ -0,0 +1,131 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ! Trie storage proofs that are a simple collection of encoded nodes. + +use super::*; +use codec::{Codec, Encode, Decode}; +use sp_storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap}; + +/// A collection on encoded trie nodes. +pub type ProofNodes = Vec>; + +/// Single flattened proof, all default child trie are flattened over a same +/// container, no child trie information is provided. 
+#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct Flat(ProofNodes); + +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct Full(ChildrenProofMap); + +impl StorageProof for Flat { + fn empty() -> Self { + Flat(Default::default()) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl StorageProof for Full { + fn empty() -> Self { + Full(Default::default()) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl MergeableStorageProof for Flat { + fn merge(proofs: I) -> Self where I: IntoIterator { + let mut unique_set = BTreeSet::>::default(); + for proof in proofs { + unique_set.extend(proof.0); + } + Flat(unique_set.into_iter().collect()) + } +} + +impl MergeableStorageProof for Full { + fn merge(proofs: I) -> Self where I: IntoIterator { + let mut child_sets = ChildrenProofMap::>>::default(); + for children in proofs { + for (child_info, child) in children.0.into_iter() { + let set = child_sets.entry(child_info).or_default(); + set.extend(child); + } + } + Full(ChildrenProofMap(child_sets + .into_iter() + .map(|(child_info, set)| (child_info, set.into_iter().collect())) + .collect())) + } +} + +// TODO EMCH can remove Default bound with manual impl on recorder +#[cfg(feature = "std")] +impl RegStorageProof for Flat { + const INPUT_KIND: InputKind = InputKind::None; + + type RecordBackend = super::FlatSyncRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + let trie_nodes = recorder.0.read() + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + Ok(Flat(trie_nodes)) + } +} + +#[cfg(feature = "std")] +impl RegStorageProof for Full { + const INPUT_KIND: InputKind = InputKind::None; + + type RecordBackend = super::FullSyncRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in recorder.0.read().iter() { + let trie_nodes: Vec> = set + 
.iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + result.insert(child_info.proof_info(), trie_nodes); + } + Ok(Full(result)) + } +} + +impl BackendStorageProof for Flat { } + +impl BackendStorageProof for Full { } + +// Note that this implementation is only possible +// as long as we only have default child trie which +// can be flattened into top trie storage. +impl Into for Full { + fn into(self) -> Flat { + let mut unique_set = BTreeSet::>::default(); + for (child_info, nodes) in self.0 { + assert!(matches!(child_info, ChildInfoProof::Default(..))); + unique_set.extend(nodes); + } + Flat(unique_set.into_iter().collect()) + } +} + +impl Into for Flat { + fn into(self) -> Full { + let mut result = ChildrenProofMap::default(); + result.insert(ChildInfoProof::top_trie(), self.0); + Full(result) + } +} From 83ce8797c5321765611ea74744d64c5aea82e69b Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 4 Jun 2020 16:47:04 +0200 Subject: [PATCH 141/185] Trie in shape --- primitives/state-machine/src/backend.rs | 13 +- primitives/trie/src/lib.rs | 7 +- primitives/trie/src/storage_proof/compact.rs | 357 ++++++++++++++++++ primitives/trie/src/storage_proof/mod.rs | 39 +- primitives/trie/src/storage_proof/multiple.rs | 325 ++++++++++++++++ .../trie/src/storage_proof/query_plan.rs | 101 ++++- primitives/trie/src/storage_proof/simple.rs | 15 +- 7 files changed, 819 insertions(+), 38 deletions(-) create mode 100644 primitives/trie/src/storage_proof/multiple.rs diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 1a6bae4f47d43..3c55b6294fa93 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -38,11 +38,17 @@ pub trait Backend: std::fmt::Debug { type Transaction: Consolidate + Default + Send; /// The proof format use while registering proof. 
- type StorageProofReg: sp_trie::RegStorageProof + Into; + /// TODO EMCH on paper this is not needed, we shouldn't need this proof type to merge + /// but just use back proof reg backend. -> try to do it or try to remove the + /// storage proof constraint and rename the struct to something that is more build + /// related but do not need to be usable as a backend. + type StorageProofReg: sp_trie::RegStorageProof + + sp_trie::MergeableStorageProof + + Into; /// The actual proof produced. - type StorageProof: sp_trie::BackendStorageProof - + sp_trie::WithRegStorageProof; + type StorageProof: sp_trie::BackendStorageProof; +// + sp_trie::WithRegStorageProof; /// Type of proof backend. type ProofRegBackend: ProofRegBackend; @@ -275,7 +281,6 @@ pub trait ProofCheckBackend: Sized + crate::backend::Backend ) -> Result>; } - impl<'a, T, H> Backend for &'a T where H: Hasher, diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index a3ff95d00c514..72493fa6ae0f8 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -314,7 +314,7 @@ pub fn record_all_keys( } /// Pack proof from a collection of encoded node. -fn pack_proof_from_collected( +fn pack_proof_from_collected( root: &TrieHash, input: &dyn hash_db::HashDBRef, ) -> Result>, Box>> { @@ -325,13 +325,14 @@ fn pack_proof_from_collected( /// Unpack packed proof. Packed proof here is a list of encoded /// packed node ordered as defined by the compact trie scheme use. /// Returns a root and a collection on unpacked encoded nodes. -fn unpack_proof(input: &[Vec]) +fn unpack_proof(input: &[Vec]) -> Result<(TrieHash, Vec>), Box>> { let mut memory_db = MemoryDB::<::Hash>::default(); let root = trie_db::decode_compact::(&mut memory_db, input)?; Ok((root.0, memory_db.drain().into_iter().map(|(_k, (v, _rc))| v).collect())) } +/* TODO remove ?? /// Unpack packed proof. /// This is faster than `unpack_proof`, and should be prefered is encoded node /// will be use in a new memory db. 
@@ -341,7 +342,7 @@ fn unpack_proof_to_memdb(input: &[Vec]) let root = trie_db::decode_compact::(&mut memory_db, input)?; Ok((root.0, memory_db)) } - +*/ /// Read a value from the child trie. pub fn read_child_trie_value( keyspace: &[u8], diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 89b93ef7d0867..16e3b58d0ef4c 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -8,10 +8,367 @@ // ! Trie storage proofs that are a compacted collection of encoded nodes. +use super::*; +use super::simple::ProofNodes; +use codec::{Codec, Encode, Decode}; +use crate::TrieLayout; +#[cfg(feature = "std")] +use crate::TrieHash; +use sp_storage::ChildType; +use sp_std::marker::PhantomData; +use sp_std::convert::TryInto; + /// A collection on encoded and compacted trie nodes. /// Nodes are sorted by trie node iteration order, and some hash /// and/or values are ommitted (they can be either calculated from /// proof content or completed by proof input). pub type ProofCompacted = Vec>; +/// Compacted flat proof. +/// +/// This works as `Flat`, but skips encoding of hashes +/// that can be calculated when reading the child nodes +/// in the proof (nodes ordering hold the trie structure information). +/// This requires that the proof is collected with +/// child trie separation and each child trie roots as additional +/// input. +/// We remove child trie info when encoding because it is not strictly needed +/// when decoding. 
+#[derive(Encode, Decode)] +pub struct Flat(Vec, PhantomData); + +impl PartialEq> for Flat { + fn eq(&self, other: &Flat) -> bool { + self.0.eq(&other.0) + } +} +impl Eq for Flat { } + +impl sp_std::fmt::Debug for Flat { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "Flat compact proof: {:?}", &self.0) + } +} + +impl Clone for Flat { + fn clone(&self) -> Self { + Flat(self.0.clone(), PhantomData) + } +} + +/// Compacted proof with child trie . +/// +/// This currently mainly provided for test purpose and extensibility. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +pub struct Full(ChildrenProofMap, PhantomData); + +impl sp_std::fmt::Debug for Full { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "Full compact proof: {:?}", &self.0) + } +} + +/// Proof cotaining an intermediate representation of state +/// which is mergeable and can be converted to compact representation. +/// Compatible with `TrieSkipHashes` and `TrieSkipHashesFull` proofs. +/// +/// This is needed mainly for technical reasons (merge then compact proofs). +/// (though if possible user should rather use a flat record +/// backend in the different context and avoid merge). +/// TODO EMCH try no backend in this case. +/// TODO could move to simple +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct FullForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>); + + +impl StorageProof for Flat { + fn empty() -> Self { + Flat(Default::default(), PhantomData) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl StorageProof for Full { + fn empty() -> Self { + Full(Default::default(), PhantomData) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl StorageProof for FullForMerge { + fn empty() -> Self { + FullForMerge(Default::default()) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +/// Note that this implementation assumes all proof are from a same state. 
+impl MergeableStorageProof for FullForMerge { + fn merge(proofs: I) -> Self where I: IntoIterator { + // TODO EMCH optimize all merge to init to first element + let mut child_sets = ChildrenProofMap::<(ProofMapTrieNodes, Vec)>::default(); + for children in proofs { + for (child_info, (mut proof, root)) in children.0.into_iter() { + child_sets.entry(child_info) + .and_modify(|entry| { + debug_assert!(&root == &entry.1); + let iter_proof = sp_std::mem::replace(&mut proof, Default::default()); + entry.0.extend(iter_proof.0.into_iter()); + }) + .or_insert((proof, root)); + } + } + FullForMerge(child_sets) + } +} + +// TODO EMCH can remove Default bound with manual impl on recorder +#[cfg(feature = "std")] +impl RegStorageProof> for Flat + where + T: 'static + TrieLayout, + TrieHash: Decode, +{ + const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; + + type RecordBackend = super::FullSyncRecorder>; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + if let Input::ChildTrieRoots(roots) = input { + let mut result = Vec::default(); + for (child_info, set) in recorder.0.read().iter() { + let root = roots.get(&child_info.proof_info()) + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .ok_or_else(|| missing_pack_input())?; + let trie_nodes = crate::pack_proof_from_collected::(&root, set)?; + result.push(trie_nodes); + } + Ok(Flat(result, PhantomData)) + } else { + Err(missing_pack_input()) + } + } +} + +#[cfg(feature = "std")] +impl RegStorageProof> for Full + where + T: 'static + TrieLayout, + TrieHash: Decode, +{ + const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; + + type RecordBackend = super::FullSyncRecorder>; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + if let Input::ChildTrieRoots(roots) = input { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in recorder.0.read().iter() { + let root = roots.get(&child_info.proof_info()) + .and_then(|r| Decode::decode(&mut 
&r[..]).ok()) + .ok_or_else(|| missing_pack_input())?; + let trie_nodes = crate::pack_proof_from_collected::(&root, set)?; + result.insert(child_info.proof_info(), trie_nodes); + } + Ok(Full(result, PhantomData)) + } else { + Err(missing_pack_input()) + } + } +} + +#[cfg(feature = "std")] +impl RegStorageProof for FullForMerge + where + Hash: Default + Eq + Clone + Encode + sp_std::hash::Hash, +{ + const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; + + type RecordBackend = super::FullSyncRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + if let Input::ChildTrieRoots(roots) = input { + let mut result = ChildrenProofMap::default(); + for (child_info, set) in recorder.0.read().iter() { + let root = roots.get(&child_info.proof_info()) + .ok_or_else(|| missing_pack_input())?.clone(); + let trie_nodes: BTreeMap<_, _> = set + .iter() + .filter_map(|(k, v)| v.as_ref().map(|v| (k.encode(), v.to_vec()))) + .collect(); + result.insert(child_info.proof_info(), (ProofMapTrieNodes(trie_nodes), root)); + } + Ok(FullForMerge(result)) + } else { + Err(missing_pack_input()) + } + } +} + +impl BackendStorageProof for Flat { } + +impl BackendStorageProof for Full { } + +// Note that this implementation is only possible +// as long as we only have default child trie which +// can be flattened into top trie storage. 
+impl Into> for Full { + fn into(self) -> Flat { + let mut unique_set = BTreeSet::>::default(); + for (child_info, nodes) in self.0 { + assert!(matches!(child_info, ChildInfoProof::Default(..))); + unique_set.extend(nodes); + } + Flat(vec![unique_set.into_iter().collect()], PhantomData) + } +} + +// TODO switch to try into (works only for one compact flat) +impl Into> for Flat { + fn into(mut self) -> Full { + assert!(self.0.len() == 1); // works only if only top trie + let mut result = ChildrenProofMap::default(); + result.insert(ChildInfoProof::top_trie(), self.0.pop().expect("asserted above; qed")); + Full(result, PhantomData) + } +} + +impl FullForMerge { + // TODO EMCH use try_into! + fn to_full(self) -> Result> + where + L: 'static + TrieLayout, + TrieHash: Codec, + { + let mut result = ChildrenProofMap::::default(); + for (child_info, (set, root)) in self.0.into_iter() { + let root = Decode::decode(&mut &root[..]) + .map_err(|_e| pack_error())?; + let trie_nodes = crate::pack_proof_from_collected::(&root, &set) + .map_err(|_e| pack_error())?; + result.insert(child_info, trie_nodes); + } + Ok(Full(result, PhantomData)) + } + + // TODO EMCH use try_into! 
+ fn to_flat(self) -> Result> + where + L: 'static + TrieLayout, + TrieHash: Codec, + { + let mut result = Vec::::default(); + for (_child_info, (set, root)) in self.0.into_iter() { + let root = Decode::decode(&mut &root[..]) + .map_err(|_e| pack_error())?; + let trie_nodes = crate::pack_proof_from_collected::(&root, &set) + .map_err(|_e| pack_error())?; + result.push(trie_nodes); + } + Ok(Flat(result, PhantomData)) + } +} + +impl Into> for FullForMerge + where + L: 'static + TrieLayout, + TrieHash: Codec, +{ + // TODO consider only using try into (may not be very straightforward with backend) + fn into(self) -> Full { + self.to_full() + .expect("Full for merge was recorded on a correct state") + } +} + +impl Into> for FullForMerge + where + L: 'static + TrieLayout, + TrieHash: Codec, +{ + fn into(self) -> Flat { + self.to_flat() + .expect("Full for merge was recorded on a correct state") + } +} + +impl TryInto for Flat { + type Error = super::Error; + + fn try_into(self) -> Result { + let mut result = ProofNodes::default(); + for proof in self.0 { + let (_root, unpacked_proof) = crate::unpack_proof::(proof.as_slice())?; + result.extend(unpacked_proof); + } + Ok(super::simple::Flat(result)) + } +} + +impl TryInto for Full { + type Error = super::Error; + + fn try_into(self) -> Result { + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in self.0 { + match child_info.child_type() { + ChildType::ParentKeyId => { + // Note that we could return roots from unpacking. + let (_root, unpacked_proof) = crate::unpack_proof::(proof.as_slice())?; + result.insert(child_info, unpacked_proof); + } + } + } + Ok(super::simple::Full(result)) + } +} + +/// Container recording trie nodes and their encoded hash. +/// TODO remove Encode by relieving mergeable storage proof from the +/// constraint to bring back btreemap? 
+#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct ProofMapTrieNodes(pub BTreeMap, DBValue>); + +impl sp_std::default::Default for ProofMapTrieNodes { + fn default() -> Self { + ProofMapTrieNodes(Default::default()) + } +} + +impl sp_std::ops::Deref for ProofMapTrieNodes { + type Target = BTreeMap, DBValue>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl sp_std::ops::DerefMut for ProofMapTrieNodes { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl HashDBRef for ProofMapTrieNodes + where + H::Out: Encode, +{ + fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { + let key = key.encode(); + self.0.get(&key).cloned() + } + fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { + let key = key.encode(); + self.0.contains_key(&key) + } +} diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 1607e1e5e22ce..301b53747baa9 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -10,9 +10,9 @@ use sp_std::collections::{btree_map::BTreeMap, btree_map}; use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput, Error as CodecError}; -use hash_db::{Hasher, HashDB, HashDBRef, EMPTY_PREFIX, Prefix}; -use crate::{MemoryDB, Layout}; -use sp_storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap}; +use hash_db::{Hasher, HashDBRef}; +use crate::Layout; +use sp_storage::{ChildInfo, ChildInfoProof, ChildrenMap}; use trie_db::DBValue; #[cfg(feature = "std")] @@ -101,14 +101,19 @@ const fn missing_pack_input() -> Error { error("Packing input missing for proof") } +const fn pack_error() -> Error { + error("Error while packing for proof") +} + const fn missing_verify_input() -> Error { error("Input missing for proof verification") } -const fn no_partial_db_support() -> Error { - error("Partial db not supported for 
this proof") +const fn incompatible_type() -> Error { + error("Incompatible type") } + #[derive(Clone)] /// Additional information needed for packing or unpacking storage proof. /// These do not need to be part of the proof but are required @@ -214,7 +219,7 @@ pub trait MergeableStorageProof: StorageProof { } /// Trait for proofs that can be recorded against a trie backend. -pub trait RegStorageProof: MergeableStorageProof { +pub trait RegStorageProof: StorageProof { /// Variant of enum input to use. const INPUT_KIND: InputKind; @@ -226,7 +231,7 @@ pub trait RegStorageProof: MergeableStorageProof { /// (usually to compact the proof). fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result; } - +/* /// Associate a different proof kind for recording proof. /// The recorded proof will need to be convertible to this type. /// @@ -238,14 +243,14 @@ pub trait WithRegStorageProof: Sized { /// Associated proof to register. type RegStorageProof: Into + RegStorageProof; } - +*/ pub trait BackendStorageProof: Codec + StorageProof {} /// Trait for proofs that can use to create a partial trie backend. pub trait CheckableStorageProof: Codec + StorageProof { /// Run proof validation when the proof allows immediate /// verification. - fn verify(self, input: &Input) -> Result>; + fn verify(self, input: &Input) -> Result; } /// Trie encoded node recorder. 
@@ -274,6 +279,7 @@ pub struct FullSyncRecorder(Arc(Arc>>); + #[cfg(feature = "std")] impl RecordBackend for FullSyncRecorder { fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option> { @@ -304,21 +310,6 @@ pub struct StorageProofNodeIterator { inner: > as IntoIterator>::IntoIter, } -impl StorageProofNodeIterator { - // TODO EMCH looks very useless - fn new(proof: multiple::MultipleStorageProof) -> Self { -/* match proof { - multiple::MultipleStorageProof::Flat(data) => StorageProofNodeIterator { - inner: data.0.into_iter(), - }, - _ => StorageProofNodeIterator { - inner: Vec::new().into_iter(), - }, - }*/ - unimplemented!() - } -} - impl Iterator for StorageProofNodeIterator { type Item = Vec; diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs new file mode 100644 index 0000000000000..6aabce6a66cc1 --- /dev/null +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -0,0 +1,325 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ! Trie storage proofs allowing using different proofs. + +use super::*; +use sp_std::convert::TryInto; +use sp_std::marker::PhantomData; + +/// Different kind of proof representation are allowed. +/// This definition is used as input parameter when producing +/// a storage proof. +/// Some kind are reserved for test or internal use and will +/// not be usable when decoding proof. +#[repr(u8)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum StorageProofKind { + /// Kind for `MultipleStorageProof::Flat`. + Flat = 1, + + /// Kind for `MultipleStorageProof::TrieSkipHashes`. + TrieSkipHashes = 2, +} + +impl StorageProofKind { + /// Decode a byte value representing the storage kind. + /// Return `None` if the kind does not exists or is not allowed. 
+ pub fn from_byte(encoded: u8) -> Option { + Some(match encoded { + x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, + x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, + _ => return None, + }) + } +} + +/// Allow usage of multiple proof at the same time. This is usefull when +/// we want to be able to operate from different proof origin. +/// It produces a single proof type that is defined by type parameter `D` +/// as `DefaultKind`. +#[derive(PartialEq, Eq, Clone)] +pub enum MultipleStorageProof { + /// See `crate::storage_proof::simple::Flat`. + Flat(super::simple::Flat), + + /// See `crate::storage_proof::compact::Flat`. + TrieSkipHashes(super::compact::Flat>, PhantomData), +} + +impl sp_std::fmt::Debug for MultipleStorageProof { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + match self { + MultipleStorageProof::Flat(v) => v.fmt(f), + MultipleStorageProof::TrieSkipHashes(v, _) => v.fmt(f), + } + } +} + +/// Allow to use specific kind of proof by default. +pub trait DefaultKind: 'static + Clone { + const KIND: StorageProofKind; +} + +impl Decode for MultipleStorageProof { + fn decode(value: &mut I) -> CodecResult { + let kind = value.read_byte()?; + Ok(match StorageProofKind::from_byte(kind) + .ok_or_else(|| codec::Error::from("Invalid storage kind"))? 
{ + StorageProofKind::Flat => MultipleStorageProof::Flat(Decode::decode(value)?), + StorageProofKind::TrieSkipHashes => MultipleStorageProof::TrieSkipHashes( + Decode::decode(value)?, + PhantomData, + ), + }) + } +} + +impl Encode for MultipleStorageProof { + fn encode_to(&self, dest: &mut T) { + (self.kind() as u8).encode_to(dest); + match self { + MultipleStorageProof::Flat(p) => p.encode_to(dest), + MultipleStorageProof::TrieSkipHashes(p, _) => p.encode_to(dest), + } + } +} + +impl StorageProof for MultipleStorageProof { + fn empty() -> Self { + match D::KIND { + StorageProofKind::Flat => + MultipleStorageProof::Flat(super::simple::Flat::empty()), + StorageProofKind::TrieSkipHashes => + MultipleStorageProof::TrieSkipHashes(super::compact::Flat::empty(), PhantomData), + } + } + + + fn is_empty(&self) -> bool { + match self { + MultipleStorageProof::Flat(data) => data.is_empty(), + MultipleStorageProof::TrieSkipHashes(data, _) => data.is_empty(), + } + } +} + +#[cfg(feature = "std")] +#[derive(Clone)] +pub enum MultipleSyncRecorder { + Flat(super::FlatSyncRecorder, PhantomData), + Full(super::FullSyncRecorder), +} + +impl Default for MultipleSyncRecorder { + fn default() -> Self { + match D::KIND { + StorageProofKind::Flat => MultipleSyncRecorder::Flat(Default::default(), PhantomData), + StorageProofKind::TrieSkipHashes => MultipleSyncRecorder::Full(Default::default()), + } + } +} + +#[cfg(feature = "std")] +impl RecordBackend for MultipleSyncRecorder { + fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option> { + match self { + MultipleSyncRecorder::Flat(rec, _) => rec.get(child_info, key), + MultipleSyncRecorder::Full(rec) => rec.get(child_info, key), + } + } + + fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option) { + match self { + MultipleSyncRecorder::Flat(rec, _) => rec.record(child_info, key, value), + MultipleSyncRecorder::Full(rec) => rec.record(child_info, key, value), + } + } +} + +// TODO EMCH can remove Default bound with manual 
impl on recorder +#[cfg(feature = "std")] +impl RegStorageProof for MultipleStorageProof + where + Hash: Hasher + 'static, + Hash::Out: Decode, + D: DefaultKind, +{ + // Actually one could ignore this if he knows its type to be non compact. + // TODO EMCH try a const function over D, this have very little chance to work + const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; + + type RecordBackend = MultipleSyncRecorder; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + match D::KIND { + StorageProofKind::Flat => { + if let MultipleSyncRecorder::Flat(rec, _) = recorder { + return Ok(MultipleStorageProof::Flat(super::simple::Flat::extract_proof(rec, input)?)) + } + }, + StorageProofKind::TrieSkipHashes => { + if let MultipleSyncRecorder::Full(rec) = recorder { + return Ok(MultipleStorageProof::TrieSkipHashes( + super::compact::Flat::extract_proof(rec, input)?, + PhantomData, + )) + } + }, + } + Err(missing_pack_input()) + } +} + +impl BackendStorageProof for MultipleStorageProof { } + +impl TryInto for MultipleStorageProof { + type Error = super::Error; + + fn try_into(self) -> Result { + match self { + MultipleStorageProof::Flat(p) => Ok(p), + _ => Err(incompatible_type()), + } + } +} + +impl TryInto>> for MultipleStorageProof { + type Error = super::Error; + + fn try_into(self) -> Result>> { + match self { + MultipleStorageProof::TrieSkipHashes(p, _) => Ok(p), + _ => Err(incompatible_type()), + } + } +} + +impl MultipleStorageProof { + /// Get kind type for the storage proof variant. + pub fn kind(&self) -> StorageProofKind { + match self { + MultipleStorageProof::Flat(_) => StorageProofKind::Flat, + MultipleStorageProof::TrieSkipHashes(_, _) => StorageProofKind::TrieSkipHashes, + } + } +} + +/* + /// Can also fail on invalid compact proof. + pub fn into_partial_db(self) -> Result>> + where + H: Hasher, + H::Out: Decode, + { + let mut result = ChildrenProofMap::default(); + match self { + s@MultipleStorageProof::Flat(..) 
=> { + let db = s.into_partial_flat_db::()?; + result.insert(ChildInfoProof::top_trie(), db); + }, + MultipleStorageProof::Full(children) => { + for (child_info, proof) in children.into_iter() { + let mut db = MemoryDB::default(); + for item in proof.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } + result.insert(child_info, db); + } + }, + MultipleStorageProof::TrieSkipHashesForMerge(children) => { + for (child_info, (proof, _root)) in children.into_iter() { + let mut db = MemoryDB::default(); + for (key, value) in proof.0.into_iter() { + let key = Decode::decode(&mut &key[..])?; + db.emplace(key, EMPTY_PREFIX, value); + } + result.insert(child_info, db); + } + }, + MultipleStorageProof::TrieSkipHashesFull(children) => { + for (child_info, proof) in children.into_iter() { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). + let (_root, db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + result.insert(child_info, db); + } + }, + s@MultipleStorageProof::TrieSkipHashes(..) => { + let db = s.into_partial_flat_db::()?; + result.insert(ChildInfoProof::top_trie(), db); + }, + MultipleStorageProof::KnownQueryPlanAndValues(_children) => { + return Err(no_partial_db_support()); + }, + } + Ok(result) + } + + /// Create in-memory storage of proof check backend. + /// + /// Behave similarily to `into_partial_db`. + pub fn into_partial_flat_db(self) -> Result> + where + H: Hasher, + H::Out: Decode, + { + let mut db = MemoryDB::default(); + let mut db_empty = true; + match self { + s@MultipleStorageProof::Flat(..) 
=> { + for item in s.iter_nodes_flatten() { + db.insert(EMPTY_PREFIX, &item[..]); + } + }, + MultipleStorageProof::Full(children) => { + for (_child_info, proof) in children.into_iter() { + for item in proof.into_iter() { + db.insert(EMPTY_PREFIX, &item); + } + } + }, + MultipleStorageProof::TrieSkipHashesForMerge(children) => { + for (_child_info, (proof, _root)) in children.into_iter() { + for (key, value) in proof.0.into_iter() { + let key = Decode::decode(&mut &key[..])?; + db.emplace(key, EMPTY_PREFIX, value); + } + } + }, + MultipleStorageProof::TrieSkipHashesFull(children) => { + for (_child_info, proof) in children.into_iter() { + // Note that this does check all hashes so using a trie backend + // for further check is not really good (could use a direct value backend). + let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + }, + MultipleStorageProof::TrieSkipHashes(children) => { + for proof in children.into_iter() { + let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + }, + MultipleStorageProof::KnownQueryPlanAndValues(_children) => { + return Err(no_partial_db_support()); + }, + } + Ok(db) + } +*/ diff --git a/primitives/trie/src/storage_proof/query_plan.rs b/primitives/trie/src/storage_proof/query_plan.rs index acced580c2f30..0fee9c3283356 100644 --- a/primitives/trie/src/storage_proof/query_plan.rs +++ b/primitives/trie/src/storage_proof/query_plan.rs @@ -9,5 +9,104 @@ // ! Trie storage proofs that are only verifying state for a given // key value query plan. -use super::compact::ProofCompacted; +use super::*; +use super::compact::{ProofCompacted}; +use codec::{Encode, Decode}; +use crate::{TrieConfiguration, TrieHash}; +use sp_std::marker::PhantomData; + +/// Proof for a known key value content. 
+/// +/// This skips encoding of hashes in a similar way as `crate::storage_proof::compact`. +/// This also skips values in the proof, and can therefore only be +/// use to check if there was a change of content. +/// This needs to be check for every children proofs, and needs to keep +/// trace of every child trie origin. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +struct KnownQueryPlanAndValues(ChildrenProofMap, PhantomData); + +impl sp_std::fmt::Debug for KnownQueryPlanAndValues { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "Known values compact proof: {:?}", &self.0) + } +} + +impl StorageProof for KnownQueryPlanAndValues { + fn empty() -> Self { + KnownQueryPlanAndValues(Default::default(), PhantomData) + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +#[cfg(feature = "std")] +impl RegStorageProof> for KnownQueryPlanAndValues + where + T: 'static + TrieConfiguration, + TrieHash: Decode, +{ + const INPUT_KIND: InputKind = InputKind::QueryPlan; + + type RecordBackend = super::FullSyncRecorder>; + + fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + if let Input::QueryPlan(input_children) = input { + let mut result = ChildrenProofMap::default(); + let mut root_hash = TrieHash::::default(); + for (child_info, set) in recorder.0.read().iter() { + let child_info_proof = child_info.proof_info(); + if let Some((root, keys)) = input_children.get(&child_info_proof) { + // Layout h is the only supported one at the time being + if root.len() != root_hash.as_ref().len() { + return Err(missing_pack_input()); + } + root_hash.as_mut().copy_from_slice(&root[..]); + let trie = trie_db::TrieDB::::new(set, &root_hash)?; + let compacted = trie_db::proof::generate_proof(&trie, keys)?; + result.insert(child_info_proof, compacted); + } else { + return Err(missing_pack_input()); + } + } + Ok(KnownQueryPlanAndValues(result, PhantomData)) + } else { + Err(missing_pack_input()) + } + } +} + +impl 
CheckableStorageProof for KnownQueryPlanAndValues + where + T: 'static + TrieConfiguration, + TrieHash: Decode, +{ + fn verify(self, input: &Input) -> Result { + if let Input::QueryPlanWithValues(input_children) = input { + let mut root_hash = TrieHash::::default(); + for (child_info, nodes) in self.0.iter() { + if let Some((root, input)) = input_children.get(child_info) { + // Layout h is the only supported one at the time being + if root.len() != root_hash.as_ref().len() { + return Ok(false); + } + root_hash.as_mut().copy_from_slice(&root[..]); + if let Err(_) = trie_db::proof::verify_proof::( + &root_hash, + &nodes[..], + input.iter(), + ) { + return Ok(false); + } + } else { + return Err(missing_verify_input()); + } + } + Ok(true) + } else { + Err(missing_pack_input()) + } + } +} diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index b3c48ec80faec..f34107287c70d 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -9,8 +9,8 @@ // ! Trie storage proofs that are a simple collection of encoded nodes. use super::*; -use codec::{Codec, Encode, Decode}; -use sp_storage::{ChildInfo, ChildInfoProof, ChildType, ChildrenMap}; +use codec::{Encode, Decode}; +use sp_storage::ChildInfoProof; /// A collection on encoded trie nodes. pub type ProofNodes = Vec>; @@ -18,10 +18,13 @@ pub type ProofNodes = Vec>; /// Single flattened proof, all default child trie are flattened over a same /// container, no child trie information is provided. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct Flat(ProofNodes); +pub struct Flat(pub(crate) ProofNodes); +/// Compacted proof with child trie organisation. 
+/// +/// This is taking more space than the flat variant.but #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct Full(ChildrenProofMap); +pub struct Full(pub(crate) ChildrenProofMap); impl StorageProof for Flat { fn empty() -> Self { @@ -76,7 +79,7 @@ impl RegStorageProof for type RecordBackend = super::FlatSyncRecorder; - fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + fn extract_proof(recorder: &Self::RecordBackend, _input: Input) -> Result { let trie_nodes = recorder.0.read() .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) @@ -91,7 +94,7 @@ impl RegStorageProof for type RecordBackend = super::FullSyncRecorder; - fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { + fn extract_proof(recorder: &Self::RecordBackend, _input: Input) -> Result { let mut result = ChildrenProofMap::default(); for (child_info, set) in recorder.0.read().iter() { let trie_nodes: Vec> = set From 9aca478464582d72aed691d57b6d0a1fbbaab028 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 4 Jun 2020 17:32:49 +0200 Subject: [PATCH 142/185] Adding query plan variant to multiple for usage without exposing state machine backend. --- primitives/state-machine/src/backend.rs | 2 +- primitives/trie/src/storage_proof/multiple.rs | 48 ++++++++++++++++++- .../trie/src/storage_proof/query_plan.rs | 18 ++++++- 3 files changed, 63 insertions(+), 5 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 3c55b6294fa93..d5ebf09ecc755 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -44,7 +44,7 @@ pub trait Backend: std::fmt::Debug { /// related but do not need to be usable as a backend. type StorageProofReg: sp_trie::RegStorageProof + sp_trie::MergeableStorageProof - + Into; + + Into; // TODO EMCH consider removing this conv. /// The actual proof produced. 
type StorageProof: sp_trie::BackendStorageProof; diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index 6aabce6a66cc1..54d3d396ac1d8 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -25,6 +25,9 @@ pub enum StorageProofKind { /// Kind for `MultipleStorageProof::TrieSkipHashes`. TrieSkipHashes = 2, + + /// Kind for `MultipleStorageProof::QueryPlan`. + KnownQueryPlanAndValues = 127, } impl StorageProofKind { @@ -34,6 +37,7 @@ impl StorageProofKind { Some(match encoded { x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, + x if x == StorageProofKind::KnownQueryPlanAndValues as u8 => StorageProofKind::KnownQueryPlanAndValues, _ => return None, }) } @@ -50,6 +54,15 @@ pub enum MultipleStorageProof { /// See `crate::storage_proof::compact::Flat`. TrieSkipHashes(super::compact::Flat>, PhantomData), + + /// See `crate::storage_proof::query_plan::KnownQueryPlanAndValues`. + /// + /// This variant is temporary to allow producing known query proof over + /// substrate state machine, until it can be configured over a specific + /// proving backend. + /// The fundamental flaw here is that this leads to a partial implementation + /// of the proof verification. + KnownQueryPlanAndValues(super::query_plan::KnownQueryPlanAndValues>), } impl sp_std::fmt::Debug for MultipleStorageProof { @@ -57,6 +70,7 @@ impl sp_std::fmt::Debug for MultipleStorageProof { match self { MultipleStorageProof::Flat(v) => v.fmt(f), MultipleStorageProof::TrieSkipHashes(v, _) => v.fmt(f), + MultipleStorageProof::KnownQueryPlanAndValues(v) => v.fmt(f), } } } @@ -76,6 +90,9 @@ impl Decode for MultipleStorageProof { Decode::decode(value)?, PhantomData, ), + StorageProofKind::KnownQueryPlanAndValues => MultipleStorageProof::KnownQueryPlanAndValues( + Decode::decode(value)? 
+ ), }) } } @@ -86,6 +103,7 @@ impl Encode for MultipleStorageProof { match self { MultipleStorageProof::Flat(p) => p.encode_to(dest), MultipleStorageProof::TrieSkipHashes(p, _) => p.encode_to(dest), + MultipleStorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), } } } @@ -93,10 +111,13 @@ impl Encode for MultipleStorageProof { impl StorageProof for MultipleStorageProof { fn empty() -> Self { match D::KIND { - StorageProofKind::Flat => + StorageProofKind::Flat => MultipleStorageProof::Flat(super::simple::Flat::empty()), - StorageProofKind::TrieSkipHashes => + StorageProofKind::TrieSkipHashes => MultipleStorageProof::TrieSkipHashes(super::compact::Flat::empty(), PhantomData), + StorageProofKind::KnownQueryPlanAndValues => MultipleStorageProof::KnownQueryPlanAndValues( + super::query_plan::KnownQueryPlanAndValues::empty() + ), } } @@ -105,6 +126,7 @@ impl StorageProof for MultipleStorageProof { match self { MultipleStorageProof::Flat(data) => data.is_empty(), MultipleStorageProof::TrieSkipHashes(data, _) => data.is_empty(), + MultipleStorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), } } } @@ -121,6 +143,7 @@ impl Default for MultipleSyncRecorder { match D::KIND { StorageProofKind::Flat => MultipleSyncRecorder::Flat(Default::default(), PhantomData), StorageProofKind::TrieSkipHashes => MultipleSyncRecorder::Full(Default::default()), + StorageProofKind::KnownQueryPlanAndValues => MultipleSyncRecorder::Full(Default::default()), } } } @@ -152,6 +175,7 @@ impl RegStorageProof for MultipleStorageProof; @@ -171,6 +195,13 @@ impl RegStorageProof for MultipleStorageProof { + if let MultipleSyncRecorder::Full(rec) = recorder { + return Ok(MultipleStorageProof::KnownQueryPlanAndValues( + super::query_plan::KnownQueryPlanAndValues::extract_proof(rec, input)?, + )) + } + }, } Err(missing_pack_input()) } @@ -200,12 +231,25 @@ impl TryInto>> for MultipleStorageProof TryInto>> for MultipleStorageProof { + type Error = super::Error; + + fn try_into(self) -> 
Result>> { + match self { + MultipleStorageProof::KnownQueryPlanAndValues(p) => Ok(p), + _ => Err(incompatible_type()), + } + } +} + + impl MultipleStorageProof { /// Get kind type for the storage proof variant. pub fn kind(&self) -> StorageProofKind { match self { MultipleStorageProof::Flat(_) => StorageProofKind::Flat, MultipleStorageProof::TrieSkipHashes(_, _) => StorageProofKind::TrieSkipHashes, + MultipleStorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, } } } diff --git a/primitives/trie/src/storage_proof/query_plan.rs b/primitives/trie/src/storage_proof/query_plan.rs index 0fee9c3283356..4d099d23f90bd 100644 --- a/primitives/trie/src/storage_proof/query_plan.rs +++ b/primitives/trie/src/storage_proof/query_plan.rs @@ -23,8 +23,8 @@ use sp_std::marker::PhantomData; /// use to check if there was a change of content. /// This needs to be check for every children proofs, and needs to keep /// trace of every child trie origin. -#[derive(PartialEq, Eq, Clone, Encode, Decode)] -struct KnownQueryPlanAndValues(ChildrenProofMap, PhantomData); +#[derive(Encode, Decode)] +pub struct KnownQueryPlanAndValues(pub(crate) ChildrenProofMap, PhantomData); impl sp_std::fmt::Debug for KnownQueryPlanAndValues { fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { @@ -32,6 +32,20 @@ impl sp_std::fmt::Debug for KnownQueryPlanAndValues { } } +impl PartialEq> for KnownQueryPlanAndValues { + fn eq(&self, other: &KnownQueryPlanAndValues) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for KnownQueryPlanAndValues { } + +impl Clone for KnownQueryPlanAndValues { + fn clone(&self) -> Self { + KnownQueryPlanAndValues(self.0.clone(), PhantomData) + } +} + impl StorageProof for KnownQueryPlanAndValues { fn empty() -> Self { KnownQueryPlanAndValues(Default::default(), PhantomData) From 2ec97d59f4719e2e5e92711eede93f24611ec5db Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 5 Jun 2020 13:01:01 +0200 Subject: [PATCH 143/185] into 
partial merge --- primitives/state-machine/src/backend.rs | 19 ++- primitives/state-machine/src/lib.rs | 4 +- .../state-machine/src/proving_backend.rs | 159 +++++++----------- primitives/state-machine/src/trie_backend.rs | 14 +- primitives/trie/src/lib.rs | 4 +- primitives/trie/src/storage_proof/mod.rs | 53 +++++- primitives/trie/src/storage_proof/multiple.rs | 114 +++++++++++-- 7 files changed, 241 insertions(+), 126 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index d5ebf09ecc755..c992750ee59e6 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -21,11 +21,13 @@ use hash_db::Hasher; use codec::{Decode, Encode}; use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; use crate::{ - trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, UsageInfo, StorageKey, StorageValue, StorageCollection, }; +/// Access the state of the proof backend of a backend. +pub type ProofRegStateFor = <>::ProofRegBackend as ProofRegBackend>::State; + /// A state backend is used to read state data and can have changes committed /// to it. /// @@ -43,7 +45,7 @@ pub trait Backend: std::fmt::Debug { /// storage proof constraint and rename the struct to something that is more build /// related but do not need to be usable as a backend. type StorageProofReg: sp_trie::RegStorageProof - + sp_trie::MergeableStorageProof + + sp_trie::MergeableStorageProof + Into; // TODO EMCH consider removing this conv. /// The actual proof produced. @@ -169,11 +171,18 @@ pub trait Backend: std::fmt::Debug { all } - /// Try convert into trie backend. - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { - None + /// Try convert into a proof backend. + /// If one do not want to consume the backend, calling on '&self' is fine + /// since '&Backend' implement 'Backend'. 
+ fn as_proof_backend(self) -> Option { + self.from_reg_state(Default::default()) } + /// Try convert into a proof backend. + /// We can optionally use a previous proof backend to avoid having to merge + /// proof later. + fn from_reg_state(self, previous: ProofRegStateFor) -> Option; + /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index c01ac241091ac..ef6d5e1294555 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -45,8 +45,8 @@ mod stats; mod read_only; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, - StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, - LegacyDecodeAdapter, LegacyEncodeAdapter, FlatEncodeAdapter, ProofNodes}; + TrieNodesStorageProof as StorageProof, StorageProof as StorageProofT, StorageProofKind, ChildrenProofMap, + ProofInput, ProofInputKind, ProofNodes}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ae7334e0d4524..6e18bfbb38aa2 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -24,13 +24,14 @@ use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProofKind, StorageProof, ProofInputKind, ProofInput, - RecordMapTrieNodes, + record_all_keys, StorageProofKind, TrieNodesStorageProof as StorageProof, ProofInputKind, + ProofInput, RecordMapTrieNodes, }; pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, 
TrieError}}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; -use crate::{Error, ExecutionError, Backend, DBValue}; +use crate::{Error, ExecutionError, DBValue}; +use crate::backend::{Backend, ProofRegStateFor}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildrenMap}; /// Patricia trie-based backend specialized in get value proofs. @@ -144,37 +145,31 @@ impl Clone for ProofRecorder { /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - trie_backend: TrieBackend, H>, - previous_input: ProofInput, - proof_kind: StorageProofKind, -} +pub struct ProvingBackend, T: TrieConfiguration> ( + pub TrieBackend, T>, +); /// Trie backend storage with its proof recorder. -pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - backend: &'a S, +pub struct ProofRecorderBackend, H: Hasher> { + backend: S, proof_recorder: ProofRecorder, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> - where H::Out: Codec +impl<'a, S, T> ProvingBackend<&'a S, T> + where + S: TrieBackendStorage, + T: TrieConfiguration, + TrieHash: Codec, { /// Create new proving backend. - pub fn new(backend: &'a TrieBackend, kind: StorageProofKind) -> Self { - let proof_recorder = if kind.is_full_proof_recorder_needed() { - ProofRecorder::Full(Default::default()) - } else { - ProofRecorder::Flat(Default::default()) - }; - Self::new_with_recorder(backend, proof_recorder, kind, ProofInput::None) + pub fn new(backend: &'a TrieBackend) -> Self { + let proof_recorder = Default::default(); + Self::new_with_recorder(backend, proof_recorder) } - /// Create new proving backend with the given recorder. 
- pub fn new_with_recorder( - backend: &'a TrieBackend, - proof_recorder: ProofRecorder, - proof_kind: StorageProofKind, - previous_input: ProofInput, + fn new_with_recorder( + backend: &'a TrieBackend, + proof_recorder: ProofRecorder, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); @@ -182,84 +177,38 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> backend: essence.backend_storage(), proof_recorder, }; - let trie_backend = if let ProofInputKind::ChildTrieRoots = proof_kind.input_kind_for_processing() { - TrieBackend::new_with_roots(recorder, root) - } else { - TrieBackend::new(recorder, root) - }; - ProvingBackend { - trie_backend, - previous_input, - proof_kind, - } - } - - /// Extracting the gathered unordered proof. - pub fn extract_proof(&mut self) -> Result { - self.update_input()?; - self.trie_backend.essence().backend_storage().proof_recorder - .extract_proof(self.proof_kind, self.previous_input.clone()) - } - - fn update_input(&mut self) -> Result<(), String> { - let input = match self.proof_kind.input_kind_for_processing() { - ProofInputKind::ChildTrieRoots => { - self.trie_backend.extract_registered_roots() - }, - _ => ProofInput::None, - }; - if let Err(e) = self.previous_input.consolidate(input) { - Err(format!( - "{:?} for inputs kind {:?}, {:?}", - e, - self.previous_input.kind(), - ProofInputKind::ChildTrieRoots, - )) - } else { - Ok(()) - } - } - - /// Extract current recording state, this allows using the state back when recording - /// multiple operations. 
- pub fn recording_state(mut self) -> Result<(ProofRecorder, ProofInput), String> { - self.update_input()?; - Ok(( - self.trie_backend.essence().backend_storage().proof_recorder.clone(), - self.previous_input - )) + ProvingBackend(TrieBackend::new(recorder, root)) } } -impl ProofRecorder +impl ProvingBackend where - H::Out: Codec, + S: TrieBackendStorage, + T: TrieConfiguration, + TrieHash: Codec, { - /// Extracts the gathered unordered encoded trie nodes. - /// Depending on `kind`, encoded trie nodes can change - /// (usually to compact the proof). - pub fn extract_proof( - &self, - kind: StorageProofKind, - input: ProofInput, - ) -> Result { - Ok(match self { - ProofRecorder::Flat(rec) => StorageProof::extract_proof_from_flat( - &*rec.read(), - kind, - &input, - ).map_err(|e| format!("{}", e))?, - ProofRecorder::Full(rec) => StorageProof::extract_proof( - &*rec.read(), - kind, - &input, - ).map_err(|e| format!("{}", e))?, - }) + /// Create new proving backend with the given recorder. + pub fn from_backend_with_recorder( + backend: S, + root: TrieHash, + proof_recorder: ProofRecorder, + ) -> Self { + let recorder = ProofRecorderBackend { + backend, + proof_recorder, + }; + ProvingBackend(TrieBackend::new(recorder, root)) + } + + /// Extract current recording state. + /// This is sharing a rc over a sync reference. 
+ pub fn extract_recorder(&self) -> ProofRecorder { + self.0.backend_storage().proof_recorder.clone() } } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage - for ProofRecorderBackend<'a, S, H> +impl, H: Hasher> TrieBackendStorage + for ProofRecorderBackend { type Overlay = S::Overlay; @@ -303,7 +252,9 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> { type Error = String; type Transaction = S::Overlay; - type TrieBackendStorage = S; + type StorageProof = sp_trie::TrieNodesStorageProof; + type ProofRegBackend = Self; + type ProofCheckBackend = TrieBackend, H>; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.trie_backend.storage(key) @@ -390,6 +341,22 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn usage_info(&self) -> crate::stats::UsageInfo { self.trie_backend.usage_info() } + + fn as_proof_backend(self) -> Option { + Some(self) + } + + fn from_reg_state(self, previous_recorder: ProofRegStateFor) -> Option { + let root = self.0.essence().root().clone(); + let storage = self.0.into_storage(); + let current_recorder = storage.proof_recorder; + let backend = storage.backend; + if current_recorder.merge(previous_recorder) { + ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder) + } else { + None + } + } } /// Create flat proof check backend. 
diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 556075da06f99..9fde606536118 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -22,6 +22,7 @@ use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, ChildrenProofMap, ProofInput}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; +use crate::backend::{ProofRegStateFor}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; use crate::{ @@ -109,7 +110,9 @@ impl, H: Hasher> Backend for TrieBackend where { type Error = String; type Transaction = S::Overlay; - type TrieBackendStorage = S; + type StorageProof = sp_trie::TrieNodesStorageProof; + type ProofRegBackend = crate::proving_backend::ProvingBackend; + type ProofCheckBackend = TrieBackend, H>; fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) @@ -265,8 +268,13 @@ impl, H: Hasher> Backend for TrieBackend where (root, is_default, write_overlay) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { - Some(self) + fn from_reg_state(self, recorder: ProofRegStateFor) -> Option { + let root = self.essence.root().clone(); + Some(crate::proving_backend::ProvingBackend::from_backend_with_recorder( + self.essence.into_storage(), + root, + recorder, + )) } fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 72493fa6ae0f8..1e31984cfd110 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -35,7 +35,9 @@ pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, - Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes}; + Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, + BackendStorageProof, MergeableStorageProof, + multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 301b53747baa9..aa31740f1042b 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -6,8 +6,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use sp_std::collections::{btree_map::BTreeMap, btree_map}; +use sp_std::collections::{btree_map::BTreeMap, btree_map, btree_map::Entry}; use sp_std::collections::btree_set::BTreeSet; +#[cfg(feature = "std")] +use std::collections::hash_map::Entry as HEntry; use sp_std::vec::Vec; use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput, Error as CodecError}; use hash_db::{Hasher, HashDBRef}; @@ -262,6 +264,8 @@ pub trait RecordBackend: Clone + Default { fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option>; /// Record the actual value. fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option); + /// Merge two record, can fail. 
+ fn merge(&mut self, other: Self) -> bool; } #[cfg(feature = "std")] @@ -291,6 +295,35 @@ impl RecordBackend for Fu .or_default() .insert(key.clone(), value.clone()); } + + fn merge(&mut self, other: Self) -> bool { + let mut first = self.0.write(); + let mut second = other.0.write(); + for (child_info, other) in std::mem::replace(&mut *second, Default::default()) { + match first.entry(child_info) { + Entry::Occupied(mut entry) => { + for (key, value) in other.0 { + match entry.get_mut().entry(key) { + HEntry::Occupied(entry) => { + if entry.get() != &value { + return false; + } + }, + HEntry::Vacant(entry) => { + entry.insert(value); + }, + } + } + }, + Entry::Vacant(entry) => { + entry.insert(other); + }, + } + } + true + } + + } #[cfg(feature = "std")] @@ -302,6 +335,24 @@ impl RecordBackend for Fl fn record(&self, _child_info: &ChildInfo, key: &Hash, value: Option) { self.0.write().insert(key.clone(), value.clone()); } + + fn merge(&mut self, other: Self) -> bool { + let mut first = self.0.write(); + let mut second = other.0.write(); + for (key, value) in std::mem::replace(&mut *second, Default::default()).0 { + match first.entry(key) { + HEntry::Occupied(entry) => { + if entry.get() != &value { + return false; + } + }, + HEntry::Vacant(entry) => { + entry.insert(value); + }, + } + } + true + } } /// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index 54d3d396ac1d8..4b5aef7adb02b 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -16,7 +16,9 @@ use sp_std::marker::PhantomData; /// This definition is used as input parameter when producing /// a storage proof. /// Some kind are reserved for test or internal use and will -/// not be usable when decoding proof. 
+/// not be usable when decoding proof, those could be remove +/// when substrate will be able to define custom state-machine +/// backend. #[repr(u8)] #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum StorageProofKind { @@ -26,6 +28,9 @@ pub enum StorageProofKind { /// Kind for `MultipleStorageProof::TrieSkipHashes`. TrieSkipHashes = 2, + /// Kind for `MultipleStorageProof::FullForMerge`. + FullForMerge = 126, + /// Kind for `MultipleStorageProof::QueryPlan`. KnownQueryPlanAndValues = 127, } @@ -37,6 +42,7 @@ impl StorageProofKind { Some(match encoded { x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, + x if x == StorageProofKind::FullForMerge as u8 => StorageProofKind::FullForMerge, x if x == StorageProofKind::KnownQueryPlanAndValues as u8 => StorageProofKind::KnownQueryPlanAndValues, _ => return None, }) @@ -55,6 +61,15 @@ pub enum MultipleStorageProof { /// See `crate::storage_proof::compact::Flat`. TrieSkipHashes(super::compact::Flat>, PhantomData), + /// See `crate::storage_proof::compact::FullForMerge`. + /// + /// This variant is temporary to allow producing known query proof over + /// substrate state machine, until it can be configured over a specific + /// proving backend. + /// The fundamental flaw here is that this leads to a partial implementation + /// of the proof verification. + FullForMerge(super::compact::FullForMerge), + /// See `crate::storage_proof::query_plan::KnownQueryPlanAndValues`. 
/// /// This variant is temporary to allow producing known query proof over @@ -70,6 +85,7 @@ impl sp_std::fmt::Debug for MultipleStorageProof { match self { MultipleStorageProof::Flat(v) => v.fmt(f), MultipleStorageProof::TrieSkipHashes(v, _) => v.fmt(f), + MultipleStorageProof::FullForMerge(v) => v.fmt(f), MultipleStorageProof::KnownQueryPlanAndValues(v) => v.fmt(f), } } @@ -90,6 +106,7 @@ impl Decode for MultipleStorageProof { Decode::decode(value)?, PhantomData, ), + StorageProofKind::FullForMerge => MultipleStorageProof::FullForMerge(Decode::decode(value)?), StorageProofKind::KnownQueryPlanAndValues => MultipleStorageProof::KnownQueryPlanAndValues( Decode::decode(value)? ), @@ -103,6 +120,7 @@ impl Encode for MultipleStorageProof { match self { MultipleStorageProof::Flat(p) => p.encode_to(dest), MultipleStorageProof::TrieSkipHashes(p, _) => p.encode_to(dest), + MultipleStorageProof::FullForMerge(p) => p.encode_to(dest), MultipleStorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), } } @@ -115,6 +133,8 @@ impl StorageProof for MultipleStorageProof { MultipleStorageProof::Flat(super::simple::Flat::empty()), StorageProofKind::TrieSkipHashes => MultipleStorageProof::TrieSkipHashes(super::compact::Flat::empty(), PhantomData), + StorageProofKind::FullForMerge => + MultipleStorageProof::FullForMerge(super::compact::FullForMerge::empty()), StorageProofKind::KnownQueryPlanAndValues => MultipleStorageProof::KnownQueryPlanAndValues( super::query_plan::KnownQueryPlanAndValues::empty() ), @@ -126,6 +146,7 @@ impl StorageProof for MultipleStorageProof { match self { MultipleStorageProof::Flat(data) => data.is_empty(), MultipleStorageProof::TrieSkipHashes(data, _) => data.is_empty(), + MultipleStorageProof::FullForMerge(data) => data.is_empty(), MultipleStorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), } } @@ -134,17 +155,33 @@ impl StorageProof for MultipleStorageProof { #[cfg(feature = "std")] #[derive(Clone)] pub enum MultipleSyncRecorder { - 
Flat(super::FlatSyncRecorder, PhantomData), - Full(super::FullSyncRecorder), + Flat(super::FlatSyncRecorder, StorageProofKind, PhantomData), + Full(super::FullSyncRecorder, StorageProofKind), +} + +impl MultipleSyncRecorder { + /// Instantiate a recorder of a given type. + pub fn new_recorder(kind: StorageProofKind) -> Self { + match kind { + StorageProofKind::Flat => MultipleSyncRecorder::Flat(Default::default(), D::KIND, PhantomData), + StorageProofKind::TrieSkipHashes => MultipleSyncRecorder::Full(Default::default(), D::KIND), + StorageProofKind::FullForMerge => MultipleSyncRecorder::Full(Default::default(), D::KIND), + StorageProofKind::KnownQueryPlanAndValues => MultipleSyncRecorder::Full(Default::default(), D::KIND), + } + } + + /// Targetted storage proof kind. + pub fn target(&self) -> StorageProofKind { + match self { + MultipleSyncRecorder::Flat(_, k, _) => *k, + MultipleSyncRecorder::Full(_, k) => *k, + } + } } impl Default for MultipleSyncRecorder { fn default() -> Self { - match D::KIND { - StorageProofKind::Flat => MultipleSyncRecorder::Flat(Default::default(), PhantomData), - StorageProofKind::TrieSkipHashes => MultipleSyncRecorder::Full(Default::default()), - StorageProofKind::KnownQueryPlanAndValues => MultipleSyncRecorder::Full(Default::default()), - } + Self::new_recorder(D::KIND) } } @@ -152,15 +189,38 @@ impl Default for MultipleSyncRecorder { impl RecordBackend for MultipleSyncRecorder { fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option> { match self { - MultipleSyncRecorder::Flat(rec, _) => rec.get(child_info, key), - MultipleSyncRecorder::Full(rec) => rec.get(child_info, key), + MultipleSyncRecorder::Flat(rec, _ ,_) => rec.get(child_info, key), + MultipleSyncRecorder::Full(rec, _) => rec.get(child_info, key), } } fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option) { match self { - MultipleSyncRecorder::Flat(rec, _) => rec.record(child_info, key, value), - MultipleSyncRecorder::Full(rec) => rec.record(child_info, 
key, value), + MultipleSyncRecorder::Flat(rec, _, _) => rec.record(child_info, key, value), + MultipleSyncRecorder::Full(rec, _) => rec.record(child_info, key, value), + } + } + + fn merge(&mut self, other: Self) -> bool { + match self { + MultipleSyncRecorder::Flat(rec, _, _) => { + match other { + MultipleSyncRecorder::Flat(oth, _, _) => { + rec.merge(oth); + true + }, + _ => false + } + }, + MultipleSyncRecorder::Full(rec, _) => { + match other { + MultipleSyncRecorder::Full(oth, _) => { + rec.merge(oth); + true + }, + _ => false, + } + }, } } } @@ -170,7 +230,7 @@ impl RecordBack impl RegStorageProof for MultipleStorageProof where Hash: Hasher + 'static, - Hash::Out: Decode, + Hash::Out: Codec, D: DefaultKind, { // Actually one could ignore this if he knows its type to be non compact. @@ -181,22 +241,29 @@ impl RegStorageProof for MultipleStorageProof; fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { - match D::KIND { + match recorder.target() { StorageProofKind::Flat => { - if let MultipleSyncRecorder::Flat(rec, _) = recorder { + if let MultipleSyncRecorder::Flat(rec, _, _) = recorder { return Ok(MultipleStorageProof::Flat(super::simple::Flat::extract_proof(rec, input)?)) } }, StorageProofKind::TrieSkipHashes => { - if let MultipleSyncRecorder::Full(rec) = recorder { + if let MultipleSyncRecorder::Full(rec, _) = recorder { return Ok(MultipleStorageProof::TrieSkipHashes( super::compact::Flat::extract_proof(rec, input)?, PhantomData, )) } }, + StorageProofKind::FullForMerge => { + if let MultipleSyncRecorder::Full(rec, _) = recorder { + return Ok(MultipleStorageProof::FullForMerge( + super::compact::FullForMerge::extract_proof(rec, input)?, + )) + } + }, StorageProofKind::KnownQueryPlanAndValues => { - if let MultipleSyncRecorder::Full(rec) = recorder { + if let MultipleSyncRecorder::Full(rec, _) = recorder { return Ok(MultipleStorageProof::KnownQueryPlanAndValues( super::query_plan::KnownQueryPlanAndValues::extract_proof(rec, input)?, 
)) @@ -231,6 +298,17 @@ impl TryInto>> for MultipleStorageProof TryInto for MultipleStorageProof { + type Error = super::Error; + + fn try_into(self) -> Result { + match self { + MultipleStorageProof::FullForMerge(p) => Ok(p), + _ => Err(incompatible_type()), + } + } +} + impl TryInto>> for MultipleStorageProof { type Error = super::Error; @@ -242,13 +320,13 @@ impl TryInto>> for Mu } } - impl MultipleStorageProof { /// Get kind type for the storage proof variant. pub fn kind(&self) -> StorageProofKind { match self { MultipleStorageProof::Flat(_) => StorageProofKind::Flat, MultipleStorageProof::TrieSkipHashes(_, _) => StorageProofKind::TrieSkipHashes, + MultipleStorageProof::FullForMerge(_) => StorageProofKind::FullForMerge, MultipleStorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, } } From f425fd5588ca7fac4fc612a811bb98a311ea5f7c Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 5 Jun 2020 15:27:50 +0200 Subject: [PATCH 144/185] in progress --- primitives/state-machine/src/backend.rs | 7 +- primitives/state-machine/src/lib.rs | 289 ++++++------------ .../state-machine/src/proving_backend.rs | 136 ++++----- primitives/state-machine/src/trie_backend.rs | 6 +- .../state-machine/src/trie_backend_essence.rs | 8 + primitives/trie/src/lib.rs | 2 +- primitives/trie/src/storage_proof/mod.rs | 5 +- primitives/trie/src/storage_proof/multiple.rs | 8 + primitives/trie/src/storage_proof/simple.rs | 4 +- 9 files changed, 175 insertions(+), 290 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index c992750ee59e6..d15f7023d2878 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -24,6 +24,7 @@ use crate::{ trie_backend_essence::TrieBackendStorage, UsageInfo, StorageKey, StorageValue, StorageCollection, }; +use sp_trie::ProofInput; /// Access the state of the proof backend of a backend. 
pub type ProofRegStateFor = <>::ProofRegBackend as ProofRegBackend>::State; @@ -53,10 +54,10 @@ pub trait Backend: std::fmt::Debug { // + sp_trie::WithRegStorageProof; /// Type of proof backend. - type ProofRegBackend: ProofRegBackend; + type ProofRegBackend: ProofRegBackend; /// Type of proof backend. - type ProofCheckBackend: ProofCheckBackend; + type ProofCheckBackend: ProofCheckBackend; /// Get keyed storage or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result, Self::Error>; @@ -275,7 +276,7 @@ pub trait ProofRegBackend: crate::backend::Backend type State: Default + Send + Sync + Clone; /// Extract proof when run. - fn extract_proof(&self) -> Self::StorageProof; + fn extract_proof(&self, input: ProofInput) -> Self::StorageProofReg; } /// Backend used to produce proof. diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index ef6d5e1294555..23568bfb97ffc 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -51,7 +51,6 @@ pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; pub use ext::Ext; -pub use backend::Backend; pub use changes_trie::{ AnchorBlockId as ChangesTrieAnchorBlockId, State as ChangesTrieState, @@ -71,7 +70,7 @@ pub use overlayed_changes::{ OverlayedChanges, StorageChanges, StorageTransactionCache, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; -pub use proving_backend::{ProofRecorder, ProvingBackend, ProvingBackendRecorder, +pub use proving_backend::{ProvingBackend, ProvingBackendRecorder, create_proof_check_backend, create_flat_proof_check_backend}; pub use trie_backend_essence::{TrieBackendStorage, Storage}; pub use trie_backend::TrieBackend; @@ -80,6 +79,8 @@ pub use in_memory_backend::new_in_mem; pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; pub use sp_core::traits::CloneableSpawn; +use backend::{Backend, ProofRegBackend, 
ProofCheckBackend}; + type CallResult = Result, E>; /// Default handler of the execution manager. @@ -462,7 +463,7 @@ pub fn prove_execution( call_data: &[u8], kind: StorageProofKind, runtime_code: &RuntimeCode, -) -> Result<(Vec, StorageProof), Box> +) -> Result<(Vec, B::StorageProof), Box> where B: Backend, H: Hasher, @@ -470,10 +471,10 @@ where Exec: CodeExecutor + Clone + 'static, N: crate::changes_trie::BlockNumber, { - let trie_backend = backend.as_trie_backend() + let proof_backend = backend.as_proof_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend::<_, _, N, _>( - trie_backend, + prove_execution_on_proof_backend::<_, _, N, _>( + &proof_backend, overlay, exec, spawn_handle, @@ -493,8 +494,8 @@ where /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. -pub fn prove_execution_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_execution_on_proof_backend( + proving_backend: &P, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Box, @@ -502,44 +503,40 @@ pub fn prove_execution_on_trie_backend( call_data: &[u8], kind: StorageProofKind, runtime_code: &RuntimeCode, -) -> Result<(Vec, StorageProof), Box> +) -> Result<(Vec, P::StorageProof), Box> where - S: trie_backend_essence::TrieBackendStorage, + P: ProofRegBackend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, N: crate::changes_trie::BlockNumber, { let mut offchain_overlay = OffchainOverlayedChanges::default(); - let mut proving_backend = proving_backend::ProvingBackend::new(trie_backend, kind); - let result = { - let mut sm = StateMachine::<_, H, N, Exec>::new( - &proving_backend, - None, - overlay, - &mut offchain_overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); + let mut sm = StateMachine::<_, H, N, Exec>::new( + 
proving_backend, + None, + overlay, + &mut offchain_overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ); - sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_wasm(), - None, - )? - }; - let proof = proving_backend.extract_proof() - .map_err(|e| Box::new(e) as Box)?; + let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_wasm(), + None, + )?; + let proof = sm.backend.extract_proof(); Ok((result.into_encoded(), proof)) } /// Check execution proof, generated by `prove_execution` call. -pub fn execution_proof_check( +pub fn execution_proof_check( root: H::Out, - proof: StorageProof, + proof: P::StorageProof, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Box, @@ -548,81 +545,27 @@ pub fn execution_proof_check( runtime_code: &RuntimeCode, ) -> Result, Box> where + P: ProofCheckBackend, H: Hasher, Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, N: crate::changes_trie::BlockNumber, { - match proof.kind().use_full_partial_db() { - Some(true) => { - let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _>( - &trie_backend, - overlay, - exec, - spawn_handle, - method, - call_data, - runtime_code, - ) - }, - Some(false) => { - let trie_backend = create_flat_proof_check_backend::(root.into(), proof)?; - execution_flat_proof_check_on_trie_backend::<_, N, _>( - &trie_backend, - overlay, - exec, - spawn_handle, - method, - call_data, - runtime_code, - ) - }, - None => { - return Err(Box::new("This kind of proof need to use a verify method")); - }, - } -} - -/// Check execution proof on proving backend, generated by `prove_execution` call. 
-fn execution_flat_proof_check_on_trie_backend( - trie_backend: &TrieBackend, H>, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Box, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, -) -> Result, Box> -where - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, -{ - let mut offchain_overlay = OffchainOverlayedChanges::default(); - let mut sm = StateMachine::<_, H, N, Exec>::new( - trie_backend, - None, + let trie_backend = P::create_proof_check_backend(root.into(), proof)?; + execution_proof_check_on_proof_backend::( + &trie_backend, overlay, - &mut offchain_overlay, exec, + spawn_handle, method, call_data, - Extensions::default(), runtime_code, - spawn_handle, - ); - - sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_untrusted_wasm(), - None, - ).map(NativeOrEncoded::into_encoded) + ) } /// Check execution proof on proving backend, generated by `prove_execution` call. -pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend>, H>, +pub fn execution_proof_check_on_proof_backend( + proof_backend: &B, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Box, @@ -631,6 +574,7 @@ pub fn execution_proof_check_on_trie_backend( runtime_code: &RuntimeCode, ) -> Result, Box> where + B: Backend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, @@ -638,7 +582,7 @@ where { let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut sm = StateMachine::<_, H, N, Exec>::new( - trie_backend, + proof_backend, None, overlay, &mut offchain_overlay, @@ -658,10 +602,10 @@ where /// Generate storage read proof. 
pub fn prove_read( - mut backend: B, + backend: B, keys: I, kind: StorageProofKind, -) -> Result> +) -> Result> where B: Backend, H: Hasher, @@ -669,20 +613,20 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() + let proof_backend = backend.as_proof_backend() .ok_or_else( || Box::new(ExecutionError::UnableToGenerateProof) as Box )?; - prove_read_on_trie_backend(trie_backend, keys, kind) + prove_read_on_proof_backend(&proof_backend, keys, kind) } /// Generate child storage read proof. pub fn prove_child_read( - mut backend: B, + backend: B, child_info: &ChildInfo, keys: I, kind: StorageProofKind, -) -> Result> +) -> Result> where B: Backend, H: Hasher, @@ -690,187 +634,124 @@ where I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() + let proving_backend = backend.as_proof_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, child_info, keys, kind) + prove_child_read_on_proof_backend(&proving_backend, child_info, keys, kind) } /// Generate storage read proof on pre-created trie backend. -pub fn prove_read_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_read_on_proof_backend( + proving_backend: &P, keys: I, kind: StorageProofKind, -) -> Result> +) -> Result> where - S: trie_backend_essence::TrieBackendStorage, + P: ProofRegBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let mut proving_backend = proving_backend::ProvingBackend::<_, H>::new( - trie_backend, - kind, - ); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof() - .map_err(|e| Box::new(e) as Box)?) + Ok(proving_backend.extract_proof()) } /// Generate storage read proof on pre-created trie backend. 
-pub fn prove_child_read_on_trie_backend( - trie_backend: &TrieBackend, +pub fn prove_child_read_on_proof_backend( + proving_backend: &P, child_info: &ChildInfo, keys: I, kind: StorageProofKind, -) -> Result> +) -> Result> where - S: trie_backend_essence::TrieBackendStorage, + P: ProofRegBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let mut proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend, kind); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof() - .map_err(|e| Box::new(e) as Box)?) + Ok(proving_backend.extract_proof()) } /// Check storage read proof, generated by `prove_read` call. -/// WARNING this method rebuild a full memory backend and should -/// be call only once per proof checks. -pub fn read_proof_check( +pub fn read_proof_check( root: H::Out, - proof: StorageProof, + proof: P::StorageProof, keys: I, ) -> Result, Option>>, Box> where + P: ProofCheckBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { + let proving_backend = P::create_proof_check_backend(root, proof)?; let mut result = HashMap::new(); - match proof.kind().use_full_partial_db() { - Some(true) => { - let proving_backend = create_proof_check_backend::(root, proof)?; - for key in keys.into_iter() { - let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; - result.insert(key.as_ref().to_vec(), value); - } - }, - Some(false) => { - let proving_backend = create_flat_proof_check_backend::(root, proof)?; - for key in keys.into_iter() { - let value = read_proof_check_on_flat_proving_backend(&proving_backend, key.as_ref())?; - result.insert(key.as_ref().to_vec(), value); - } - }, - None => { - return Err(Box::new("This kind of proof need to use a verify method")); - }, + for key in keys.into_iter() { + let value = read_proof_check_on_proving_backend(&proving_backend, 
key.as_ref())?; + result.insert(key.as_ref().to_vec(), value); } Ok(result) } /// Check child storage read proof, generated by `prove_child_read` call. -pub fn read_child_proof_check( +pub fn read_child_proof_check( root: H::Out, - proof: StorageProof, + proof: P::StorageProof, child_info: &ChildInfo, keys: I, ) -> Result, Option>>, Box> where + P: ProofCheckBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { + let proving_backend = P::create_proof_check_backend(root, proof)?; let mut result = HashMap::new(); - match proof.kind().use_full_partial_db() { - Some(true) => { - let proving_backend = create_proof_check_backend::(root, proof)?; - for key in keys.into_iter() { - let value = read_child_proof_check_on_proving_backend( - &proving_backend, - child_info, - key.as_ref(), - )?; - result.insert(key.as_ref().to_vec(), value); - } - }, - Some(false) => { - let proving_backend = create_flat_proof_check_backend::(root, proof)?; - for key in keys.into_iter() { - let value = read_child_proof_check_on_flat_proving_backend( - &proving_backend, - child_info, - key.as_ref(), - )?; - result.insert(key.as_ref().to_vec(), value); - } - }, - None => { - return Err(Box::new("This kind of proof need to use a verify method")); - }, + for key in keys.into_iter() { + let value = read_child_proof_check_on_proving_backend( + &proving_backend, + child_info, + key.as_ref(), + )?; + result.insert(key.as_ref().to_vec(), value); } Ok(result) } -/// Check storage read proof on pre-created flat proving backend. -pub fn read_proof_check_on_flat_proving_backend( - proving_backend: &TrieBackend, H>, - key: &[u8], -) -> Result>, Box> -where - H: Hasher, - H::Out: Ord + Codec, -{ - proving_backend.storage(key).map_err(|e| Box::new(e) as Box) -} - /// Check storage read proof on pre-created proving backend. 
-pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend>, H>, +pub fn read_proof_check_on_proving_backend( + proving_backend: &B, key: &[u8], ) -> Result>, Box> where + B: Backend, H: Hasher, H::Out: Ord + Codec, { proving_backend.storage(key).map_err(|e| Box::new(e) as Box) } -/// Check child storage read proof on pre-created flat proving backend. -fn read_child_proof_check_on_flat_proving_backend( - proving_backend: &TrieBackend, H>, - child_info: &ChildInfo, - key: &[u8], -) -> Result>, Box> -where - H: Hasher, - H::Out: Ord + Codec, -{ - proving_backend.child_storage(child_info, key) - .map_err(|e| Box::new(e) as Box) -} - /// Check child storage read proof on pre-created proving backend. -fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend>, H>, +pub fn read_child_proof_check_on_proving_backend( + proving_backend: &B, child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> where + B: Backend, H: Hasher, H::Out: Ord + Codec, { @@ -887,6 +768,12 @@ mod tests { use super::changes_trie::Configuration as ChangesTrieConfig; use sp_core::{map, traits::{Externalities, RuntimeCode}}; use sp_runtime::traits::BlakeTwo256; + use sp_trie::Layout; + + type ProvingBackend = super::TrieBackend< + MemoryDB, + Layout, + >; #[derive(Clone)] struct DummyCodeExecutor { diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 6e18bfbb38aa2..325e27d891eb2 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -25,14 +25,15 @@ use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys, StorageProofKind, TrieNodesStorageProof as StorageProof, ProofInputKind, - ProofInput, RecordMapTrieNodes, + ProofInput, RecordMapTrieNodes, RecordBackend, RegStorageProof, ProofFlatDefault, }; pub use sp_trie::{Recorder, 
ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, DBValue}; -use crate::backend::{Backend, ProofRegStateFor}; +use crate::backend::{Backend, ProofRegStateFor, ProofRegBackend}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildrenMap}; +use std::marker::PhantomData; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -114,62 +115,35 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> } } -/// A type that records all accessed trie nodes. -pub enum ProofRecorder { - /// Records are separated by child trie, this is needed for - /// proof compaction. - Full(Arc>>>), - /// Single storage for all recoded nodes (as in - /// state db column). - /// That this variant exists only for performance - /// (on less map access than in `Full`), but is not strictly - /// necessary. - Flat(Arc>>), -} - -impl Default for ProofRecorder { - fn default() -> Self { - // Default to flat proof. - ProofRecorder::Flat(Default::default()) - } -} - -impl Clone for ProofRecorder { - fn clone(&self) -> Self { - match self { - ProofRecorder::Full(a) => ProofRecorder::Full(a.clone()), - ProofRecorder::Flat(a) => ProofRecorder::Flat(a.clone()), - } - } -} - /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend, T: TrieConfiguration> ( - pub TrieBackend, T>, +pub struct ProvingBackend, H: Hasher, R: RecordBackend> ( + pub TrieBackend, H>, ); /// Trie backend storage with its proof recorder. 
-pub struct ProofRecorderBackend, H: Hasher> { +pub struct ProofRecorderBackend, H: Hasher, R: RecordBackend> { backend: S, - proof_recorder: ProofRecorder, + proof_recorder: R, + _ph: PhantomData, } -impl<'a, S, T> ProvingBackend<&'a S, T> +impl<'a, S, H, R> ProvingBackend<&'a S, H, R> where - S: TrieBackendStorage, - T: TrieConfiguration, - TrieHash: Codec, + S: TrieBackendStorage, + H: Hasher, + H::Out: Codec, + R: RecordBackend, { /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { + pub fn new(backend: &'a TrieBackend) -> Self { let proof_recorder = Default::default(); Self::new_with_recorder(backend, proof_recorder) } fn new_with_recorder( - backend: &'a TrieBackend, - proof_recorder: ProofRecorder, + backend: &'a TrieBackend, + proof_recorder: R, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); @@ -181,17 +155,18 @@ impl<'a, S, T> ProvingBackend<&'a S, T> } } -impl ProvingBackend +impl ProvingBackend where - S: TrieBackendStorage, - T: TrieConfiguration, - TrieHash: Codec, + S: TrieBackendStorage, + H: Hasher, + H::Out: Codec, + R: RecordBackend, { /// Create new proving backend with the given recorder. pub fn from_backend_with_recorder( backend: S, - root: TrieHash, - proof_recorder: ProofRecorder, + root: H::Out, + proof_recorder: R, ) -> Self { let recorder = ProofRecorderBackend { backend, @@ -202,57 +177,62 @@ impl ProvingBackend /// Extract current recording state. /// This is sharing a rc over a sync reference. 
- pub fn extract_recorder(&self) -> ProofRecorder { + pub fn extract_recorder(&self) -> R { self.0.backend_storage().proof_recorder.clone() } } -impl, H: Hasher> TrieBackendStorage - for ProofRecorderBackend +impl, H: Hasher, R: RecordBackend> TrieBackendStorage + for ProofRecorderBackend { type Overlay = S::Overlay; fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { - match &self.proof_recorder { - ProofRecorder::Flat(rec) => { - if let Some(v) = rec.read().get(key) { - return Ok(v.clone()); - } - let backend_value = self.backend.get(child_info, key, prefix)?; - rec.write().insert(key.clone(), backend_value.clone()); - Ok(backend_value) - }, - ProofRecorder::Full(rec) => { - if let Some(v) = rec.read().get(child_info).and_then(|s| s.get(key)) { - return Ok(v.clone()); - } - let backend_value = self.backend.get(child_info, key, prefix)?; - rec.write().entry(child_info.clone()) - .or_default() - .insert(key.clone(), backend_value.clone()); - Ok(backend_value) - }, + if let Some(v) = self.proof_recorder.get(child_info, key) { + return Ok(v.clone()); } + let backend_value = self.backend.get(child_info, key, prefix)?; + self.proof_recorder.record(child_info, key, backend_value.clone()); + Ok(backend_value) } } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug - for ProvingBackend<'a, S, H> +impl, H: Hasher, R: RecordBackend> std::fmt::Debug + for ProvingBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "ProvingBackend") } } -impl<'a, S, H> Backend for ProvingBackend<'a, S, H> +impl ProofRegBackend for ProvingBackend where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, + S: TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + R: RegStorageProof, +{ + type State = R::RecordBackend; + + fn extract_proof(&self, input: ProofInput) -> Self::StorageProofReg { + R::extract_proof( + &self.0.essence().backend_storage().proof_recorder, + input, + ) + } +} + +impl Backend for 
ProvingBackend + where + S: TrieBackendStorage, + H: Hasher, H::Out: Ord + Codec, + R: RegStorageProof, { type Error = String; type Transaction = S::Overlay; - type StorageProof = sp_trie::TrieNodesStorageProof; + type StorageProofReg = R; + type StorageProof = StorageProof; type ProofRegBackend = Self; type ProofCheckBackend = TrieBackend, H>; @@ -362,7 +342,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> /// Create flat proof check backend. pub fn create_flat_proof_check_backend( root: H::Out, - proof: StorageProof, + proof: StorageProof, ) -> Result, H>, Box> where H: Hasher, @@ -380,7 +360,7 @@ where /// Create proof check backend. pub fn create_proof_check_backend( root: H::Out, - proof: StorageProof, + proof: StorageProof, ) -> Result>, H>, Box> where H: Hasher, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 9fde606536118..48ebd351fdf00 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -22,6 +22,7 @@ use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, ChildrenProofMap, ProofInput}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; +use sp_trie::RegStorageProof; use crate::backend::{ProofRegStateFor}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; @@ -110,8 +111,9 @@ impl, H: Hasher> Backend for TrieBackend where { type Error = String; type Transaction = S::Overlay; - type StorageProof = sp_trie::TrieNodesStorageProof; - type ProofRegBackend = crate::proving_backend::ProvingBackend; + type StorageProof = sp_trie::TrieNodesStorageProof; + type StorageProofReg = sp_trie::TrieNodesStorageProof; + type ProofRegBackend = crate::proving_backend::ProvingBackend>::RecordBackend>; type ProofCheckBackend = TrieBackend, H>; fn storage(&self, key: &[u8]) -> Result, Self::Error> { diff --git 
a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0308d0a842089..05233e4d9ee0a 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -426,6 +426,14 @@ pub trait TrieBackendStorage: Send + Sync { fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String>; } +impl<'a, H: Hasher, S: TrieBackendStorage> TrieBackendStorage for &'a S { + type Overlay = S::Overlay; + + fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + >::get(self, child_info, key, prefix) + } +} + // This implementation is used by normal storage trie clients. impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 1e31984cfd110..d106170e81e2b 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,7 +36,7 @@ pub use trie_stream::TrieStream; pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, - BackendStorageProof, MergeableStorageProof, + BackendStorageProof, MergeableStorageProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index aa31740f1042b..e5d79a1928c2c 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -259,10 +259,11 @@ pub trait CheckableStorageProof: Codec + StorageProof { /// TODO EMCH consider using &mut and change reg storage (consume) proof /// to implement without rc & sync, and encapsulate from calling /// code. 
-pub trait RecordBackend: Clone + Default { +pub trait RecordBackend: Sync + Send + Clone + Default { /// Access recorded value, allow using the backend as a cache. fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option>; /// Record the actual value. + /// TODO EMCH switch to all ref or all value for param. fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option); /// Merge two record, can fail. fn merge(&mut self, other: Self) -> bool; @@ -322,8 +323,6 @@ impl RecordBackend for Fu } true } - - } #[cfg(feature = "std")] diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index 4b5aef7adb02b..a75f72f8d2309 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -96,6 +96,14 @@ pub trait DefaultKind: 'static + Clone { const KIND: StorageProofKind; } +/// Default the multiple proof to flat. +#[derive(Clone, Copy)] +pub struct FlatDefault; + +impl DefaultKind for FlatDefault { + const KIND: StorageProofKind = StorageProofKind::Flat; +} + impl Decode for MultipleStorageProof { fn decode(value: &mut I) -> CodecResult { let kind = value.read_byte()?; diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index f34107287c70d..c5b54d46c1e04 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -74,7 +74,7 @@ impl MergeableStorageProof for Full { // TODO EMCH can remove Default bound with manual impl on recorder #[cfg(feature = "std")] -impl RegStorageProof for Flat { +impl RegStorageProof for Flat { const INPUT_KIND: InputKind = InputKind::None; type RecordBackend = super::FlatSyncRecorder; @@ -89,7 +89,7 @@ impl RegStorageProof for } #[cfg(feature = "std")] -impl RegStorageProof for Full { +impl RegStorageProof for Full { const INPUT_KIND: InputKind = InputKind::None; type RecordBackend = super::FullSyncRecorder; From 
719d2d7cbbcab8763272d5a0a77753940db50383 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 5 Jun 2020 17:05:16 +0200 Subject: [PATCH 145/185] in progress --- .../state-machine/src/proving_backend.rs | 24 +++++++++---------- primitives/state-machine/src/trie_backend.rs | 13 +++++++++- primitives/trie/src/storage_proof/compact.rs | 2 +- primitives/trie/src/storage_proof/mod.rs | 4 ++-- primitives/trie/src/storage_proof/multiple.rs | 4 ++-- primitives/trie/src/storage_proof/simple.rs | 4 ++-- 6 files changed, 30 insertions(+), 21 deletions(-) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 325e27d891eb2..dcfbe6c45237e 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -117,8 +117,8 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend, H: Hasher, R: RecordBackend> ( - pub TrieBackend, H>, +pub struct ProvingBackend, H: Hasher, R: RegStorageProof> ( + pub TrieBackend, H>, ); /// Trie backend storage with its proof recorder. @@ -133,7 +133,7 @@ impl<'a, S, H, R> ProvingBackend<&'a S, H, R> S: TrieBackendStorage, H: Hasher, H::Out: Codec, - R: RecordBackend, + R: RegStorageProof, { /// Create new proving backend. pub fn new(backend: &'a TrieBackend) -> Self { @@ -160,7 +160,7 @@ impl ProvingBackend S: TrieBackendStorage, H: Hasher, H::Out: Codec, - R: RecordBackend, + R: RegStorageProof, { /// Create new proving backend with the given recorder. 
pub fn from_backend_with_recorder( @@ -197,7 +197,7 @@ impl, H: Hasher, R: RecordBackend> TrieBackendS } } -impl, H: Hasher, R: RecordBackend> std::fmt::Debug +impl, H: Hasher, R: RegStorageProof> std::fmt::Debug for ProvingBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -205,33 +205,31 @@ impl, H: Hasher, R: RecordBackend> std::fmt::De } } -impl ProofRegBackend for ProvingBackend +impl ProofRegBackend for ProvingBackend> where S: TrieBackendStorage, - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, - R: RegStorageProof, { - type State = R::RecordBackend; + type State = as RegStorageProof>::RecordBackend; fn extract_proof(&self, input: ProofInput) -> Self::StorageProofReg { - R::extract_proof( + StorageProof::::extract_proof( &self.0.essence().backend_storage().proof_recorder, input, ) } } -impl Backend for ProvingBackend +impl Backend for ProvingBackend> where S: TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, - R: RegStorageProof, { type Error = String; type Transaction = S::Overlay; - type StorageProofReg = R; + type StorageProofReg = StorageProof; type StorageProof = StorageProof; type ProofRegBackend = Self; type ProofCheckBackend = TrieBackend, H>; diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 48ebd351fdf00..190dbd086fcea 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -27,9 +27,10 @@ use crate::backend::{ProofRegStateFor}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; use crate::{ - StorageKey, StorageValue, Backend, + StorageKey, StorageValue, Backend, backend::ProofCheckBackend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, }; +use sp_trie::MemoryDB; use parking_lot::RwLock; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. 
@@ -290,6 +291,16 @@ impl, H: Hasher> Backend for TrieBackend where } } +impl ProofCheckBackend for TrieBackend, H> where + H::Out: Ord + Codec, +{ + fn create_proof_check_backend( + root: H::Out, + proof: Self::StorageProof, + ) -> Result> { + } +} + #[cfg(test)] pub mod tests { use std::{collections::HashSet, iter}; diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 16e3b58d0ef4c..3417c0a57fd86 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -189,7 +189,7 @@ impl RegStorageProof> for Full #[cfg(feature = "std")] impl RegStorageProof for FullForMerge where - Hash: Default + Eq + Clone + Encode + sp_std::hash::Hash, + Hash: Default + Eq + Clone + Encode + sp_std::hash::Hash + Send + Sync, { const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index e5d79a1928c2c..14144281dc488 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -286,7 +286,7 @@ pub struct FlatSyncRecorder(Arc>>); #[cfg(feature = "std")] -impl RecordBackend for FullSyncRecorder { +impl RecordBackend for FullSyncRecorder { fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option> { self.0.read().get(child_info).and_then(|s| (**s).get(&key).cloned()) } @@ -326,7 +326,7 @@ impl RecordBackend for Fu } #[cfg(feature = "std")] -impl RecordBackend for FlatSyncRecorder { +impl RecordBackend for FlatSyncRecorder { fn get(&self, _child_info: &ChildInfo, key: &Hash) -> Option> { (**self.0.read()).get(&key).cloned() } diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index a75f72f8d2309..0d1820eaeee29 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -92,7 +92,7 @@ impl sp_std::fmt::Debug for MultipleStorageProof { } 
/// Allow to use specific kind of proof by default. -pub trait DefaultKind: 'static + Clone { +pub trait DefaultKind: 'static + Clone + Send + Sync { const KIND: StorageProofKind; } @@ -194,7 +194,7 @@ impl Default for MultipleSyncRecorder { } #[cfg(feature = "std")] -impl RecordBackend for MultipleSyncRecorder { +impl RecordBackend for MultipleSyncRecorder { fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option> { match self { MultipleSyncRecorder::Flat(rec, _ ,_) => rec.get(child_info, key), diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index c5b54d46c1e04..e7886affbc8e4 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -74,7 +74,7 @@ impl MergeableStorageProof for Full { // TODO EMCH can remove Default bound with manual impl on recorder #[cfg(feature = "std")] -impl RegStorageProof for Flat { +impl RegStorageProof for Flat { const INPUT_KIND: InputKind = InputKind::None; type RecordBackend = super::FlatSyncRecorder; @@ -89,7 +89,7 @@ impl RegStorageProof RegStorageProof for Full { +impl RegStorageProof for Full { const INPUT_KIND: InputKind = InputKind::None; type RecordBackend = super::FullSyncRecorder; From 30c1aec8fcbc992b4dd48aaa14af8250e12513c3 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 8 Jun 2020 18:04:37 +0200 Subject: [PATCH 146/185] refact trie code a bit (hasher instead of h::out mainly and no sync). 
--- Cargo.lock | 1 - primitives/state-machine/src/backend.rs | 13 +- .../state-machine/src/proving_backend.rs | 40 +++--- primitives/state-machine/src/trie_backend.rs | 24 ++-- primitives/trie/Cargo.toml | 2 - primitives/trie/src/lib.rs | 2 +- primitives/trie/src/storage_proof/compact.rs | 68 ++++++---- primitives/trie/src/storage_proof/mod.rs | 117 ++++++++++-------- primitives/trie/src/storage_proof/multiple.rs | 107 ++++++++++------ .../trie/src/storage_proof/query_plan.rs | 13 +- primitives/trie/src/storage_proof/simple.rs | 23 ++-- 11 files changed, 246 insertions(+), 164 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 740e73bf77054..060bbff1a0a6e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7900,7 +7900,6 @@ dependencies = [ "hex-literal", "memory-db", "parity-scale-codec", - "parking_lot 0.10.2", "sp-core", "sp-runtime", "sp-std", diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index d15f7023d2878..4f94b0664734e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -40,24 +40,15 @@ pub trait Backend: std::fmt::Debug { /// Storage changes to be applied if committing type Transaction: Consolidate + Default + Send; - /// The proof format use while registering proof. - /// TODO EMCH on paper this is not needed, we shouldn't need this proof type to merge - /// but just use back proof reg backend. -> try to do it or try to remove the - /// storage proof constraint and rename the struct to something that is more build - /// related but do not need to be usable as a backend. - type StorageProofReg: sp_trie::RegStorageProof - + sp_trie::MergeableStorageProof - + Into; // TODO EMCH consider removing this conv. - /// The actual proof produced. type StorageProof: sp_trie::BackendStorageProof; // + sp_trie::WithRegStorageProof; /// Type of proof backend. - type ProofRegBackend: ProofRegBackend; + type ProofRegBackend: ProofRegBackend; /// Type of proof backend. 
- type ProofCheckBackend: ProofCheckBackend; + type ProofCheckBackend: ProofCheckBackend; /// Get keyed storage or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result, Self::Error>; diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index dcfbe6c45237e..c5cd2bf9f88c5 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -25,7 +25,7 @@ use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys, StorageProofKind, TrieNodesStorageProof as StorageProof, ProofInputKind, - ProofInput, RecordMapTrieNodes, RecordBackend, RegStorageProof, ProofFlatDefault, + ProofInput, RecordMapTrieNodes, RecordBackend, RegStorageProof, ProofFlatDefault, BackendStorageProof, }; pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; @@ -117,18 +117,24 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend, H: Hasher, R: RegStorageProof> ( +pub struct ProvingBackend< + S: TrieBackendStorage, + H: Hasher, + R: RegStorageProof, + P, + > ( pub TrieBackend, H>, + PhantomData

, ); /// Trie backend storage with its proof recorder. -pub struct ProofRecorderBackend, H: Hasher, R: RecordBackend> { +pub struct ProofRecorderBackend, H: Hasher, R: RecordBackend> { backend: S, - proof_recorder: R, + proof_recorder: Arc>, _ph: PhantomData, } -impl<'a, S, H, R> ProvingBackend<&'a S, H, R> +impl<'a, S, H, R, P> ProvingBackend<&'a S, H, R, P> where S: TrieBackendStorage, H: Hasher, @@ -151,11 +157,11 @@ impl<'a, S, H, R> ProvingBackend<&'a S, H, R> backend: essence.backend_storage(), proof_recorder, }; - ProvingBackend(TrieBackend::new(recorder, root)) + ProvingBackend(TrieBackend::new(recorder, root), PhantomData) } } -impl ProvingBackend +impl ProvingBackend where S: TrieBackendStorage, H: Hasher, @@ -182,7 +188,7 @@ impl ProvingBackend } } -impl, H: Hasher, R: RecordBackend> TrieBackendStorage +impl, H: Hasher, R: RecordBackend> TrieBackendStorage for ProofRecorderBackend { type Overlay = S::Overlay; @@ -197,21 +203,23 @@ impl, H: Hasher, R: RecordBackend> TrieBackendS } } -impl, H: Hasher, R: RegStorageProof> std::fmt::Debug - for ProvingBackend +impl, H: Hasher, R: RegStorageProof, P> std::fmt::Debug + for ProvingBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "ProvingBackend") } } -impl ProofRegBackend for ProvingBackend> +impl ProofRegBackend for ProvingBackend where S: TrieBackendStorage, H: Hasher + 'static, H::Out: Ord + Codec, + R: RegStorageProof, + P: BackendStorageProof, { - type State = as RegStorageProof>::RecordBackend; + type State = as RegStorageProof>::RecordBackend; fn extract_proof(&self, input: ProofInput) -> Self::StorageProofReg { StorageProof::::extract_proof( @@ -221,16 +229,18 @@ impl ProofRegBackend for ProvingBackend Backend for ProvingBackend> +impl Backend for ProvingBackend where S: TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, + R: RegStorageProof, + P: BackendStorageProof, { type Error = String; type Transaction = S::Overlay; - type StorageProofReg = StorageProof; - 
type StorageProof = StorageProof; + type StorageProofReg = R; + type StorageProof = P; type ProofRegBackend = Self; type ProofCheckBackend = TrieBackend, H>; diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 190dbd086fcea..8bd5238311220 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -34,8 +34,9 @@ use sp_trie::MemoryDB; use parking_lot::RwLock; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. -pub struct TrieBackend, H: Hasher> { +pub struct TrieBackend, H: Hasher, R, P> { pub (crate) essence: TrieBackendEssence, + _ph: PhantomData<(R, P)>, } impl, H: Hasher> TrieBackend where H::Out: Codec { @@ -101,21 +102,30 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } } -impl, H: Hasher> std::fmt::Debug for TrieBackend { +impl, H: Hasher, R, P> std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher> Backend for TrieBackend where +impl Backend for TrieBackend where + H: Hasher, + S: TrieBackendStorage, H::Out: Ord + Codec, + R: RegStorageProof, + P: BackendStorageProof, { type Error = String; type Transaction = S::Overlay; - type StorageProof = sp_trie::TrieNodesStorageProof; - type StorageProofReg = sp_trie::TrieNodesStorageProof; - type ProofRegBackend = crate::proving_backend::ProvingBackend>::RecordBackend>; - type ProofCheckBackend = TrieBackend, H>; + type StorageProofReg = R, + type StorageProof = P, + type ProofRegBackend = crate::proving_backend::ProvingBackend< + S, + H, + >::RecordBackend, + Self::StorageProof, + >; + type ProofCheckBackend = TrieBackend, H, R, P>; fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index db7abda66cef6..abca6927e4ff4 100644 --- a/primitives/trie/Cargo.toml +++ 
b/primitives/trie/Cargo.toml @@ -26,7 +26,6 @@ trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.20.0", default-features = false } sp-core = { version = "2.0.0-rc2", default-features = false, path = "../core" } hashbrown = { version = "0.6.3", default-features = false, features = [ "ahash" ] } -parking_lot = { version = "0.10.0", optional = true } # TODO EMCH remove if changing trait [dev-dependencies] trie-bench = "0.21.0" @@ -38,7 +37,6 @@ sp-runtime = { version = "2.0.0-rc2", path = "../runtime" } [features] default = ["std"] std = [ - "parking_lot", "sp-std/std", "codec/std", "hash-db/std", diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index d106170e81e2b..59246e3a94b72 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -34,7 +34,7 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, +pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, compact::FullForMerge, Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, BackendStorageProof, MergeableStorageProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof}; diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 3417c0a57fd86..5251558eb4fef 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -81,7 +81,7 @@ impl sp_std::fmt::Debug for Full { pub struct FullForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>); -impl StorageProof for Flat { +impl StorageProof for Flat { fn empty() -> Self { Flat(Default::default(), PhantomData) } @@ -91,7 +91,7 @@ impl StorageProof for Flat { } } -impl StorageProof for Full 
{ +impl StorageProof for Full { fn empty() -> Self { Full(Default::default(), PhantomData) } @@ -132,20 +132,19 @@ impl MergeableStorageProof for FullForMerge { } // TODO EMCH can remove Default bound with manual impl on recorder -#[cfg(feature = "std")] -impl RegStorageProof> for Flat +impl RegStorageProof for Flat where - T: 'static + TrieLayout, + T: TrieLayout, TrieHash: Decode, { const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; - type RecordBackend = super::FullSyncRecorder>; + type RecordBackend = super::FullRecorder; fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { if let Input::ChildTrieRoots(roots) = input { let mut result = Vec::default(); - for (child_info, set) in recorder.0.read().iter() { + for (child_info, set) in recorder.0.iter() { let root = roots.get(&child_info.proof_info()) .and_then(|r| Decode::decode(&mut &r[..]).ok()) .ok_or_else(|| missing_pack_input())?; @@ -159,20 +158,19 @@ impl RegStorageProof> for Flat } } -#[cfg(feature = "std")] -impl RegStorageProof> for Full +impl RegStorageProof for Full where - T: 'static + TrieLayout, + T: TrieLayout, TrieHash: Decode, { const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; - type RecordBackend = super::FullSyncRecorder>; + type RecordBackend = super::FullRecorder; fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { if let Input::ChildTrieRoots(roots) = input { let mut result = ChildrenProofMap::default(); - for (child_info, set) in recorder.0.read().iter() { + for (child_info, set) in recorder.0.iter() { let root = roots.get(&child_info.proof_info()) .and_then(|r| Decode::decode(&mut &r[..]).ok()) .ok_or_else(|| missing_pack_input())?; @@ -186,19 +184,19 @@ impl RegStorageProof> for Full } } -#[cfg(feature = "std")] -impl RegStorageProof for FullForMerge +impl RegStorageProof for FullForMerge where - Hash: Default + Eq + Clone + Encode + sp_std::hash::Hash + Send + Sync, + H: Hasher, + H::Out: Encode, { const INPUT_KIND: InputKind = 
InputKind::ChildTrieRoots; - type RecordBackend = super::FullSyncRecorder; + type RecordBackend = super::FullRecorder; fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { if let Input::ChildTrieRoots(roots) = input { let mut result = ChildrenProofMap::default(); - for (child_info, set) in recorder.0.read().iter() { + for (child_info, set) in recorder.0.iter() { let root = roots.get(&child_info.proof_info()) .ok_or_else(|| missing_pack_input())?.clone(); let trie_nodes: BTreeMap<_, _> = set @@ -214,9 +212,21 @@ impl RegStorageProof for FullForMerge } } -impl BackendStorageProof for Flat { } +impl BackendStorageProof for Flat + where + T: TrieLayout, + TrieHash: Codec, +{ + type StorageProofReg = FullForMerge; +} -impl BackendStorageProof for Full { } +impl BackendStorageProof for Full + where + T: TrieLayout, + TrieHash: Codec, +{ + type StorageProofReg = FullForMerge; +} // Note that this implementation is only possible // as long as we only have default child trie which @@ -246,7 +256,7 @@ impl FullForMerge { // TODO EMCH use try_into! fn to_full(self) -> Result> where - L: 'static + TrieLayout, + L: TrieLayout, TrieHash: Codec, { let mut result = ChildrenProofMap::::default(); @@ -263,7 +273,7 @@ impl FullForMerge { // TODO EMCH use try_into! 
fn to_flat(self) -> Result> where - L: 'static + TrieLayout, + L: TrieLayout, TrieHash: Codec, { let mut result = Vec::::default(); @@ -280,7 +290,7 @@ impl FullForMerge { impl Into> for FullForMerge where - L: 'static + TrieLayout, + L: TrieLayout, TrieHash: Codec, { // TODO consider only using try into (may not be very straightforward with backend) @@ -292,7 +302,7 @@ impl Into> for FullForMerge impl Into> for FullForMerge where - L: 'static + TrieLayout, + L: TrieLayout, TrieHash: Codec, { fn into(self) -> Flat { @@ -301,6 +311,18 @@ impl Into> for FullForMerge } } +impl Into for FullForMerge +{ + fn into(self) -> super::simple::Flat { + let mut result = ProofNodes::default(); + for (_child_info, (nodes, _root)) in self.0 { + // TODO EMCH do not extend on first + result.extend(nodes.0.into_iter().map(|(_k, v)| v)); + } + super::simple::Flat(result) + } +} + impl TryInto for Flat { type Error = super::Error; diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 14144281dc488..c8e9420dc10b0 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -17,11 +17,6 @@ use crate::Layout; use sp_storage::{ChildInfo, ChildInfoProof, ChildrenMap}; use trie_db::DBValue; -#[cfg(feature = "std")] -use std::sync::Arc; -#[cfg(feature = "std")] -use parking_lot::RwLock; - pub mod simple; pub mod compact; pub mod query_plan; @@ -201,7 +196,7 @@ pub enum InputKind { } /// Trait for proofs that can be use as a partial backend for verification. -pub trait StorageProof: sp_std::fmt::Debug + Sized + 'static { +pub trait StorageProof: sp_std::fmt::Debug + Sized { /// Returns a new empty proof. /// /// An empty proof is capable of only proving trivial statements (ie. that an empty set of @@ -221,12 +216,12 @@ pub trait MergeableStorageProof: StorageProof { } /// Trait for proofs that can be recorded against a trie backend. 
-pub trait RegStorageProof: StorageProof { +pub trait RegStorageProof: StorageProof { /// Variant of enum input to use. const INPUT_KIND: InputKind; /// The data structure for recording proof entries. - type RecordBackend: RecordBackend; + type RecordBackend: RecordBackend; /// Extracts the gathered unordered encoded trie nodes. /// Depending on `kind`, encoded trie nodes can change @@ -246,7 +241,12 @@ pub trait WithRegStorageProof: Sized { type RegStorageProof: Into + RegStorageProof; } */ -pub trait BackendStorageProof: Codec + StorageProof {} +pub trait BackendStorageProof: Codec + StorageProof { + /// The proof format use while registering proof. + type StorageProofReg: RegStorageProof + + MergeableStorageProof + + Into; // TODO EMCH consider removing this conv or make it a try into?? +} /// Trait for proofs that can use to create a partial trie backend. pub trait CheckableStorageProof: Codec + StorageProof { @@ -259,49 +259,65 @@ pub trait CheckableStorageProof: Codec + StorageProof { /// TODO EMCH consider using &mut and change reg storage (consume) proof /// to implement without rc & sync, and encapsulate from calling /// code. -pub trait RecordBackend: Sync + Send + Clone + Default { +/// TODO EMCH here we pass Hasher as parameter for convenience, but we only really need H::Out +pub trait RecordBackend: Clone + Default { /// Access recorded value, allow using the backend as a cache. - fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option>; + fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option>; /// Record the actual value. - /// TODO EMCH switch to all ref or all value for param. - fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option); + fn record(&mut self, child_info: ChildInfo, key: H::Out, value: Option); /// Merge two record, can fail. fn merge(&mut self, other: Self) -> bool; } -#[cfg(feature = "std")] -#[derive(Clone, Default)] /// Records are separated by child trie, this is needed for /// proof compaction. 
-pub struct FullSyncRecorder(Arc>>>); +pub struct FullRecorder(ChildrenMap>); -#[cfg(feature = "std")] -#[derive(Clone, Default)] /// Single storage for all recoded nodes (as in /// state db column). /// That this variant exists only for performance /// (on less map access than in `Full`), but is not strictly /// necessary. -pub struct FlatSyncRecorder(Arc>>); +pub struct FlatRecorder(RecordMapTrieNodes); +impl Default for FlatRecorder { + fn default() -> Self { + FlatRecorder(Default::default()) + } +} -#[cfg(feature = "std")] -impl RecordBackend for FullSyncRecorder { - fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option> { - self.0.read().get(child_info).and_then(|s| (**s).get(&key).cloned()) +impl Default for FullRecorder { + fn default() -> Self { + FullRecorder(Default::default()) + } +} + +impl Clone for FlatRecorder { + fn clone(&self) -> Self { + FlatRecorder(self.0.clone()) + } +} + +impl Clone for FullRecorder { + fn clone(&self) -> Self { + FullRecorder(self.0.clone()) + } +} + +impl RecordBackend for FullRecorder { + fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option> { + self.0.get(child_info).and_then(|s| (**s).get(&key).cloned()) } - fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option) { - self.0.write().entry(child_info.clone()) + fn record(&mut self, child_info: ChildInfo, key: H::Out, value: Option) { + self.0.entry(child_info) .or_default() - .insert(key.clone(), value.clone()); + .insert(key, value); } - fn merge(&mut self, other: Self) -> bool { - let mut first = self.0.write(); - let mut second = other.0.write(); - for (child_info, other) in std::mem::replace(&mut *second, Default::default()) { - match first.entry(child_info) { + fn merge(&mut self, mut other: Self) -> bool { + for (child_info, other) in std::mem::replace(&mut other.0, Default::default()) { + match self.0.entry(child_info) { Entry::Occupied(mut entry) => { for (key, value) in other.0 { match entry.get_mut().entry(key) { @@ -325,21 +341,18 @@ 
impl RecordBacken } } -#[cfg(feature = "std")] -impl RecordBackend for FlatSyncRecorder { - fn get(&self, _child_info: &ChildInfo, key: &Hash) -> Option> { - (**self.0.read()).get(&key).cloned() +impl RecordBackend for FlatRecorder { + fn get(&self, _child_info: &ChildInfo, key: &H::Out) -> Option> { + (*self.0).get(&key).cloned() } - fn record(&self, _child_info: &ChildInfo, key: &Hash, value: Option) { - self.0.write().insert(key.clone(), value.clone()); + fn record(&mut self, _child_info: ChildInfo, key: H::Out, value: Option) { + (*self.0).insert(key.clone(), value.clone()); } - fn merge(&mut self, other: Self) -> bool { - let mut first = self.0.write(); - let mut second = other.0.write(); - for (key, value) in std::mem::replace(&mut *second, Default::default()).0 { - match first.entry(key) { + fn merge(&mut self, mut other: Self) -> bool { + for (key, value) in std::mem::replace(&mut other.0, Default::default()).0 { + match self.0.entry(key) { HEntry::Occupied(entry) => { if entry.get() != &value { return false; @@ -403,30 +416,36 @@ impl IntoIterator for ChildrenProofMap { } /// Container recording trie nodes. 
-#[derive(Clone)] -pub struct RecordMapTrieNodes(HashMap>); +pub struct RecordMapTrieNodes(HashMap>); -impl sp_std::default::Default for RecordMapTrieNodes { +impl sp_std::default::Default for RecordMapTrieNodes { fn default() -> Self { RecordMapTrieNodes(Default::default()) } } -impl sp_std::ops::Deref for RecordMapTrieNodes { - type Target = HashMap>; +impl Clone for RecordMapTrieNodes { + fn clone(&self) -> Self { + RecordMapTrieNodes(self.0.clone()) + } +} + + +impl sp_std::ops::Deref for RecordMapTrieNodes { + type Target = HashMap>; fn deref(&self) -> &Self::Target { &self.0 } } -impl sp_std::ops::DerefMut for RecordMapTrieNodes { +impl sp_std::ops::DerefMut for RecordMapTrieNodes { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl HashDBRef for RecordMapTrieNodes { +impl HashDBRef for RecordMapTrieNodes { fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { self.0.get(key).and_then(Clone::clone) } diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index 0d1820eaeee29..76bffdfae7f6d 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -92,7 +92,7 @@ impl sp_std::fmt::Debug for MultipleStorageProof { } /// Allow to use specific kind of proof by default. 
-pub trait DefaultKind: 'static + Clone + Send + Sync { +pub trait DefaultKind: Clone + Send + Sync { const KIND: StorageProofKind; } @@ -134,7 +134,7 @@ impl Encode for MultipleStorageProof { } } -impl StorageProof for MultipleStorageProof { +impl StorageProof for MultipleStorageProof { fn empty() -> Self { match D::KIND { StorageProofKind::Flat => @@ -160,69 +160,76 @@ impl StorageProof for MultipleStorageProof { } } -#[cfg(feature = "std")] -#[derive(Clone)] -pub enum MultipleSyncRecorder { - Flat(super::FlatSyncRecorder, StorageProofKind, PhantomData), - Full(super::FullSyncRecorder, StorageProofKind), +pub enum MultipleRecorder { + Flat(super::FlatRecorder, StorageProofKind, PhantomData), + Full(super::FullRecorder, StorageProofKind), } -impl MultipleSyncRecorder { +impl MultipleRecorder { /// Instantiate a recorder of a given type. pub fn new_recorder(kind: StorageProofKind) -> Self { match kind { - StorageProofKind::Flat => MultipleSyncRecorder::Flat(Default::default(), D::KIND, PhantomData), - StorageProofKind::TrieSkipHashes => MultipleSyncRecorder::Full(Default::default(), D::KIND), - StorageProofKind::FullForMerge => MultipleSyncRecorder::Full(Default::default(), D::KIND), - StorageProofKind::KnownQueryPlanAndValues => MultipleSyncRecorder::Full(Default::default(), D::KIND), + StorageProofKind::Flat => MultipleRecorder::Flat(Default::default(), D::KIND, PhantomData), + StorageProofKind::TrieSkipHashes => MultipleRecorder::Full(Default::default(), D::KIND), + StorageProofKind::FullForMerge => MultipleRecorder::Full(Default::default(), D::KIND), + StorageProofKind::KnownQueryPlanAndValues => MultipleRecorder::Full(Default::default(), D::KIND), } } /// Targetted storage proof kind. 
pub fn target(&self) -> StorageProofKind { match self { - MultipleSyncRecorder::Flat(_, k, _) => *k, - MultipleSyncRecorder::Full(_, k) => *k, + MultipleRecorder::Flat(_, k, _) => *k, + MultipleRecorder::Full(_, k) => *k, } } } -impl Default for MultipleSyncRecorder { +impl Default for MultipleRecorder { fn default() -> Self { Self::new_recorder(D::KIND) } } -#[cfg(feature = "std")] -impl RecordBackend for MultipleSyncRecorder { - fn get(&self, child_info: &ChildInfo, key: &Hash) -> Option> { +impl Clone for MultipleRecorder { + fn clone(&self) -> Self { match self { - MultipleSyncRecorder::Flat(rec, _ ,_) => rec.get(child_info, key), - MultipleSyncRecorder::Full(rec, _) => rec.get(child_info, key), + MultipleRecorder::Flat(data, kind, _) => MultipleRecorder::Flat(data.clone(), *kind, PhantomData), + MultipleRecorder::Full(data, kind) => MultipleRecorder::Full(data.clone(), *kind), + } + } +} + + +impl RecordBackend for MultipleRecorder { + fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option> { + match self { + MultipleRecorder::Flat(rec, _ ,_) => rec.get(child_info, key), + MultipleRecorder::Full(rec, _) => rec.get(child_info, key), } } - fn record(&self, child_info: &ChildInfo, key: &Hash, value: Option) { + fn record(&mut self, child_info: ChildInfo, key: H::Out, value: Option) { match self { - MultipleSyncRecorder::Flat(rec, _, _) => rec.record(child_info, key, value), - MultipleSyncRecorder::Full(rec, _) => rec.record(child_info, key, value), + MultipleRecorder::Flat(rec, _, _) => rec.record(child_info, key, value), + MultipleRecorder::Full(rec, _) => rec.record(child_info, key, value), } } fn merge(&mut self, other: Self) -> bool { match self { - MultipleSyncRecorder::Flat(rec, _, _) => { + MultipleRecorder::Flat(rec, _, _) => { match other { - MultipleSyncRecorder::Flat(oth, _, _) => { + MultipleRecorder::Flat(oth, _, _) => { rec.merge(oth); true }, _ => false } }, - MultipleSyncRecorder::Full(rec, _) => { + MultipleRecorder::Full(rec, _) => { 
match other { - MultipleSyncRecorder::Full(oth, _) => { + MultipleRecorder::Full(oth, _) => { rec.merge(oth); true }, @@ -234,11 +241,10 @@ impl RegStorageProof for MultipleStorageProof +impl RegStorageProof for MultipleStorageProof where - Hash: Hasher + 'static, - Hash::Out: Codec, + H: Hasher, + H::Out: Codec, D: DefaultKind, { // Actually one could ignore this if he knows its type to be non compact. @@ -246,17 +252,17 @@ impl RegStorageProof for MultipleStorageProof; + type RecordBackend = MultipleRecorder; fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { match recorder.target() { StorageProofKind::Flat => { - if let MultipleSyncRecorder::Flat(rec, _, _) = recorder { + if let MultipleRecorder::Flat(rec, _, _) = recorder { return Ok(MultipleStorageProof::Flat(super::simple::Flat::extract_proof(rec, input)?)) } }, StorageProofKind::TrieSkipHashes => { - if let MultipleSyncRecorder::Full(rec, _) = recorder { + if let MultipleRecorder::Full(rec, _) = recorder { return Ok(MultipleStorageProof::TrieSkipHashes( super::compact::Flat::extract_proof(rec, input)?, PhantomData, @@ -264,14 +270,14 @@ impl RegStorageProof for MultipleStorageProof { - if let MultipleSyncRecorder::Full(rec, _) = recorder { + if let MultipleRecorder::Full(rec, _) = recorder { return Ok(MultipleStorageProof::FullForMerge( super::compact::FullForMerge::extract_proof(rec, input)?, )) } }, StorageProofKind::KnownQueryPlanAndValues => { - if let MultipleSyncRecorder::Full(rec, _) = recorder { + if let MultipleRecorder::Full(rec, _) = recorder { return Ok(MultipleStorageProof::KnownQueryPlanAndValues( super::query_plan::KnownQueryPlanAndValues::extract_proof(rec, input)?, )) @@ -282,7 +288,14 @@ impl RegStorageProof for MultipleStorageProof BackendStorageProof for MultipleStorageProof { } +impl BackendStorageProof for MultipleStorageProof + where + H: Hasher, + H::Out: Codec, + D: DefaultKind, +{ + type StorageProofReg = super::compact::FullForMerge; +} impl TryInto for 
MultipleStorageProof { type Error = super::Error; @@ -340,6 +353,26 @@ impl MultipleStorageProof { } } + +impl Into> for super::compact::FullForMerge + where + H::Out: Codec, +{ + fn into(self) -> MultipleStorageProof { + match D::KIND { + StorageProofKind::Flat => MultipleStorageProof::Flat(self.into()), + StorageProofKind::TrieSkipHashes => MultipleStorageProof::TrieSkipHashes(self.into(), PhantomData), + StorageProofKind::FullForMerge => MultipleStorageProof::FullForMerge(self), + // we cannot convert, actually this should not be in storage proof kind TODO EMCH + // this was only here to be able to product query plan without using different backend. + // User shall therefore register and try into: but target is that user uses the query_plan + // backend. + StorageProofKind::KnownQueryPlanAndValues => MultipleStorageProof::FullForMerge(self), + } + } +} + + /* /// Can also fail on invalid compact proof. pub fn into_partial_db(self) -> Result>> diff --git a/primitives/trie/src/storage_proof/query_plan.rs b/primitives/trie/src/storage_proof/query_plan.rs index 4d099d23f90bd..9a353772506a3 100644 --- a/primitives/trie/src/storage_proof/query_plan.rs +++ b/primitives/trie/src/storage_proof/query_plan.rs @@ -46,7 +46,7 @@ impl Clone for KnownQueryPlanAndValues { } } -impl StorageProof for KnownQueryPlanAndValues { +impl StorageProof for KnownQueryPlanAndValues { fn empty() -> Self { KnownQueryPlanAndValues(Default::default(), PhantomData) } @@ -56,21 +56,20 @@ impl StorageProof for KnownQueryPlanAndValues { } } -#[cfg(feature = "std")] -impl RegStorageProof> for KnownQueryPlanAndValues +impl RegStorageProof for KnownQueryPlanAndValues where - T: 'static + TrieConfiguration, + T: TrieConfiguration, TrieHash: Decode, { const INPUT_KIND: InputKind = InputKind::QueryPlan; - type RecordBackend = super::FullSyncRecorder>; + type RecordBackend = super::FullRecorder; fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result { if let 
Input::QueryPlan(input_children) = input { let mut result = ChildrenProofMap::default(); let mut root_hash = TrieHash::::default(); - for (child_info, set) in recorder.0.read().iter() { + for (child_info, set) in recorder.0.iter() { let child_info_proof = child_info.proof_info(); if let Some((root, keys)) = input_children.get(&child_info_proof) { // Layout h is the only supported one at the time being @@ -94,7 +93,7 @@ impl RegStorageProof> for KnownQueryPlanAndValues impl CheckableStorageProof for KnownQueryPlanAndValues where - T: 'static + TrieConfiguration, + T: TrieConfiguration, TrieHash: Decode, { fn verify(self, input: &Input) -> Result { diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index e7886affbc8e4..98e2e532dfb8b 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -72,15 +72,13 @@ impl MergeableStorageProof for Full { } } -// TODO EMCH can remove Default bound with manual impl on recorder -#[cfg(feature = "std")] -impl RegStorageProof for Flat { +impl RegStorageProof for Flat { const INPUT_KIND: InputKind = InputKind::None; - type RecordBackend = super::FlatSyncRecorder; + type RecordBackend = super::FlatRecorder; fn extract_proof(recorder: &Self::RecordBackend, _input: Input) -> Result { - let trie_nodes = recorder.0.read() + let trie_nodes = recorder.0 .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) .collect(); @@ -88,15 +86,14 @@ impl RegStoragePr } } -#[cfg(feature = "std")] -impl RegStorageProof for Full { +impl RegStorageProof for Full { const INPUT_KIND: InputKind = InputKind::None; - type RecordBackend = super::FullSyncRecorder; + type RecordBackend = super::FullRecorder; fn extract_proof(recorder: &Self::RecordBackend, _input: Input) -> Result { let mut result = ChildrenProofMap::default(); - for (child_info, set) in recorder.0.read().iter() { + for (child_info, set) in recorder.0.iter() { let trie_nodes: Vec> = set 
.iter() .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) @@ -107,9 +104,13 @@ impl RegStoragePr } } -impl BackendStorageProof for Flat { } +impl BackendStorageProof for Flat { + type StorageProofReg = Self; +} -impl BackendStorageProof for Full { } +impl BackendStorageProof for Full { + type StorageProofReg = Self; +} // Note that this implementation is only possible // as long as we only have default child trie which From bfd5a6dd046aefa0f734975b5a8744651aff034f Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 8 Jun 2020 19:23:05 +0200 Subject: [PATCH 147/185] New proof extract function to implement. --- primitives/state-machine/src/backend.rs | 12 +++- .../state-machine/src/in_memory_backend.rs | 20 +++---- primitives/state-machine/src/lib.rs | 2 +- .../state-machine/src/proving_backend.rs | 60 +++++++++---------- primitives/state-machine/src/trie_backend.rs | 40 ++++++++----- primitives/trie/src/lib.rs | 7 ++- primitives/trie/src/storage_proof/mod.rs | 5 +- 7 files changed, 84 insertions(+), 62 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 4f94b0664734e..d1810481ef135 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -24,7 +24,7 @@ use crate::{ trie_backend_essence::TrieBackendStorage, UsageInfo, StorageKey, StorageValue, StorageCollection, }; -use sp_trie::ProofInput; +use sp_trie::{ProofInput, BackendStorageProof}; /// Access the state of the proof backend of a backend. pub type ProofRegStateFor = <>::ProofRegBackend as ProofRegBackend>::State; @@ -41,7 +41,7 @@ pub trait Backend: std::fmt::Debug { type Transaction: Consolidate + Default + Send; /// The actual proof produced. - type StorageProof: sp_trie::BackendStorageProof; + type StorageProof: BackendStorageProof; // + sp_trie::WithRegStorageProof; /// Type of proof backend. 
@@ -267,7 +267,7 @@ pub trait ProofRegBackend: crate::backend::Backend type State: Default + Send + Sync + Clone; /// Extract proof when run. - fn extract_proof(&self, input: ProofInput) -> Self::StorageProofReg; + fn extract_proof(&self, input: ProofInput) -> >::StorageProofReg; } /// Backend used to produce proof. @@ -366,6 +366,12 @@ impl<'a, T, H> Backend for &'a T fn usage_info(&self) -> UsageInfo { (*self).usage_info() } + + fn from_reg_state(self, _previous: ProofRegStateFor) -> Option { + // cannot move out of reference, consider cloning or + // if needed. + None + } } /// Trait that allows consolidate two transactions together. diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 8c0ae1ec8bf41..344e66d20c552 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -59,7 +59,7 @@ where } /// Create a new empty instance of in-memory backend. -pub fn new_in_mem() -> TrieBackend, H> +pub fn new_in_mem() -> TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -69,7 +69,7 @@ where backend } -impl TrieBackend, H> +impl TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -133,7 +133,7 @@ where } } -impl Clone for TrieBackend, H> +impl Clone for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -142,7 +142,7 @@ where } } -impl Default for TrieBackend, H> +impl Default for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -151,8 +151,8 @@ where } } -impl From, BTreeMap>> - for TrieBackend, H> +impl From, BTreeMap>> + for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -163,7 +163,7 @@ where } } -impl From for TrieBackend, H> +impl From for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -175,7 +175,7 @@ where } } -impl From> for TrieBackend, H> +impl From> for TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -186,8 +186,8 @@ where } } -impl From, StorageCollection)>> - for TrieBackend, H> +impl From, StorageCollection)>> + for 
TrieBackend, H, P> where H::Out: Codec + Ord, { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 23568bfb97ffc..fdd363a97c8cd 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -93,7 +93,7 @@ pub type ChangesTrieTransaction = ( ); /// Trie backend with in-memory storage. -pub type InMemoryBackend = TrieBackend, H>; +pub type InMemoryBackend = TrieBackend, H, sp_trie::SimpleProof>; /// Strategy for executing a call into the runtime. #[derive(Copy, Clone, Eq, PartialEq, Debug)] diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index c5cd2bf9f88c5..62fd2a4d8fd9c 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -23,7 +23,7 @@ use codec::{Decode, Codec}; use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, + MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, RecordBackendFor, record_all_keys, StorageProofKind, TrieNodesStorageProof as StorageProof, ProofInputKind, ProofInput, RecordMapTrieNodes, RecordBackend, RegStorageProof, ProofFlatDefault, BackendStorageProof, }; @@ -120,10 +120,9 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> pub struct ProvingBackend< S: TrieBackendStorage, H: Hasher, - R: RegStorageProof, - P, + P: BackendStorageProof, > ( - pub TrieBackend, H>, + pub TrieBackend>, H, P>, PhantomData

, ); @@ -134,22 +133,22 @@ pub struct ProofRecorderBackend, H: Hasher, R: RecordBa _ph: PhantomData, } -impl<'a, S, H, R, P> ProvingBackend<&'a S, H, R, P> +impl<'a, S, H, P> ProvingBackend<&'a S, H, P> where S: TrieBackendStorage, H: Hasher, H::Out: Codec, - R: RegStorageProof, + P: BackendStorageProof, { /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { + pub fn new(backend: &'a TrieBackend) -> Self { let proof_recorder = Default::default(); Self::new_with_recorder(backend, proof_recorder) } fn new_with_recorder( - backend: &'a TrieBackend, - proof_recorder: R, + backend: &'a TrieBackend, + proof_recorder: RecordBackendFor, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); @@ -161,18 +160,18 @@ impl<'a, S, H, R, P> ProvingBackend<&'a S, H, R, P> } } -impl ProvingBackend +impl ProvingBackend where S: TrieBackendStorage, H: Hasher, H::Out: Codec, - R: RegStorageProof, + P: BackendStorageProof, { /// Create new proving backend with the given recorder. pub fn from_backend_with_recorder( backend: S, root: H::Out, - proof_recorder: R, + proof_recorder: RecordBackendFor, ) -> Self { let recorder = ProofRecorderBackend { backend, @@ -183,7 +182,7 @@ impl ProvingBackend /// Extract current recording state. /// This is sharing a rc over a sync reference. 
- pub fn extract_recorder(&self) -> R { + pub fn extract_recorder(&self) -> RecordBackendFor { self.0.backend_storage().proof_recorder.clone() } } @@ -203,46 +202,43 @@ impl, H: Hasher, R: RecordBackend> TrieBackendStorag } } -impl, H: Hasher, R: RegStorageProof, P> std::fmt::Debug - for ProvingBackend +impl, H: Hasher, P: BackendStorageProof> std::fmt::Debug + for ProvingBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "ProvingBackend") } } -impl ProofRegBackend for ProvingBackend +impl ProofRegBackend for ProvingBackend where S: TrieBackendStorage, - H: Hasher + 'static, + H: Hasher, H::Out: Ord + Codec, - R: RegStorageProof, - P: BackendStorageProof, + P: BackendStorageProof, { type State = as RegStorageProof>::RecordBackend; - fn extract_proof(&self, input: ProofInput) -> Self::StorageProofReg { - StorageProof::::extract_proof( + fn extract_proof(&self, input: ProofInput) -> >::StorageProofReg { + >::StorageProofReg::::extract_proof( &self.0.essence().backend_storage().proof_recorder, input, ) } } -impl Backend for ProvingBackend +impl Backend for ProvingBackend where S: TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, - R: RegStorageProof, - P: BackendStorageProof, + P: BackendStorageProof, { type Error = String; type Transaction = S::Overlay; - type StorageProofReg = R; type StorageProof = P; type ProofRegBackend = Self; - type ProofCheckBackend = TrieBackend, H>; + type ProofCheckBackend = TrieBackend, H, P>; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.trie_backend.storage(key) @@ -348,10 +344,10 @@ impl Backend for ProvingBackend } /// Create flat proof check backend. -pub fn create_flat_proof_check_backend( +pub fn create_flat_proof_check_backend( root: H::Out, - proof: StorageProof, -) -> Result, H>, Box> + proof: P, +) -> Result, H, P>, Box> where H: Hasher, H::Out: Codec, @@ -366,10 +362,10 @@ where } /// Create proof check backend. 
-pub fn create_proof_check_backend( +pub fn create_proof_check_backend( root: H::Out, - proof: StorageProof, -) -> Result>, H>, Box> + proof: P, +) -> Result>, H, P>, Box> where H: Hasher, H::Out: Codec, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 8bd5238311220..9081df1d3bd01 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,7 +20,7 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, - ChildrenProofMap, ProofInput}; + ChildrenProofMap, ProofInput, BackendStorageProof}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_trie::RegStorageProof; use crate::backend::{ProofRegStateFor}; @@ -32,18 +32,20 @@ use crate::{ }; use sp_trie::MemoryDB; use parking_lot::RwLock; +use std::marker::PhantomData; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. -pub struct TrieBackend, H: Hasher, R, P> { +pub struct TrieBackend, H: Hasher, P> { pub (crate) essence: TrieBackendEssence, - _ph: PhantomData<(R, P)>, + _ph: PhantomData

, } -impl, H: Hasher> TrieBackend where H::Out: Codec { +impl, H: Hasher, P> TrieBackend where H::Out: Codec { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root, None), + _ph: PhantomData, } } @@ -53,6 +55,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec let register_roots = Some(RwLock::new(Default::default())); TrieBackend { essence: TrieBackendEssence::new(storage, root, register_roots), + _ph: PhantomData, } } @@ -102,30 +105,27 @@ impl, H: Hasher> TrieBackend where H::Out: Codec } } -impl, H: Hasher, R, P> std::fmt::Debug for TrieBackend { +impl, H: Hasher, P> std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "TrieBackend") } } -impl Backend for TrieBackend where +impl Backend for TrieBackend where H: Hasher, S: TrieBackendStorage, H::Out: Ord + Codec, - R: RegStorageProof, - P: BackendStorageProof, + P: BackendStorageProof, { type Error = String; type Transaction = S::Overlay; - type StorageProofReg = R, - type StorageProof = P, + type StorageProof = P; type ProofRegBackend = crate::proving_backend::ProvingBackend< S, H, - >::RecordBackend, Self::StorageProof, >; - type ProofCheckBackend = TrieBackend, H, R, P>; + type ProofCheckBackend = TrieBackend, H, P>; fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) @@ -301,13 +301,25 @@ impl Backend for TrieBackend where } } -impl ProofCheckBackend for TrieBackend, H> where - H::Out: Ord + Codec, +impl ProofCheckBackend for TrieBackend, H, P> + where + H::Out: Ord + Codec, + P: BackendStorageProof, { fn create_proof_check_backend( root: H::Out, proof: Self::StorageProof, ) -> Result> { + use hash_db::HashDB; + let mut mem_db = MemoryDB::new(); + for node in proof.trie_backend_nodes()? 
{ + let hash = H::hash(node.as_ref(); + mem_db.emplace(key, hash_db::EMPTY_PREFIX, node); + } + if !mem_db.contains(key, hash_db::EMPTY_PREFIX) { + return Err("No matching root for proof".into()); + } + Ok(TrieBackend::new(mem_db, root)) } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 59246e3a94b72..f83e5ace54bbb 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -37,7 +37,7 @@ pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, compact::FullForMerge, Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, BackendStorageProof, MergeableStorageProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, - multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof}; + multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, simple::Flat as SimpleProof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, @@ -49,6 +49,11 @@ pub use memory_db::prefixed_key; /// Various re-exports from the `hash-db` crate. pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +/// Access record backend for a given backend storage proof. +/// TODO EMCH check if can be use at other place (rg 'as BackendS') +pub type RecordBackendFor = <

>::StorageProofReg as RegStorageProof>::RecordBackend; + + #[derive(Default)] /// substrate trie layout pub struct Layout(sp_std::marker::PhantomData); diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index c8e9420dc10b0..f375bc3aa54b7 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -246,6 +246,9 @@ pub trait BackendStorageProof: Codec + StorageProof { type StorageProofReg: RegStorageProof + MergeableStorageProof + Into; // TODO EMCH consider removing this conv or make it a try into?? + + /// To check proof over a trie backend. + fn trie_backend_nodes(self) -> Result>>; } /// Trait for proofs that can use to create a partial trie backend. @@ -260,7 +263,7 @@ pub trait CheckableStorageProof: Codec + StorageProof { /// to implement without rc & sync, and encapsulate from calling /// code. /// TODO EMCH here we pass Hasher as parameter for convenience, but we only really need H::Out -pub trait RecordBackend: Clone + Default { +pub trait RecordBackend: Send + Sync + Clone + Default { /// Access recorded value, allow using the backend as a cache. fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option>; /// Record the actual value. 
From d7f51d6b3451cbd06cbf9000b81fad8080225592 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 9 Jun 2020 18:14:34 +0200 Subject: [PATCH 148/185] very messy but got something with right type system for state machine --- primitives/state-machine/src/backend.rs | 14 +- .../state-machine/src/in_memory_backend.rs | 16 +- primitives/state-machine/src/lib.rs | 123 ++++---- .../state-machine/src/proving_backend.rs | 294 ++++++++++-------- primitives/state-machine/src/trie_backend.rs | 23 +- primitives/trie/src/lib.rs | 9 +- primitives/trie/src/storage_proof/compact.rs | 48 ++- primitives/trie/src/storage_proof/mod.rs | 15 +- primitives/trie/src/storage_proof/multiple.rs | 59 +--- primitives/trie/src/storage_proof/simple.rs | 36 +++ 10 files changed, 362 insertions(+), 275 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index d1810481ef135..941cf13708cdd 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -29,11 +29,14 @@ use sp_trie::{ProofInput, BackendStorageProof}; /// Access the state of the proof backend of a backend. pub type ProofRegStateFor = <>::ProofRegBackend as ProofRegBackend>::State; +/// Access the state of the proof backend of a backend. +pub type ProofRegFor = <>::StorageProof as BackendStorageProof>::StorageProofReg; + /// A state backend is used to read state data and can have changes committed /// to it. /// /// The clone operation (if implemented) should be cheap. -pub trait Backend: std::fmt::Debug { +pub trait Backend: Sized + std::fmt::Debug { /// An error type when fetching data is not possible. type Error: super::Error; @@ -164,8 +167,6 @@ pub trait Backend: std::fmt::Debug { } /// Try convert into a proof backend. - /// If one do not want to consume the backend, calling on '&self' is fine - /// since '&Backend' implement 'Backend'. 
fn as_proof_backend(self) -> Option { self.from_reg_state(Default::default()) } @@ -266,8 +267,8 @@ pub trait ProofRegBackend: crate::backend::Backend /// State of a backend. type State: Default + Send + Sync + Clone; - /// Extract proof when run. - fn extract_proof(&self, input: ProofInput) -> >::StorageProofReg; + /// Extract proof after running operation to prove. + fn extract_proof(&self) -> Result<>::StorageProofReg, Box>; } /// Backend used to produce proof. @@ -368,8 +369,7 @@ impl<'a, T, H> Backend for &'a T } fn from_reg_state(self, _previous: ProofRegStateFor) -> Option { - // cannot move out of reference, consider cloning or - // if needed. + // cannot move out of reference, consider cloning when needed. None } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 344e66d20c552..ab734c4269be3 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -59,7 +59,15 @@ where } /// Create a new empty instance of in-memory backend. -pub fn new_in_mem() -> TrieBackend, H, P> +pub fn new_in_mem() -> TrieBackend, H, sp_trie::SimpleProof> +where + H::Out: Codec + Ord, +{ + new_in_mem_proof::() +} + +/// Create a new empty instance of in-memory backend, specifying proof type. 
+pub fn new_in_mem_proof() -> TrieBackend, H, P> where H::Out: Codec + Ord, { @@ -147,7 +155,7 @@ where H::Out: Codec + Ord, { fn default() -> Self { - new_in_mem() + new_in_mem_proof() } } @@ -157,7 +165,7 @@ where H::Out: Codec + Ord, { fn from(inner: HashMap, BTreeMap>) -> Self { - let mut backend = new_in_mem(); + let mut backend = new_in_mem_proof(); backend.insert(inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect()))); backend } @@ -226,7 +234,7 @@ mod tests { vec![(b"2".to_vec(), Some(b"3".to_vec()))] )] ); - let trie_backend = storage.as_trie_backend().unwrap(); + let trie_backend = storage.as_proof_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index fdd363a97c8cd..e9e03ec2b7011 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -71,7 +71,7 @@ pub use overlayed_changes::{ StorageCollection, ChildStorageCollection, }; pub use proving_backend::{ProvingBackend, ProvingBackendRecorder, - create_proof_check_backend, create_flat_proof_check_backend}; + create_proof_check_backend, create_full_proof_check_backend}; pub use trie_backend_essence::{TrieBackendStorage, Storage}; pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; @@ -79,7 +79,7 @@ pub use in_memory_backend::new_in_mem; pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; pub use sp_core::traits::CloneableSpawn; -use backend::{Backend, ProofRegBackend, ProofCheckBackend}; +use backend::{Backend, ProofRegBackend, ProofCheckBackend, ProofRegFor}; type CallResult = Result, E>; @@ -95,6 +95,10 @@ pub type ChangesTrieTransaction = ( /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H, sp_trie::SimpleProof>; +/// Trie backend with in-memory storage and choice of proof. 
+/// TODO EMCH consider renaming to ProofCheckBackend +pub type InMemoryBackendWithProof = TrieBackend, H, P>; + /// Strategy for executing a call into the runtime. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ExecutionStrategy { @@ -461,9 +465,8 @@ pub fn prove_execution( spawn_handle: Box, method: &str, call_data: &[u8], - kind: StorageProofKind, runtime_code: &RuntimeCode, -) -> Result<(Vec, B::StorageProof), Box> +) -> Result<(Vec, ProofRegFor), Box> where B: Backend, H: Hasher, @@ -480,7 +483,6 @@ where spawn_handle, method, call_data, - kind, runtime_code, ) } @@ -501,9 +503,8 @@ pub fn prove_execution_on_proof_backend( spawn_handle: Box, method: &str, call_data: &[u8], - kind: StorageProofKind, runtime_code: &RuntimeCode, -) -> Result<(Vec, P::StorageProof), Box> +) -> Result<(Vec, ProofRegFor), Box> where P: ProofRegBackend, H: Hasher, @@ -529,7 +530,7 @@ where always_wasm(), None, )?; - let proof = sm.backend.extract_proof(); + let proof = sm.backend.extract_proof()?; Ok((result.into_encoded(), proof)) } @@ -604,8 +605,7 @@ where pub fn prove_read( backend: B, keys: I, - kind: StorageProofKind, -) -> Result> +) -> Result, Box> where B: Backend, H: Hasher, @@ -617,7 +617,7 @@ where .ok_or_else( || Box::new(ExecutionError::UnableToGenerateProof) as Box )?; - prove_read_on_proof_backend(&proof_backend, keys, kind) + prove_read_on_proof_backend(&proof_backend, keys) } /// Generate child storage read proof. @@ -625,8 +625,7 @@ pub fn prove_child_read( backend: B, child_info: &ChildInfo, keys: I, - kind: StorageProofKind, -) -> Result> +) -> Result, Box> where B: Backend, H: Hasher, @@ -636,15 +635,14 @@ where { let proving_backend = backend.as_proof_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_proof_backend(&proving_backend, child_info, keys, kind) + prove_child_read_on_proof_backend(&proving_backend, child_info, keys) } /// Generate storage read proof on pre-created trie backend. 
pub fn prove_read_on_proof_backend( proving_backend: &P, keys: I, - kind: StorageProofKind, -) -> Result> +) -> Result, Box> where P: ProofRegBackend, H: Hasher, @@ -657,7 +655,7 @@ where .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + proving_backend.extract_proof() } /// Generate storage read proof on pre-created trie backend. @@ -665,8 +663,7 @@ pub fn prove_child_read_on_proof_backend( proving_backend: &P, child_info: &ChildInfo, keys: I, - kind: StorageProofKind, -) -> Result> +) -> Result, Box> where P: ProofRegBackend, H: Hasher, @@ -679,7 +676,7 @@ where .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + proving_backend.extract_proof() } /// Check storage read proof, generated by `prove_read` call. @@ -768,12 +765,9 @@ mod tests { use super::changes_trie::Configuration as ChangesTrieConfig; use sp_core::{map, traits::{Externalities, RuntimeCode}}; use sp_runtime::traits::BlakeTwo256; - use sp_trie::Layout; + use sp_trie::{Layout, SimpleProof, BackendStorageProof}; - type ProvingBackend = super::TrieBackend< - MemoryDB, - Layout, - >; + type CompactProof = sp_trie::CompactProof>; #[derive(Clone)] struct DummyCodeExecutor { @@ -944,13 +938,14 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { - prove_execution_and_proof_check_works_inner(StorageProofKind::Flat); - prove_execution_and_proof_check_works_inner(StorageProofKind::Full); + prove_execution_and_proof_check_works_inner::(); + prove_execution_and_proof_check_works_inner::(); + /* TODO EMCH consider testing oven full backend to. 
prove_execution_and_proof_check_works_inner(StorageProofKind::TrieSkipHashesFull); prove_execution_and_proof_check_works_inner(StorageProofKind::TrieSkipHashes); + */ } - - fn prove_execution_and_proof_check_works_inner(kind: StorageProofKind) { + fn prove_execution_and_proof_check_works_inner>() { let executor = DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -959,7 +954,7 @@ mod tests { }; // fetch execution proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; let (remote_result, remote_proof) = prove_execution::<_, _, u64, _>( remote_backend, @@ -968,14 +963,13 @@ mod tests { sp_core::tasks::executor(), "test", &[], - kind, &RuntimeCode::empty(), ).unwrap(); // check proof locally - let local_result = execution_proof_check::( + let local_result = execution_proof_check::, BlakeTwo256, u64, _>( remote_root, - remote_proof, + remote_proof.into(), &mut Default::default(), &executor, sp_core::tasks::executor(), @@ -998,7 +992,7 @@ mod tests { b"bbb".to_vec() => b"3".to_vec() ]; let mut state = InMemoryBackend::::from(initial); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())); @@ -1014,7 +1008,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1042,7 +1036,7 @@ mod tests { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; let mut state = new_in_mem::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1050,7 +1044,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1089,7 +1083,7 @@ mod tests { ]; let key = b"key".to_vec(); let mut state = new_in_mem::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1098,7 +1092,7 @@ mod tests { &mut overlay, &mut 
offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1115,7 +1109,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1134,7 +1128,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1154,7 +1148,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); let mut state = new_in_mem::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_proof_backend().unwrap(); let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut overlay = OverlayedChanges::default(); @@ -1164,7 +1158,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1179,7 +1173,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1204,7 +1198,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1230,7 +1224,7 @@ mod tests { &mut overlay, &mut offchain_overlay, &mut cache, - backend, + &backend, changes_trie::disabled_state::<_, u64>(), None, ); @@ -1243,28 +1237,30 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { - prove_read_and_proof_check_works_inner(StorageProofKind::Full); - prove_read_and_proof_check_works_inner(StorageProofKind::Flat); - prove_read_and_proof_check_works_inner(StorageProofKind::TrieSkipHashesFull); - prove_read_and_proof_check_works_inner(StorageProofKind::TrieSkipHashes); + prove_read_and_proof_check_works_inner::(); + prove_read_and_proof_check_works_inner::; + /* TODO EMCH consider full storage test and value skip test */ } - - fn prove_read_and_proof_check_works_inner(kind: 
StorageProofKind) { + fn prove_read_and_proof_check_works_inner

() + where + P: BackendStorageProof, + P::StorageProofReg: Clone, + { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let remote_proof = prove_read(remote_backend, &[b"value2"], kind).unwrap(); + let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally - let local_result1 = read_proof_check::( + let local_result1 = read_proof_check::, BlakeTwo256, _>( remote_root, - remote_proof.clone(), + remote_proof.clone().into(), &[b"value2"], ).unwrap(); - let local_result2 = read_proof_check::( + let local_result2 = read_proof_check::, BlakeTwo256, _>( remote_root, - remote_proof.clone(), + remote_proof.clone().into(), &[&[0xff]], ).is_ok(); // check that results are correct @@ -1274,23 +1270,22 @@ mod tests { ); assert_eq!(local_result2, false); // on child trie - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, child_info, &[b"value3"], - kind, ).unwrap(); - let local_result1 = read_child_proof_check::( + let local_result1 = read_child_proof_check::, BlakeTwo256, _>( remote_root, - remote_proof.clone(), + remote_proof.clone().into(), child_info, &[b"value3"], ).unwrap(); - let local_result2 = read_child_proof_check::( + let local_result2 = read_child_proof_check::, BlakeTwo256, _>( remote_root, - remote_proof.clone(), + remote_proof.clone().into(), child_info, &[b"value2"], ).unwrap(); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 62fd2a4d8fd9c..a74648e91bc19 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -26,6 +26,7 @@ use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, RecordBackendFor, record_all_keys, StorageProofKind, TrieNodesStorageProof as StorageProof, ProofInputKind, ProofInput, RecordMapTrieNodes, RecordBackend, RegStorageProof, ProofFlatDefault, BackendStorageProof, + FullBackendStorageProof, }; pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; @@ -35,6 +36,9 @@ use crate::backend::{Backend, ProofRegStateFor, ProofRegBackend}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildrenMap}; use std::marker::PhantomData; +/// Clonable recorder backend with inner mutability. +type SyncRecordBackendFor = Arc>>; + /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { pub(crate) backend: &'a TrieBackendEssence, @@ -121,10 +125,10 @@ pub struct ProvingBackend< S: TrieBackendStorage, H: Hasher, P: BackendStorageProof, - > ( - pub TrieBackend>, H, P>, - PhantomData

, -); +> { + trie_backend: TrieBackend>, H, P>, + _ph: PhantomData

, +} /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend, H: Hasher, R: RecordBackend> { @@ -148,15 +152,31 @@ impl<'a, S, H, P> ProvingBackend<&'a S, H, P> fn new_with_recorder( backend: &'a TrieBackend, - proof_recorder: RecordBackendFor, + proof_recorder: SyncRecordBackendFor, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); let recorder = ProofRecorderBackend { backend: essence.backend_storage(), proof_recorder, + _ph: PhantomData, }; - ProvingBackend(TrieBackend::new(recorder, root), PhantomData) + match P::StorageProofReg::INPUT_KIND { + ProofInputKind::ChildTrieRoots => { + ProvingBackend { + trie_backend: TrieBackend::new_with_roots(recorder, root), + _ph: PhantomData + } + }, + ProofInputKind::None + | ProofInputKind::QueryPlan + | ProofInputKind::QueryPlanWithValues => { + ProvingBackend { + trie_backend: TrieBackend::new(recorder, root), + _ph: PhantomData, + } + }, + } } } @@ -171,19 +191,39 @@ impl ProvingBackend pub fn from_backend_with_recorder( backend: S, root: H::Out, - proof_recorder: RecordBackendFor, + proof_recorder: SyncRecordBackendFor, ) -> Self { let recorder = ProofRecorderBackend { backend, proof_recorder, + _ph: PhantomData, }; - ProvingBackend(TrieBackend::new(recorder, root)) + match P::StorageProofReg::INPUT_KIND { + ProofInputKind::ChildTrieRoots => { + ProvingBackend { + trie_backend: TrieBackend::new_with_roots(recorder, root), + _ph: PhantomData + } + }, + ProofInputKind::None + | ProofInputKind::QueryPlan + | ProofInputKind::QueryPlanWithValues => { + ProvingBackend { + trie_backend: TrieBackend::new(recorder, root), + _ph: PhantomData, + } + }, + } } - /// Extract current recording state. + /// Extract current recording state. /// This is sharing a rc over a sync reference. 
- pub fn extract_recorder(&self) -> RecordBackendFor { - self.0.backend_storage().proof_recorder.clone() + /// TODO EMCH seems unused + pub fn extract_recorder(&self) -> (SyncRecordBackendFor, ProofInput) { + ( + self.trie_backend.backend_storage().proof_recorder.clone(), + self.trie_backend.extract_registered_roots(), + ) } } @@ -193,11 +233,11 @@ impl, H: Hasher, R: RecordBackend> TrieBackendStorag type Overlay = S::Overlay; fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { - if let Some(v) = self.proof_recorder.get(child_info, key) { + if let Some(v) = self.proof_recorder.read().get(child_info, key) { return Ok(v.clone()); } let backend_value = self.backend.get(child_info, key, prefix)?; - self.proof_recorder.record(child_info, key, backend_value.clone()); + self.proof_recorder.write().record(child_info.clone(), key.clone(), backend_value.clone()); Ok(backend_value) } } @@ -217,13 +257,14 @@ impl ProofRegBackend for ProvingBackend H::Out: Ord + Codec, P: BackendStorageProof, { - type State = as RegStorageProof>::RecordBackend; + type State = SyncRecordBackendFor; - fn extract_proof(&self, input: ProofInput) -> >::StorageProofReg { - >::StorageProofReg::::extract_proof( - &self.0.essence().backend_storage().proof_recorder, + fn extract_proof(&self) -> Result<>::StorageProofReg, Box> { + let input = self.trie_backend.extract_registered_roots(); + >::StorageProofReg::extract_proof( + &self.trie_backend.essence().backend_storage().proof_recorder.read(), input, - ) + ).map_err(|e| Box::new(e) as Box) } } @@ -326,33 +367,38 @@ impl Backend for ProvingBackend self.trie_backend.usage_info() } - fn as_proof_backend(self) -> Option { - Some(self) - } - fn from_reg_state(self, previous_recorder: ProofRegStateFor) -> Option { - let root = self.0.essence().root().clone(); - let storage = self.0.into_storage(); + let root = self.trie_backend.essence().root().clone(); + let storage = self.trie_backend.into_storage(); let current_recorder 
= storage.proof_recorder; let backend = storage.backend; - if current_recorder.merge(previous_recorder) { - ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder) + if std::sync::Arc::ptr_eq(¤t_recorder, &previous_recorder) { + Some(ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder)) } else { - None + let previous_recorder = match Arc::try_unwrap(previous_recorder) { + Ok(r) => r.into_inner(), + Err(arc) => arc.read().clone(), + }; + if current_recorder.write().merge(previous_recorder) { + Some(ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder)) + } else { + None + } } } } /// Create flat proof check backend. -pub fn create_flat_proof_check_backend( +pub fn create_proof_check_backend( root: H::Out, proof: P, ) -> Result, H, P>, Box> where H: Hasher, H::Out: Codec, + P: BackendStorageProof, { - let db = proof.into_partial_flat_db() + let db = proof.into_partial_db() .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.contains(&root, EMPTY_PREFIX) { Ok(TrieBackend::new(db, root)) @@ -362,16 +408,17 @@ where } /// Create proof check backend. 
-pub fn create_proof_check_backend( +pub fn create_full_proof_check_backend( root: H::Out, proof: P, ) -> Result>, H, P>, Box> where H: Hasher, H::Out: Codec, + P: FullBackendStorageProof, { use std::ops::Deref; - let db = proof.into_partial_db() + let db = proof.into_partial_full_db() .map_err(|e| Box::new(format!("{}", e)) as Box)?; if db.deref().get(&ChildInfoProof::top_trie()) .map(|db| db.contains(&root, EMPTY_PREFIX)) @@ -384,42 +431,40 @@ where #[cfg(test)] mod tests { - use crate::InMemoryBackend; - use crate::trie_backend::tests::test_trie; + use crate::InMemoryBackendWithProof; + use crate::trie_backend::tests::test_trie_proof; use super::*; use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; + use sp_trie::{SimpleProof, StorageProof as _}; use sp_runtime::traits::BlakeTwo256; - fn test_proving<'a>( - trie_backend: &'a TrieBackend, BlakeTwo256>, - kind: StorageProofKind, - ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { - ProvingBackend::new(trie_backend, kind) + type CompactProof = sp_trie::CompactProof>; + + fn test_proving>( + trie_backend: &TrieBackend, BlakeTwo256, P>, + ) -> ProvingBackend<&PrefixedMemoryDB, BlakeTwo256, P> { + ProvingBackend::new(trie_backend) } #[test] fn proof_is_empty_until_value_is_read() { - let trie_backend = test_trie(); - let kind = StorageProofKind::Flat; - assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); - let kind = StorageProofKind::Full; - assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); - let kind = StorageProofKind::TrieSkipHashesFull; - assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); - let kind = StorageProofKind::TrieSkipHashes; - assert!(test_proving(&trie_backend, kind).extract_proof().unwrap().is_empty()); + proof_is_empty_until_value_is_read_inner::(); + proof_is_empty_until_value_is_read_inner::(); + } + fn proof_is_empty_until_value_is_read_inner>() { + let trie_backend = 
test_trie_proof::

(); + assert!(test_proving(&trie_backend).extract_proof().unwrap().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { - let trie_backend = test_trie(); - let kind = StorageProofKind::Flat; - let mut backend = test_proving(&trie_backend, kind); - assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().unwrap().is_empty()); - let kind = StorageProofKind::Full; - let mut backend = test_proving(&trie_backend, kind); + proof_is_non_empty_after_value_is_read_inner::(); + proof_is_non_empty_after_value_is_read_inner::(); + } + fn proof_is_non_empty_after_value_is_read_inner>() { + let trie_backend = test_trie_proof::

(); + let mut backend = test_proving(&trie_backend); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof().unwrap().is_empty()); } @@ -427,60 +472,67 @@ mod tests { #[test] fn proof_is_invalid_when_does_not_contains_root() { use sp_core::H256; - let result = create_proof_check_backend::( + let result = create_proof_check_backend::( + H256::from_low_u64_be(1), + SimpleProof::empty() + ); + assert!(result.is_err()); + let result = create_proof_check_backend::( H256::from_low_u64_be(1), - StorageProof::empty() + CompactProof::empty() ); assert!(result.is_err()); } #[test] fn passes_through_backend_calls() { - let test = |proof_kind| { - let trie_backend = test_trie(); - let proving_backend = test_proving(&trie_backend, proof_kind); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); - assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - - let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); - assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); - }; - test(StorageProofKind::Flat); - test(StorageProofKind::Full); + passes_through_backend_calls_inner::(); + passes_through_backend_calls_inner::(); + } + fn passes_through_backend_calls_inner>() { + let trie_backend = test_trie_proof::

(); + let proving_backend = test_proving(&trie_backend); + assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); + assert_eq!(trie_backend.pairs(), proving_backend.pairs()); + + let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); + let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + assert_eq!(trie_root, proving_root); + assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } #[test] fn proof_recorded_and_checked() { + proof_recorded_and_checked_inner::(); + proof_recorded_and_checked_inner::(); + } + fn proof_recorded_and_checked_inner>() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); - let in_memory = InMemoryBackend::::default(); + let in_memory = InMemoryBackendWithProof::::default(); let mut in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - let trie = in_memory.as_trie_backend().unwrap(); + let trie = &in_memory; let trie_root = trie.storage_root(::std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let test = |kind: StorageProofKind| { - let mut proving = ProvingBackend::new(trie, kind); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + let mut proving = in_memory.as_proof_backend().unwrap(); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - let proof = proving.extract_proof().unwrap(); + let proof = proving.extract_proof().unwrap(); - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - }; - test(StorageProofKind::Flat); - test(StorageProofKind::Full); - test(StorageProofKind::TrieSkipHashesFull); - test(StorageProofKind::TrieSkipHashes); + 
let proof_check = create_proof_check_backend::(in_memory_root.into(), proof.into()).unwrap(); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); } #[test] fn proof_recorded_and_checked_with_child() { + proof_recorded_and_checked_with_child_inner::(); + proof_recorded_and_checked_with_child_inner::(); + } + fn proof_recorded_and_checked_with_child_inner>() { let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; @@ -492,7 +544,7 @@ mod tests { (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; - let in_memory = InMemoryBackend::::default(); + let in_memory = InMemoryBackendWithProof::::default(); let mut in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory.full_storage_root( @@ -512,7 +564,7 @@ mod tests { vec![i] )); - let trie = in_memory.as_trie_backend().unwrap(); + let trie = &in_memory; let trie_root = trie.storage_root(::std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!( @@ -520,51 +572,33 @@ mod tests { vec![i] )); - let test = |kind: StorageProofKind| { - let mut proving = ProvingBackend::new(trie, kind); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - - let proof = proving.extract_proof().unwrap(); - - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - assert!(proof_check.storage(&[0]).is_err()); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - // note that it is include in root because proof close - assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); - assert_eq!(proof_check.storage(&[64]).unwrap(), None); - - let mut proving = ProvingBackend::new(trie, kind); - assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); - - let proof = 
proving.extract_proof().unwrap(); - if kind.use_full_partial_db().unwrap() { - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - - assert_eq!( - proof_check.child_storage(&child_info_1, &[64]).unwrap().unwrap(), - vec![64] - ); - } else { - let proof_check = create_flat_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - - assert_eq!( - proof_check.child_storage(&child_info_1, &[64]).unwrap().unwrap(), - vec![64] - ); - } - }; - test(StorageProofKind::Flat); - test(StorageProofKind::Full); - test(StorageProofKind::TrieSkipHashesFull); - test(StorageProofKind::TrieSkipHashes); + let mut proving = in_memory.clone().as_proof_backend().unwrap(); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + + let proof = proving.extract_proof().unwrap(); + + let proof_check = create_proof_check_backend::( + in_memory_root.into(), + proof.into(), + ).unwrap(); + assert!(proof_check.storage(&[0]).is_err()); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + // note that it is include in root because proof close + assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); + assert_eq!(proof_check.storage(&[64]).unwrap(), None); + + let mut proving = in_memory.as_proof_backend().unwrap(); + assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); + + let proof = proving.extract_proof().unwrap(); + let proof_check = create_proof_check_backend::( + in_memory_root.into(), + proof.into(), + ).unwrap(); + + assert_eq!( + proof_check.child_storage(&child_info_1, &[64]).unwrap().unwrap(), + vec![64] + ); } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 9081df1d3bd01..becf080b98da0 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -310,15 +310,8 @@ impl ProofCheckBackend for TrieBackend, H, P> root: H::Out, proof: Self::StorageProof, ) 
-> Result> { - use hash_db::HashDB; - let mut mem_db = MemoryDB::new(); - for node in proof.trie_backend_nodes()? { - let hash = H::hash(node.as_ref(); - mem_db.emplace(key, hash_db::EMPTY_PREFIX, node); - } - if !mem_db.contains(key, hash_db::EMPTY_PREFIX) { - return Err("No matching root for proof".into()); - } + let mem_db = proof.into_partial_db() + .map_err(|e| Box::new(e) as Box)?; Ok(TrieBackend::new(mem_db, root)) } } @@ -328,7 +321,7 @@ pub mod tests { use std::{collections::HashSet, iter}; use sp_core::H256; use codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut, SimpleProof}; use sp_runtime::traits::BlakeTwo256; use super::*; @@ -362,7 +355,13 @@ pub mod tests { (mdb, root) } - pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256> { + pub(crate) fn test_trie_proof>() + -> TrieBackend, BlakeTwo256, P> { + let (mdb, root) = test_db(); + TrieBackend::new(mdb, root) + } + + pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256, SimpleProof> { let (mdb, root) = test_db(); TrieBackend::new(mdb, root) } @@ -393,7 +392,7 @@ pub mod tests { #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackend::, BlakeTwo256>::new( + assert!(TrieBackend::, BlakeTwo256, SimpleProof>::new( PrefixedMemoryDB::default(), Default::default(), ).pairs().is_empty()); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index f83e5ace54bbb..23066c13af19f 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -35,7 +35,8 @@ pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, compact::FullForMerge, - Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, + compact::Flat as CompactProof, + Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, FullBackendStorageProof, BackendStorageProof, MergeableStorageProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, simple::Flat as SimpleProof}; /// Various re-exports from the `trie-db` crate. @@ -53,7 +54,6 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; /// TODO EMCH check if can be use at other place (rg 'as BackendS') pub type RecordBackendFor = <

>::StorageProofReg as RegStorageProof>::RecordBackend; - #[derive(Default)] /// substrate trie layout pub struct Layout(sp_std::marker::PhantomData); @@ -339,17 +339,16 @@ fn unpack_proof(input: &[Vec]) Ok((root.0, memory_db.drain().into_iter().map(|(_k, (v, _rc))| v).collect())) } -/* TODO remove ?? /// Unpack packed proof. /// This is faster than `unpack_proof`, and should be prefered is encoded node /// will be use in a new memory db. -fn unpack_proof_to_memdb(input: &[Vec]) +fn unpack_proof_to_memdb(input: &[Vec]) -> Result<(TrieHash, MemoryDB::<::Hash>), Box>> { let mut memory_db = MemoryDB::<::Hash>::default(); let root = trie_db::decode_compact::(&mut memory_db, input)?; Ok((root.0, memory_db)) } -*/ + /// Read a value from the child trie. pub fn read_child_trie_value( keyspace: &[u8], diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 5251558eb4fef..561ea13d69877 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -212,12 +212,27 @@ impl RegStorageProof for FullForMerge } } -impl BackendStorageProof for Flat +impl BackendStorageProof for Flat where T: TrieLayout, TrieHash: Codec, { type StorageProofReg = FullForMerge; + + fn into_partial_db(self) -> Result> { + let mut db = MemoryDB::default(); + let mut db_empty = true; + for proof in self.0.into_iter() { + let (_root, child_db) = crate::unpack_proof_to_memdb::(proof.as_slice())?; + if db_empty { + db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + Ok(db) + } } impl BackendStorageProof for Full @@ -226,6 +241,37 @@ impl BackendStorageProof for Full TrieHash: Codec, { type StorageProofReg = FullForMerge; + + fn into_partial_db(self) -> Result> { + let mut db = MemoryDB::default(); + let mut db_empty = true; + for (_child_info, proof) in self.0.into_iter() { + let (_root, child_db) = crate::unpack_proof_to_memdb::(proof.as_slice())?; + if db_empty { + 
db_empty = false; + db = child_db; + } else { + db.consolidate(child_db); + } + } + Ok(db) + } +} + +impl FullBackendStorageProof for Full + where + T: TrieLayout, + TrieHash: Codec, +{ + fn into_partial_full_db(self) -> Result>> { + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in self.0 { + // Note that this does check all hashes by using a trie backend + let (_root, db) = crate::unpack_proof_to_memdb::(proof.as_slice())?; + result.insert(child_info, db); + } + Ok(result) + } } // Note that this implementation is only possible diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index f375bc3aa54b7..7b988e0281e57 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -16,6 +16,7 @@ use hash_db::{Hasher, HashDBRef}; use crate::Layout; use sp_storage::{ChildInfo, ChildInfoProof, ChildrenMap}; use trie_db::DBValue; +use crate::MemoryDB; pub mod simple; pub mod compact; @@ -44,6 +45,9 @@ pub enum Error { Trie(String), } +#[cfg(feature = "std")] +impl std::error::Error for Error { } + #[cfg(not(feature = "std"))] #[derive(PartialEq, Eq, Clone, Debug)] pub enum Error { @@ -247,8 +251,15 @@ pub trait BackendStorageProof: Codec + StorageProof { + MergeableStorageProof + Into; // TODO EMCH consider removing this conv or make it a try into?? - /// To check proof over a trie backend. - fn trie_backend_nodes(self) -> Result>>; + /// Extract a flat trie db from the proof. + /// Fail on invalid proof content. + fn into_partial_db(self) -> Result>; +} + +pub trait FullBackendStorageProof: BackendStorageProof { + /// Extract a trie db with children info from the proof. + /// Fail on invalid proof content. + fn into_partial_full_db(self) -> Result>>; } /// Trait for proofs that can use to create a partial trie backend. 
diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index 76bffdfae7f6d..884060cc98b28 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -295,6 +295,15 @@ impl BackendStorageProof for MultipleStorageProof D: DefaultKind, { type StorageProofReg = super::compact::FullForMerge; + + fn into_partial_db(self) -> Result> { + match self { + MultipleStorageProof::Flat(p) => p.into_partial_db(), + MultipleStorageProof::TrieSkipHashes(p, _) => p.into_partial_db(), + _ => panic!("misused multiproof"), // TODO EMCH this is a tradeoff for producing proof without checking but the corresponding variant should be removed. + } + } + } impl TryInto for MultipleStorageProof { @@ -374,56 +383,6 @@ impl Into> for super::comp /* - /// Can also fail on invalid compact proof. - pub fn into_partial_db(self) -> Result>> - where - H: Hasher, - H::Out: Decode, - { - let mut result = ChildrenProofMap::default(); - match self { - s@MultipleStorageProof::Flat(..) => { - let db = s.into_partial_flat_db::()?; - result.insert(ChildInfoProof::top_trie(), db); - }, - MultipleStorageProof::Full(children) => { - for (child_info, proof) in children.into_iter() { - let mut db = MemoryDB::default(); - for item in proof.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } - result.insert(child_info, db); - } - }, - MultipleStorageProof::TrieSkipHashesForMerge(children) => { - for (child_info, (proof, _root)) in children.into_iter() { - let mut db = MemoryDB::default(); - for (key, value) in proof.0.into_iter() { - let key = Decode::decode(&mut &key[..])?; - db.emplace(key, EMPTY_PREFIX, value); - } - result.insert(child_info, db); - } - }, - MultipleStorageProof::TrieSkipHashesFull(children) => { - for (child_info, proof) in children.into_iter() { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). 
- let (_root, db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - result.insert(child_info, db); - } - }, - s@MultipleStorageProof::TrieSkipHashes(..) => { - let db = s.into_partial_flat_db::()?; - result.insert(ChildInfoProof::top_trie(), db); - }, - MultipleStorageProof::KnownQueryPlanAndValues(_children) => { - return Err(no_partial_db_support()); - }, - } - Ok(result) - } - /// Create in-memory storage of proof check backend. /// /// Behave similarily to `into_partial_db`. diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index 98e2e532dfb8b..ccc77c134df0d 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -106,10 +106,46 @@ impl RegStorageProof for Full { impl BackendStorageProof for Flat { type StorageProofReg = Self; + + fn into_partial_db(self) -> Result> { + use hash_db::HashDB; + let mut db = MemoryDB::default(); + for item in self.0.into_iter() { + db.insert(hash_db::EMPTY_PREFIX, &item[..]); + } + Ok(db) + } } impl BackendStorageProof for Full { type StorageProofReg = Self; + + fn into_partial_db(self) -> Result> { + use hash_db::HashDB; + let mut db = MemoryDB::default(); + for (_child_info, proof) in self.0.into_iter() { + for item in proof.into_iter() { + db.insert(hash_db::EMPTY_PREFIX, &item); + } + } + + Ok(db) + } +} + +impl FullBackendStorageProof for Full { + fn into_partial_full_db(self) -> Result>> { + use hash_db::HashDB; + let mut result = ChildrenProofMap::default(); + for (child_info, proof) in self.0.into_iter() { + let mut db = MemoryDB::default(); + for item in proof.into_iter() { + db.insert(hash_db::EMPTY_PREFIX, &item); + } + result.insert(child_info, db); + } + Ok(result) + } } // Note that this implementation is only possible From d5279ddb2b5069f4109707dc2f3fff4f6da9e32e Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 9 Jun 2020 18:33:30 +0200 Subject: [PATCH 149/185] Test on full backend --- 
primitives/state-machine/src/lib.rs | 50 ++++++++++++++++++-- primitives/state-machine/src/trie_backend.rs | 17 ++++++- primitives/trie/src/lib.rs | 2 +- 3 files changed, 62 insertions(+), 7 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index e9e03ec2b7011..8a092fb21d500 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -99,6 +99,11 @@ pub type InMemoryBackend = TrieBackend, H, sp_trie::SimpleProof>; /// TODO EMCH consider renaming to ProofCheckBackend pub type InMemoryBackendWithProof = TrieBackend, H, P>; +/// Trie backend with in-memory storage and choice of proof running over +/// separate child backends. +/// TODO EMCH consider renaming to ProofCheckBackend +pub type InMemoryBackendWithFullProof = TrieBackend>, H, P>; + /// Strategy for executing a call into the runtime. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ExecutionStrategy { @@ -765,9 +770,10 @@ mod tests { use super::changes_trie::Configuration as ChangesTrieConfig; use sp_core::{map, traits::{Externalities, RuntimeCode}}; use sp_runtime::traits::BlakeTwo256; - use sp_trie::{Layout, SimpleProof, BackendStorageProof}; + use sp_trie::{Layout, SimpleProof, SimpleFullProof, BackendStorageProof, FullBackendStorageProof}; type CompactProof = sp_trie::CompactProof>; + type CompactFullProof = sp_trie::CompactFullProof>; #[derive(Clone)] struct DummyCodeExecutor { @@ -940,10 +946,8 @@ mod tests { fn prove_execution_and_proof_check_works() { prove_execution_and_proof_check_works_inner::(); prove_execution_and_proof_check_works_inner::(); - /* TODO EMCH consider testing oven full backend to. 
- prove_execution_and_proof_check_works_inner(StorageProofKind::TrieSkipHashesFull); - prove_execution_and_proof_check_works_inner(StorageProofKind::TrieSkipHashes); - */ + prove_execution_and_proof_check_works_inner::(); + prove_execution_and_proof_check_works_inner::(); } fn prove_execution_and_proof_check_works_inner>() { let executor = DummyCodeExecutor { @@ -1239,6 +1243,8 @@ mod tests { fn prove_read_and_proof_check_works() { prove_read_and_proof_check_works_inner::(); prove_read_and_proof_check_works_inner::; + prove_read_and_proof_check_works_inner::(); + prove_read_and_proof_check_works_inner::; /* TODO EMCH consider full storage test and value skip test */ } fn prove_read_and_proof_check_works_inner

() @@ -1298,6 +1304,40 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } + fn prove_read_and_proof_on_fullbackend_works

() { + prove_read_and_proof_on_fullbackend_works_inner::(); + prove_read_and_proof_on_fullbackend_works_inner::; + } + fn prove_read_and_proof_on_fullbackend_works_inner

() + where + P: FullBackendStorageProof, + P::StorageProofReg: Clone, + { + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; + // fetch read proof from 'remote' full node + let remote_backend = trie_backend::tests::test_trie_proof::

(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); + // check proof locally + let local_result1 = read_proof_check::, BlakeTwo256, _>( + remote_root, + remote_proof.clone().into(), + &[b"value2"], + ).unwrap(); + let local_result2 = read_proof_check::, BlakeTwo256, _>( + remote_root, + remote_proof.clone().into(), + &[&[0xff]], + ).is_ok(); + // check that results are correct + assert_eq!( + local_result1.into_iter().collect::>(), + vec![(b"value2".to_vec(), Some(vec![24]))], + ); + assert_eq!(local_result2, false); + } + #[test] fn child_storage_uuid() { diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index becf080b98da0..2184ece57b4af 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,7 +20,7 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, - ChildrenProofMap, ProofInput, BackendStorageProof}; + ChildrenProofMap, ProofInput, BackendStorageProof, FullBackendStorageProof}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_trie::RegStorageProof; use crate::backend::{ProofRegStateFor}; @@ -315,6 +315,21 @@ impl ProofCheckBackend for TrieBackend, H, P> Ok(TrieBackend::new(mem_db, root)) } } + +impl ProofCheckBackend for TrieBackend>, H, P> + where + H::Out: Ord + Codec, + P: FullBackendStorageProof, +{ + fn create_proof_check_backend( + root: H::Out, + proof: Self::StorageProof, + ) -> Result> { + let mem_db = proof.into_partial_full_db() + .map_err(|e| Box::new(e) as Box)?; + Ok(TrieBackend::new(mem_db, root)) + } +} #[cfg(test)] pub mod tests { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 23066c13af19f..8706fe79ecb64 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -35,7 +35,7 @@ pub use 
trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, compact::FullForMerge, - compact::Flat as CompactProof, + compact::Flat as CompactProof, simple::Full as SimpleFullProof, compact::Full as CompactFullProof, Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, FullBackendStorageProof, BackendStorageProof, MergeableStorageProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, simple::Flat as SimpleProof}; From cc915c1c7427d27b5f11d5a1c9da3b902a09065a Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 9 Jun 2020 20:33:17 +0200 Subject: [PATCH 150/185] basic knownstorageandvalue test. --- primitives/state-machine/src/backend.rs | 6 +- primitives/state-machine/src/lib.rs | 107 +++++++++++++++++- .../state-machine/src/proving_backend.rs | 18 +-- primitives/trie/src/lib.rs | 1 + primitives/trie/src/storage_proof/mod.rs | 6 +- 5 files changed, 119 insertions(+), 19 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 941cf13708cdd..6022c43daf69e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -24,7 +24,7 @@ use crate::{ trie_backend_essence::TrieBackendStorage, UsageInfo, StorageKey, StorageValue, StorageCollection, }; -use sp_trie::{ProofInput, BackendStorageProof}; +use sp_trie::{ProofInput, BackendStorageProof, RecordBackendFor}; /// Access the state of the proof backend of a backend. pub type ProofRegStateFor = <>::ProofRegBackend as ProofRegBackend>::State; @@ -236,6 +236,7 @@ pub trait Backend: Sized + std::fmt::Debug { } /// Backend that can be instantiated from its state. 
+/// TODO EMCH does not seem use at this point pub trait InstantiableStateBackend: Backend where H: Hasher, @@ -269,6 +270,9 @@ pub trait ProofRegBackend: crate::backend::Backend /// Extract proof after running operation to prove. fn extract_proof(&self) -> Result<>::StorageProofReg, Box>; + + /// Get current recording state. + fn extract_recorder(self) -> (RecordBackendFor, ProofInput); } /// Backend used to produce proof. diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 8a092fb21d500..08bce544c2dac 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -625,6 +625,32 @@ where prove_read_on_proof_backend(&proof_backend, keys) } +/// Generate storage read proof for query plan verification. +pub fn prove_read_for_query_plan_check( + backend: B, + keys: I, +) -> Result<(sp_trie::RecordBackendFor, ProofInput), Box> +where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, +{ + let proof_backend = backend.as_proof_backend() + .ok_or_else( + || Box::new(ExecutionError::UnableToGenerateProof) as Box + )?; + for key in keys.into_iter() { + proof_backend + .storage(key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + + Ok(proof_backend.extract_recorder()) +} + + /// Generate child storage read proof. pub fn prove_child_read( backend: B, @@ -774,6 +800,7 @@ mod tests { type CompactProof = sp_trie::CompactProof>; type CompactFullProof = sp_trie::CompactFullProof>; + type QueryPlanProof = sp_trie::QueryPlanProof>; #[derive(Clone)] struct DummyCodeExecutor { @@ -1239,13 +1266,75 @@ mod tests { } } + #[test] + fn prove_read_and_proof_check_works_query_plan() { + use sp_trie::{CheckableStorageProof, ProofInput}; + + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; + // fetch read proof from 'remote' full node. + // Using compact proof to get record backend and proofs. 
+ let remote_backend = trie_backend::tests::test_trie_proof::(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]).unwrap(); + let mut root_map = ChildrenProofMap::default(); + root_map.insert(ChildInfo::top_trie().proof_info(), remote_root.encode()); + assert!(ProofInput::ChildTrieRoots(root_map) == root_input); + + // TODO EMCH could do a primitive function to avoid building the input manually. + let mut query_plan = ChildrenProofMap::default(); + query_plan.insert( + ChildInfo::top_trie().proof_info(), + (remote_root.encode(), vec![(b"value2".to_vec(), Some(vec![24u8]))]), + ); + let input_check = ProofInput::QueryPlanWithValues(query_plan); + let mut query_plan = ChildrenProofMap::default(); + query_plan.insert( + ChildInfo::top_trie().proof_info(), + (remote_root.encode(), vec![b"value2".to_vec()]), + ); + let input = ProofInput::QueryPlan(query_plan); + let remote_proof = >::extract_proof(&recorder, input).unwrap(); + remote_proof.verify(&input_check); + /* + // on child trie + let remote_backend = trie_backend::tests::test_trie_proof::(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_proof = prove_child_read( + remote_backend, + child_info, + &[b"value3"], + ).unwrap(); + let local_result1 = read_child_proof_check::, BlakeTwo256, _>( + remote_root, + remote_proof.clone().into(), + child_info, + &[b"value3"], + ).unwrap(); + let local_result2 = read_child_proof_check::, BlakeTwo256, _>( + remote_root, + remote_proof.clone().into(), + child_info, + &[b"value2"], + ).unwrap(); + assert_eq!( + local_result1.into_iter().collect::>(), + vec![(b"value3".to_vec(), Some(vec![142]))], + ); + assert_eq!( + local_result2.into_iter().collect::>(), + vec![(b"value2".to_vec(), None)], + ); +*/ + // TODO test with no child trie ref + } + #[test] fn prove_read_and_proof_check_works() { 
prove_read_and_proof_check_works_inner::(); - prove_read_and_proof_check_works_inner::; + prove_read_and_proof_check_works_inner::(); prove_read_and_proof_check_works_inner::(); - prove_read_and_proof_check_works_inner::; - /* TODO EMCH consider full storage test and value skip test */ + prove_read_and_proof_check_works_inner::(); } fn prove_read_and_proof_check_works_inner

() where @@ -1258,7 +1347,8 @@ mod tests { let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); - // check proof locally + + // check proof locally let local_result1 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), @@ -1269,12 +1359,13 @@ mod tests { remote_proof.clone().into(), &[&[0xff]], ).is_ok(); - // check that results are correct + // check that results are correct assert_eq!( local_result1.into_iter().collect::>(), vec![(b"value2".to_vec(), Some(vec![24]))], ); assert_eq!(local_result2, false); + // on child trie let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -1304,9 +1395,13 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } + fn prove_read_and_proof_on_fullbackend_works

() { + // more proof could be tested, but at this point the full backend + // is just here to assert that we are able to test child trie content + // and are able to switch backend for checking proof. prove_read_and_proof_on_fullbackend_works_inner::(); - prove_read_and_proof_on_fullbackend_works_inner::; + prove_read_and_proof_on_fullbackend_works_inner::(); } fn prove_read_and_proof_on_fullbackend_works_inner

() where diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index a74648e91bc19..2d85421aea553 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -216,15 +216,6 @@ impl ProvingBackend } } - /// Extract current recording state. - /// This is sharing a rc over a sync reference. - /// TODO EMCH seems unused - pub fn extract_recorder(&self) -> (SyncRecordBackendFor, ProofInput) { - ( - self.trie_backend.backend_storage().proof_recorder.clone(), - self.trie_backend.extract_registered_roots(), - ) - } } impl, H: Hasher, R: RecordBackend> TrieBackendStorage @@ -266,6 +257,15 @@ impl ProofRegBackend for ProvingBackend input, ).map_err(|e| Box::new(e) as Box) } + + fn extract_recorder(self) -> (RecordBackendFor, ProofInput) { + let input = self.trie_backend.extract_registered_roots(); + let recorder = match Arc::try_unwrap(self.trie_backend.into_storage().proof_recorder) { + Ok(r) => r.into_inner(), + Err(arc) => arc.read().clone(), + }; + (recorder, input) + } } impl Backend for ProvingBackend diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8706fe79ecb64..181fc04c23fd3 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,6 +36,7 @@ pub use trie_stream::TrieStream; pub use node_codec::NodeCodec; pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, compact::FullForMerge, compact::Flat as CompactProof, simple::Full as SimpleFullProof, compact::Full as CompactFullProof, + query_plan::KnownQueryPlanAndValues as QueryPlanProof, CheckableStorageProof, Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, FullBackendStorageProof, BackendStorageProof, MergeableStorageProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, simple::Flat as SimpleProof}; diff 
--git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 7b988e0281e57..ac65f31a6efe0 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -115,7 +115,7 @@ const fn incompatible_type() -> Error { } -#[derive(Clone)] +#[derive(Clone, Eq, PartialEq)] /// Additional information needed for packing or unpacking storage proof. /// These do not need to be part of the proof but are required /// when processing the proof. @@ -126,8 +126,8 @@ pub enum Input { /// Contains trie roots used during proof processing. ChildTrieRoots(ChildrenProofMap>), - /// Contains trie roots used during proof processing. - /// Contains key and values queried during the proof processing. + /// For each children, contains encoded trie roots used during proof processing. + /// Also contains key and values queried during the proof processing. QueryPlanWithValues(ChildrenProofMap<(Vec, Vec<(Vec, Option>)>)>), /// Contains trie roots used during proof processing. From e087c58db3838c4d7ede73735cb7b40b90a1b420 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 10 Jun 2020 09:37:08 +0200 Subject: [PATCH 151/185] Fix tests remove warnings. 
--- primitives/state-machine/src/backend.rs | 5 +---- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/state-machine/src/lib.rs | 15 ++++++++------- .../state-machine/src/proving_backend.rs | 19 +++++++++---------- primitives/state-machine/src/trie_backend.rs | 1 - primitives/trie/src/storage_proof/mod.rs | 1 + 6 files changed, 20 insertions(+), 23 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 6022c43daf69e..5c11141f6c2d9 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,10 +20,7 @@ use hash_db::Hasher; use codec::{Decode, Encode}; use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; -use crate::{ - trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, -}; +use crate::{UsageInfo, StorageKey, StorageValue, StorageCollection}; use sp_trie::{ProofInput, BackendStorageProof, RecordBackendFor}; /// Access the state of the proof backend of a backend. diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index ab734c4269be3..ff31eae9822fd 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -228,7 +228,7 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; - let mut storage = storage.update( + let storage = storage.update( vec![( Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))] diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 08bce544c2dac..41cb84d3cb03b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -464,7 +464,7 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where /// Prove execution using the given state backend, overlayed changes, and call executor. 
pub fn prove_execution( - mut backend: B, + backend: B, overlay: &mut OverlayedChanges, exec: &Exec, spawn_handle: Box, @@ -1022,7 +1022,7 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let mut state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from(initial); let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1066,7 +1066,7 @@ mod tests { fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let mut state = new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut offchain_overlay = OffchainOverlayedChanges::default(); @@ -1113,7 +1113,7 @@ mod tests { b"d4".to_vec(), ]; let key = b"key".to_vec(); - let mut state = new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut offchain_overlay = OffchainOverlayedChanges::default(); @@ -1178,7 +1178,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); - let mut state = new_in_mem::(); + let state = new_in_mem::(); let backend = state.as_proof_backend().unwrap(); let mut offchain_overlay = OffchainOverlayedChanges::default(); let mut overlay = OverlayedChanges::default(); @@ -1295,7 +1295,7 @@ mod tests { ); let input = ProofInput::QueryPlan(query_plan); let remote_proof = >::extract_proof(&recorder, input).unwrap(); - remote_proof.verify(&input_check); + assert!(remote_proof.verify(&input_check).unwrap()); /* // on child trie let remote_backend = trie_backend::tests::test_trie_proof::(); @@ -1396,7 +1396,8 @@ mod tests { ); } - fn prove_read_and_proof_on_fullbackend_works

() { + #[test] + fn prove_read_and_proof_on_fullbackend_works() { // more proof could be tested, but at this point the full backend // is just here to assert that we are able to test child trie content // and are able to switch backend for checking proof. diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 2d85421aea553..72d629a4a22fe 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -24,16 +24,15 @@ use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, RecordBackendFor, - record_all_keys, StorageProofKind, TrieNodesStorageProof as StorageProof, ProofInputKind, - ProofInput, RecordMapTrieNodes, RecordBackend, RegStorageProof, ProofFlatDefault, BackendStorageProof, - FullBackendStorageProof, + ProofInput, RecordBackend, RegStorageProof, BackendStorageProof, + record_all_keys, ProofInputKind, FullBackendStorageProof, }; pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, DBValue}; use crate::backend::{Backend, ProofRegStateFor, ProofRegBackend}; -use sp_core::storage::{ChildInfo, ChildInfoProof, ChildrenMap}; +use sp_core::storage::{ChildInfo, ChildInfoProof}; use std::marker::PhantomData; /// Clonable recorder backend with inner mutability. @@ -464,7 +463,7 @@ mod tests { } fn proof_is_non_empty_after_value_is_read_inner>() { let trie_backend = test_trie_proof::

(); - let mut backend = test_proving(&trie_backend); + let backend = test_proving(&trie_backend); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof().unwrap().is_empty()); } @@ -509,7 +508,7 @@ mod tests { fn proof_recorded_and_checked_inner>() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryBackendWithProof::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)]); + let in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -518,7 +517,7 @@ mod tests { assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - let mut proving = in_memory.as_proof_backend().unwrap(); + let proving = in_memory.as_proof_backend().unwrap(); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof().unwrap(); @@ -545,7 +544,7 @@ mod tests { (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackendWithProof::::default(); - let mut in_memory = in_memory.update(contents); + let in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory.full_storage_root( std::iter::empty(), @@ -572,7 +571,7 @@ mod tests { vec![i] )); - let mut proving = in_memory.clone().as_proof_backend().unwrap(); + let proving = in_memory.clone().as_proof_backend().unwrap(); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof().unwrap(); @@ -587,7 +586,7 @@ mod tests { assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); assert_eq!(proof_check.storage(&[64]).unwrap(), None); - let mut proving = in_memory.as_proof_backend().unwrap(); + let proving = 
in_memory.as_proof_backend().unwrap(); assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof().unwrap(); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 2184ece57b4af..2ad5c40b34b79 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -22,7 +22,6 @@ use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, ChildrenProofMap, ProofInput, BackendStorageProof, FullBackendStorageProof}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_trie::RegStorageProof; use crate::backend::{ProofRegStateFor}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index ac65f31a6efe0..5e74179dd8cc7 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -430,6 +430,7 @@ impl IntoIterator for ChildrenProofMap { } /// Container recording trie nodes. 
+/// TODO EMCH looks unused pub struct RecordMapTrieNodes(HashMap>); impl sp_std::default::Default for RecordMapTrieNodes { From 69ebd3958b8f15965499e01b07703f6de00bb54a Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 10 Jun 2020 11:04:58 +0200 Subject: [PATCH 152/185] sp_api using associated rec backend --- .../api/proc-macro/src/decl_runtime_apis.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 15 +++++----- .../proc-macro/src/mock_impl_runtime_apis.rs | 4 +-- primitives/api/src/lib.rs | 30 +++++++------------ primitives/state-machine/src/lib.rs | 4 +-- 5 files changed, 24 insertions(+), 31 deletions(-) diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 6ceed91cacab8..7375cb609c7a2 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -415,7 +415,7 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { initialized_block: &std::cell::RefCell>>, native_call: Option, context: #crate_::ExecutionContext, - recorder: Option<&std::cell::RefCell<#crate_::ProofRecorder>>, + recorder: Option<&std::cell::RefCell<#crate_::ProofRecorder>>, ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { let version = call_runtime_at.runtime_version_at(at)?; use #crate_::InitializeBlock; diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 8ecc8d6ce4e33..d4e19d98183cf 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -212,7 +212,7 @@ fn generate_runtime_api_base_structures() -> Result { storage_transaction_cache: std::cell::RefCell< #crate_::StorageTransactionCache >, - recorder: Option>>, + recorder: Option>>, } // `RuntimeApi` itself is not threadsafe. 
However, an instance is only available in a @@ -281,17 +281,18 @@ fn generate_runtime_api_base_structures() -> Result { self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, pred)) } - fn record_proof(&mut self, kind: #crate_::StorageProofKind) { - self.recorder = Some(std::cell::RefCell::new(kind.into())); + fn record_proof(&mut self) { + self.recorder = Some(std::cell::RefCell::new(Default::default())); } - fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + fn extract_proof(&mut self) -> Option<#crate_::ProofRegFor>> { + use #crate_::RegStorageProof; self.recorder .take() .and_then(|recorder| { - let #crate_::ProofRecorder{ recorder, kind, input } = &mut *recorder.borrow_mut(); + let #crate_::ProofRecorder{ recorder, input } = &mut *recorder.borrow_mut(); let input = std::mem::replace(input, #crate_::ProofInput::None); - recorder.extract_proof(*kind, input).ok() + <#crate_::ProofRegFor>>::extract_proof(recorder, input).ok() }) } @@ -357,7 +358,7 @@ fn generate_runtime_api_base_structures() -> Result { &std::cell::RefCell<#crate_::OffchainOverlayedChanges>, &std::cell::RefCell<#crate_::StorageTransactionCache>, &std::cell::RefCell>>, - Option<&std::cell::RefCell<#crate_::ProofRecorder>>, + Option<&std::cell::RefCell<#crate_::ProofRecorder>>, ) -> std::result::Result<#crate_::NativeOrEncoded, E>, E, >( diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 4664a295483bc..727c33931c40f 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -95,11 +95,11 @@ fn implement_common_api_traits( Ok(pred(A::VERSION)) } - fn record_proof(&mut self, _kind: #crate_::StorageProofKind) { + fn record_proof(&mut self) { unimplemented!("`record_proof` not implemented for runtime api mocks") } - fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + fn extract_proof(&mut self) -> 
Option<#crate_::ProofRegFor>> { unimplemented!("`extract_proof` not implemented for runtime api mocks") } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 1c9c9d0c8ebf6..cfddbe94176c9 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -39,8 +39,8 @@ extern crate self as sp_api; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - OverlayedChanges, StorageProof, StorageProofKind, Backend as StateBackend, ChangesTrieState, InMemoryBackend, - ProofInput, + OverlayedChanges, StorageProof, StorageProofKind, backend::Backend as StateBackend, ChangesTrieState, InMemoryBackend, + ProofInput, backend::ProofRegFor, RegStorageProof, }; #[doc(hidden)] #[cfg(feature = "std")] @@ -374,14 +374,14 @@ pub trait ApiExt: ApiErrorExt { ) -> Result where Self: Sized; /// Start recording all accessed trie nodes for generating proofs. - fn record_proof(&mut self, kind: StorageProofKind); + fn record_proof(&mut self); /// Extract the recorded proof. /// /// This stops the proof recording. /// /// If `record_proof` was not called before, this will return `None`. - fn extract_proof(&mut self) -> Option; + fn extract_proof(&mut self) -> Option>>; /// Convert the api object into the storage changes that were done while executing runtime /// api functions. @@ -443,7 +443,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>>, + pub recorder: Option<&'a RefCell>>, } /// Something that can call into the an api at a given block. @@ -523,27 +523,19 @@ pub trait RuntimeApiInfo { /// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] -pub struct ProofRecorder { +pub struct ProofRecorder>, Block: BlockT> { /// The recorder to use over the db use by trie db. - pub recorder: sp_state_machine::ProofRecorder>, - /// The kind of proof to produce. 
- pub kind: StorageProofKind, + /// TODO EMCH is this the sync recorder, should not (there is something fishy with this sync rec) + pub recorder: sp_state_machine::RecordBackendFor>, /// The additional input needed for the proof. pub input: ProofInput, } #[cfg(feature = "std")] -impl From for ProofRecorder { - fn from(kind: StorageProofKind) -> Self { - let recorder = if kind.is_full_proof_recorder_needed() { - sp_state_machine::ProofRecorder::>::Full(Default::default()) - } else { - sp_state_machine::ProofRecorder::>::Flat(Default::default()) - }; - +impl>, Block: BlockT> Default for ProofRecorder { + fn default() -> Self { ProofRecorder { - recorder, - kind, + recorder: Default::default(), input: ProofInput::None, } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 41cb84d3cb03b..f44ee23e38435 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -45,8 +45,8 @@ mod stats; mod read_only; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, - TrieNodesStorageProof as StorageProof, StorageProof as StorageProofT, StorageProofKind, ChildrenProofMap, - ProofInput, ProofInputKind, ProofNodes}; + TrieNodesStorageProof, StorageProof, StorageProofKind, ChildrenProofMap, + ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RegStorageProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; From f4e6cb1a25869c09d68e2eb5f561b3a7fe900935 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 10 Jun 2020 14:34:40 +0200 Subject: [PATCH 153/185] a few more, did not abstract state at client level (direct simpleproof usage) --- client/api/src/backend.rs | 4 +- client/api/src/call_executor.rs | 25 ++++---- client/api/src/cht.rs | 26 ++++---- client/api/src/in_mem.rs | 2 +- client/api/src/lib.rs | 3 +- client/api/src/light.rs | 14 ++--- client/api/src/proof_provider.rs | 18 +++--- 
client/block-builder/src/lib.rs | 10 +-- client/db/src/lib.rs | 67 ++++++++++++--------- client/db/src/storage_cache.rs | 36 +++++++---- client/network/src/chain.rs | 6 +- client/network/src/light_client_handler.rs | 39 ++++++------ client/network/src/on_demand_layer.rs | 11 ++-- client/network/src/protocol.rs | 17 +++--- client/rpc/src/state/mod.rs | 4 +- client/rpc/src/state/state_full.rs | 10 ++- client/service/src/client/call_executor.rs | 13 ++-- primitives/consensus/common/src/lib.rs | 34 ++--------- primitives/state-machine/src/backend.rs | 4 ++ primitives/state-machine/src/lib.rs | 5 +- primitives/trie/src/storage_proof/simple.rs | 9 +++ 21 files changed, 177 insertions(+), 180 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 6a7114fcc825c..bdc79f764803f 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -40,7 +40,9 @@ use sp_blockchain; use sp_consensus::BlockOrigin; use parking_lot::RwLock; -pub use sp_state_machine::Backend as StateBackend; +pub use sp_state_machine::backend::Backend as StateBackend; +pub use sp_state_machine::backend::ProofRegFor; +pub use sp_state_machine::backend::ProofFor; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. 
diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 0e9957018c6ee..677719a6c7f94 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -24,8 +24,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, HashFor}, }; use sp_state_machine::{ - OverlayedChanges, ExecutionManager, ExecutionStrategy, StorageProof, - StorageProofKind, + OverlayedChanges, ExecutionManager, ExecutionStrategy, }; use sc_executor::{RuntimeVersion, NativeVersion}; use sp_externalities::Extensions; @@ -94,7 +93,7 @@ pub trait CallExecutor { initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, - proof_recorder: Option<&RefCell>>, + proof_recorder: Option<&RefCell>::State, B>>>, extensions: Option, ) -> sp_blockchain::Result> where ExecutionManager: Clone; @@ -106,33 +105,31 @@ pub trait CallExecutor { /// Execute a call to a contract on top of given state, gathering execution proof. /// /// No changes are made. - fn prove_at_state>>( + fn prove_at_state>>( &self, - mut state: S, + state: S, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8], - kind: StorageProofKind, - ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - let trie_state = state.as_trie_backend() + call_data: &[u8] + ) -> Result<(Vec, sp_state_machine::backend::ProofRegFor>), sp_blockchain::Error> { + let proof_state = state.as_proof_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box )?; - self.prove_at_trie_state(trie_state, overlay, method, call_data, kind) + self.prove_at_proof_backend_state(&proof_state, overlay, method, call_data) } /// Execute a call to a contract on top of given trie state, gathering execution proof. /// /// No changes are made. 
- fn prove_at_trie_state>>( + fn prove_at_proof_backend_state>>( &self, - trie_state: &sp_state_machine::TrieBackend>, + proof_backend: &P, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8], - kind: StorageProofKind, - ) -> Result<(Vec, StorageProof), sp_blockchain::Error>; + ) -> Result<(Vec, sp_state_machine::backend::ProofRegFor>), sp_blockchain::Error>; /// Get runtime version if supported. fn native_runtime_version(&self) -> Option<&NativeVersion>; diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 33b59ab60c088..36d8e2c5584b5 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -32,13 +32,14 @@ use sp_trie; use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; use sp_state_machine::{ - MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_flat_proving_backend, - StorageProofKind, + MemoryDB, backend::Backend as StateBackend, SimpleProof as StorageProof, + prove_read_on_proof_backend, read_proof_check, read_proof_check_on_proving_backend, + SimpleProof, InMemoryBackend, }; - use sp_blockchain::{Error as ClientError, Result as ClientResult}; +type ProofCheckBackend = sp_state_machine::TrieBackend, H, SimpleProof>; + /// The size of each CHT. This value is passed to every CHT-related function from /// production code. Other values are passed from tests. 
const SIZE: u32 = 2048; @@ -117,13 +118,12 @@ pub fn build_proof( .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); - let trie_storage = storage.as_trie_backend() - .expect("InMemoryState::as_trie_backend always returns Some; qed"); - prove_read_on_trie_backend( - trie_storage, + let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let proof_backend = storage.as_proof_backend() + .expect("InMemoryState::as_proof_backend always returns Some; qed"); + prove_read_on_proof_backend( + &proof_backend, blocks.into_iter().map(|number| encode_cht_key(number)), - StorageProofKind::Flat, ).map_err(ClientError::Execution) } @@ -144,7 +144,7 @@ pub fn check_proof( local_number, remote_hash, move |local_root, local_cht_key| - read_proof_check::( + read_proof_check::, Hasher, _>( local_root, remote_proof, ::std::iter::once(local_cht_key), @@ -161,7 +161,7 @@ pub fn check_proof_on_proving_backend( local_root: Header::Hash, local_number: Header::Number, remote_hash: Header::Hash, - proving_backend: &TrieBackend, Hasher>, + proving_backend: &ProofCheckBackend, ) -> ClientResult<()> where Header: HeaderT, @@ -173,7 +173,7 @@ pub fn check_proof_on_proving_backend( local_number, remote_hash, |_, local_cht_key| - read_proof_check_on_flat_proving_backend::( + read_proof_check_on_proving_backend::, Hasher>( proving_backend, local_cht_key, ).map_err(|e| ClientError::from(e)), diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 45c41fbcb7b20..5bb1cb5e4882a 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -28,7 +28,7 @@ use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor}; use sp_runtime::{Justification, Storage}; use sp_state_machine::{ - ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection, + ChangesTrieTransaction, InMemoryBackend, 
backend::Backend as StateBackend, StorageCollection, ChildStorageCollection, }; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index e9c93b17eea02..e9a036a6b3f63 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -37,8 +37,7 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{StorageProof, LegacyDecodeAdapter, LegacyEncodeAdapter, - FlatEncodeAdapter, StorageProofKind, ExecutionStrategy, CloneableSpawn, ProofNodes}; +pub use sp_state_machine::{SimpleProof, StorageProof, ExecutionStrategy, CloneableSpawn, ProofNodes}; /// Usage Information Provider interface /// diff --git a/client/api/src/light.rs b/client/api/src/light.rs index b359c1149eea6..79f020c78e333 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -27,7 +27,7 @@ use sp_runtime::{ generic::BlockId }; use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey}; -use sp_state_machine::StorageProof; +use sp_state_machine::{SimpleProof, StorageProof}; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, Error as ClientError, Result as ClientResult, @@ -124,7 +124,7 @@ pub struct ChangesProof { pub roots: BTreeMap, /// The proofs for all changes tries roots that have been touched AND are /// missing from the requester' node. It is a map of CHT number => proof. - pub roots_proof: StorageProof, + pub roots_proof: SimpleProof, } /// Remote block body request @@ -190,31 +190,31 @@ pub trait Fetcher: Send + Sync { /// /// Implementations of this trait should not use any prunable blockchain data /// except that is passed to its methods. -pub trait FetchChecker: Send + Sync { +pub trait FetchChecker: Send + Sync { /// Check remote header proof. 
fn check_header_proof( &self, request: &RemoteHeaderRequest, header: Option, - remote_proof: StorageProof, + remote_proof: SimpleProof, ) -> ClientResult; /// Check remote storage read proof. fn check_read_proof( &self, request: &RemoteReadRequest, - remote_proof: StorageProof, + remote_proof: P, ) -> ClientResult, Option>>>; /// Check remote storage read proof. fn check_read_child_proof( &self, request: &RemoteReadChildRequest, - remote_proof: StorageProof, + remote_proof: P, ) -> ClientResult, Option>>>; /// Check remote method execution proof. fn check_execution_proof( &self, request: &RemoteCallRequest, - remote_proof: StorageProof, + remote_proof: P, ) -> ClientResult>; /// Check remote changes query proof. fn check_changes_proof( diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index f86120385df9b..bb537883ffec5 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -19,20 +19,20 @@ //! Proof utilities use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT}, + traits::Block as BlockT, }; -use crate::{StorageProof, ChangesProof, StorageProofKind}; +use crate::{SimpleProof, ChangesProof}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; +use sp_trie::StorageProof; /// Interface for providing block proving utilities. -pub trait ProofProvider { +pub trait ProofProvider { /// Reads storage value at a given block + key, returning read proof. fn read_proof( &self, id: &BlockId, keys: &mut dyn Iterator, - kind: StorageProofKind, - ) -> sp_blockchain::Result; + ) -> sp_blockchain::Result; /// Reads child storage value at a given block + storage_key + key, returning /// read proof. @@ -41,8 +41,7 @@ pub trait ProofProvider { id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, - kind: StorageProofKind, - ) -> sp_blockchain::Result; + ) -> sp_blockchain::Result; /// Execute a call to a contract on top of state in a block of given hash /// AND returning execution proof. 
@@ -53,11 +52,10 @@ pub trait ProofProvider { id: &BlockId, method: &str, call_data: &[u8], - kind: StorageProofKind, - ) -> sp_blockchain::Result<(Vec, StorageProof)>; + ) -> sp_blockchain::Result<(Vec, Proof)>; /// Reads given header and generates CHT-based header proof. - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)>; + fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, SimpleProof)>; /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. /// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index bd96275f5c877..1f052dd79fbb7 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -34,7 +34,7 @@ use sp_runtime::{ }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; -use sp_api::{Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof}; +use sp_api::{Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges}; use sp_consensus::RecordProof; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; @@ -53,12 +53,12 @@ pub struct BuiltBlock, /// An optional proof that was recorded while building the block. - pub proof: Option, + pub proof: Option>>, } impl>> BuiltBlock { /// Convert into the inner values. 
- pub fn into_inner(self) -> (Block, StorageChanges, Option) { + pub fn into_inner(self) -> (Block, StorageChanges, Option>>) { (self.block, self.storage_changes, self.proof) } } @@ -130,8 +130,8 @@ where let mut api = api.runtime_api(); - if let Some(kind) = record_proof.kind() { - api.record_proof(kind); + if record_proof.yes() { + api.record_proof(); } let block_id = BlockId::Hash(parent_hash); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 9fb8f3c8c0454..787e96d97750d 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -76,8 +76,8 @@ use sp_runtime::traits::{ }; use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, - StorageCollection, ChildStorageCollection, - backend::Backend as StateBackend, StateMachineStats, + StorageCollection, ChildStorageCollection, SimpleProof, + backend::{Backend as StateBackend, ProofRegStateFor}, StateMachineStats, }; use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; @@ -101,7 +101,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor + Arc>>, HashFor, SimpleProof, >; const DB_HASH_LEN: usize = 32; @@ -113,7 +113,7 @@ pub type DbHash = [u8; DB_HASH_LEN]; /// It makes sure that the hash we are using stays pinned in storage /// until this structure is dropped. 
pub struct RefTrackingState { - state: DbState, + state: Option>, storage: Arc>, parent_hash: Option, } @@ -121,7 +121,7 @@ pub struct RefTrackingState { impl RefTrackingState { fn new(state: DbState, storage: Arc>, parent_hash: Option) -> Self { RefTrackingState { - state, + state: Some(state), parent_hash, storage, } @@ -142,17 +142,27 @@ impl std::fmt::Debug for RefTrackingState { } } +impl RefTrackingState { + #[inline] + fn state(&self) -> &DbState { + self.state.as_ref().expect("Non dropped state") + } +} + + impl StateBackend> for RefTrackingState { - type Error = as StateBackend>>::Error; + type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; + type StorageProof = as StateBackend>>::StorageProof; + type ProofRegBackend = as StateBackend>>::ProofRegBackend; + type ProofCheckBackend = as StateBackend>>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.storage(key) + self.state().storage(key) } fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.state.storage_hash(key) + self.state().storage_hash(key) } fn child_storage( @@ -160,11 +170,11 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.child_storage(child_info, key) + self.state().child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { - self.state.exists_storage(key) + self.state().exists_storage(key) } fn exists_child_storage( @@ -172,11 +182,11 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, key: &[u8], ) -> Result { - self.state.exists_child_storage(child_info, key) + self.state().exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.next_storage_key(key) + self.state().next_storage_key(key) } fn next_child_storage_key( @@ -184,15 +194,15 @@ impl StateBackend> for 
RefTrackingState { child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.next_child_storage_key(child_info, key) + self.state().next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_keys_with_prefix(prefix, f) + self.state().for_keys_with_prefix(prefix, f) } fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_key_values_with_prefix(prefix, f) + self.state().for_key_values_with_prefix(prefix, f) } fn for_keys_in_child_storage( @@ -200,7 +210,7 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(child_info, f) + self.state().for_keys_in_child_storage(child_info, f) } fn for_child_keys_with_prefix( @@ -209,14 +219,14 @@ impl StateBackend> for RefTrackingState { prefix: &[u8], f: F, ) { - self.state.for_child_keys_with_prefix(child_info, prefix, f) + self.state().for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root<'a>( &self, delta: impl Iterator)>, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta) + self.state().storage_root(delta) } fn child_storage_root<'a>( @@ -224,15 +234,15 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, delta: impl Iterator)>, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root(child_info, delta) + self.state().child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.pairs() + self.state().pairs() } fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.keys(prefix) + self.state().keys(prefix) } fn child_keys( @@ -240,21 +250,20 @@ impl StateBackend> for RefTrackingState { child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.child_keys(child_info, prefix) + self.state().child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { - self.state.as_trie_backend() + fn 
from_reg_state(mut self, previous: ProofRegStateFor>) -> Option { + let state = std::mem::replace(&mut self.state, Default::default()).expect("Non dropped state"); + state.from_reg_state(previous) } fn register_overlay_stats(&mut self, stats: &StateMachineStats) { - self.state.register_overlay_stats(stats); + self.state().register_overlay_stats(stats); } fn usage_info(&self) -> StateUsageInfo { - self.state.usage_info() + self.state().usage_info() } } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 434b301ed6240..70de8f0a79dd6 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -26,7 +26,7 @@ use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; use sp_core::hexdisplay::HexDisplay; use sp_core::storage::ChildInfo; use sp_state_machine::{ - backend::Backend as StateBackend, TrieBackend, StorageKey, StorageValue, + backend::{Backend as StateBackend, ProofRegStateFor}, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; use log::trace; @@ -495,7 +495,9 @@ impl>, B: BlockT> CachingState { impl>, B: BlockT> StateBackend> for CachingState { type Error = S::Error; type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; + type StorageProof = S::StorageProof; + type ProofRegBackend = S::ProofRegBackend; + type ProofCheckBackend = S::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { let local_cache = self.cache.local_cache.upgradable_read(); @@ -652,8 +654,8 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { - self.state.as_trie_backend() + fn from_reg_state(self, previous: ProofRegStateFor>) -> Option { + self.state.from_reg_state(previous) } fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { @@ -736,7 +738,9 @@ impl std::fmt::Debug for SyncingCachingState { impl>, B: BlockT> 
StateBackend> for SyncingCachingState { type Error = S::Error; type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; + type StorageProof = S::StorageProof; + type ProofRegBackend = S::ProofRegBackend; + type ProofCheckBackend = S::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.caching_state().storage(key) @@ -834,13 +838,6 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { - self.caching_state - .as_mut() - .expect("`caching_state` is valid for the lifetime of the object; qed") - .as_trie_backend() - } - fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { self.caching_state().register_overlay_stats(stats); } @@ -848,12 +845,22 @@ impl>, B: BlockT> StateBackend> for Syncin fn usage_info(&self) -> sp_state_machine::UsageInfo { self.caching_state().usage_info() } + + fn from_reg_state(mut self, previous: ProofRegStateFor>) -> Option { + self.sync().and_then(|s| s.from_reg_state(previous)) + } } impl Drop for SyncingCachingState { fn drop(&mut self) { + let _ = self.sync(); + } +} + +impl SyncingCachingState { + fn sync(&mut self) -> Option> { if self.disable_syncing { - return; + return None; } if let Some(mut caching_state) = self.caching_state.take() { @@ -864,6 +871,9 @@ impl Drop for SyncingCachingState { let is_best = self.meta.read().best_hash == hash; caching_state.cache.sync_cache(&[], &[], vec![], vec![], None, None, is_best); } + Some(caching_state) + } else { + None } } } diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 20fbe0284397d..eb605affd3611 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -19,17 +19,17 @@ //! 
Blockchain access trait use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; -use sc_client_api::{BlockBackend, ProofProvider}; +use sc_client_api::{BlockBackend, ProofProvider, SimpleProof as StorageProof}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; /// Local client abstraction for the network. -pub trait Client: HeaderBackend + ProofProvider + BlockIdTo +pub trait Client: HeaderBackend + ProofProvider + BlockIdTo + BlockBackend + HeaderMetadata + Send + Sync {} impl Client for T where - T: HeaderBackend + ProofProvider + BlockIdTo + T: HeaderBackend + ProofProvider + BlockIdTo + BlockBackend + HeaderMetadata + Send + Sync {} diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index e37b9c2e63beb..a6d9a06468fd7 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -56,8 +56,8 @@ use libp2p::{ use nohash_hasher::IntMap; use prost::Message; use sc_client_api::{ - StorageProof, StorageProofKind, LegacyDecodeAdapter, - FlatEncodeAdapter as LegacyEncodeAdapter, + SimpleProof as StorageProof, + StorageProof as StorageProofT, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -290,7 +290,7 @@ pub struct LightClientHandler { /// Blockchain client. chain: Arc>, /// Verifies that received responses are correct. - checker: Arc>, + checker: Arc>, /// Peer information (addresses, their best block, etc.) peers: HashMap>, /// Futures sending back response to remote clients. @@ -313,7 +313,7 @@ where pub fn new( cfg: Config, chain: Arc>, - checker: Arc>, + checker: Arc>, peerset: sc_peerset::PeersetHandle, ) -> Self { LightClientHandler { @@ -445,7 +445,7 @@ where match response.response { Some(Response::RemoteCallResponse(response)) => if let Request::Call { request , .. 
} = request { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = Decode::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { @@ -454,12 +454,12 @@ where Some(Response::RemoteReadResponse(response)) => match request { Request::Read { request, .. } => { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = Decode::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } Request::ReadChild { request, .. } => { - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = Decode::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } @@ -468,7 +468,7 @@ where Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. } = request { let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = LegacyDecodeAdapter::decode(&mut response.roots_proof.as_ref())?.0; + let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; let roots = { let mut r = BTreeMap::new(); for pair in response.roots { @@ -496,7 +496,7 @@ where } else { Some(Decode::decode(&mut response.header.as_ref())?) 
}; - let proof = LegacyDecodeAdapter::decode(&mut response.proof.as_ref())?.0; + let proof = Decode::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { @@ -550,7 +550,6 @@ where &BlockId::Hash(block), &request.method, &request.data, - StorageProofKind::Flat, ) { Ok((_, proof)) => proof, Err(e) => { @@ -565,7 +564,7 @@ where }; let response = { - let r = schema::v1::light::RemoteCallResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteCallResponse(r) }; @@ -593,7 +592,6 @@ where let proof = match self.chain.read_proof( &BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flat, ) { Ok(proof) => proof, Err(error) => { @@ -607,7 +605,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -641,8 +639,7 @@ where let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( &BlockId::Hash(block), &child_info, - &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flat, + &mut request.keys.iter().map(AsRef::as_ref) )) { Ok(proof) => proof, Err(error) => { @@ -657,7 +654,7 @@ where }; let response = { - let r = schema::v1::light::RemoteReadResponse { proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; schema::v1::light::response::Response::RemoteReadResponse(r) }; @@ -685,7 +682,7 @@ where }; let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: LegacyEncodeAdapter(&proof).encode() }; + let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; 
schema::v1::light::response::Response::RemoteHeaderResponse(r) }; @@ -745,7 +742,7 @@ where roots: proof.roots.into_iter() .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), - roots_proof: LegacyEncodeAdapter(&proof.roots_proof).encode(), + roots_proof: proof.roots_proof.encode(), }; schema::v1::light::response::Response::RemoteChangesResponse(r) }; @@ -1333,7 +1330,7 @@ mod tests { swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, yamux }; - use sc_client_api::{StorageProof, RemoteReadChildRequest, FetchChecker}; + use sc_client_api::{StorageProof as StorageProofT, RemoteReadChildRequest, FetchChecker, SimpleProof as StorageProof}; use sp_blockchain::{Error as ClientError}; use sp_core::storage::ChildInfo; use std::{ @@ -1353,7 +1350,7 @@ mod tests { type Swarm = libp2p::swarm::Swarm; fn empty_proof() -> Vec { - LegacyEncodeAdapter(&StorageProof::empty()).encode() + StorageProof::empty().encode() } fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { @@ -1377,7 +1374,7 @@ mod tests { _mark: std::marker::PhantomData } - impl light::FetchChecker for DummyFetchChecker { + impl light::FetchChecker for DummyFetchChecker { fn check_header_proof( &self, _request: &RemoteHeaderRequest, diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 084172ee57c4f..baa555f38d9ac 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -24,7 +24,8 @@ use futures::{channel::oneshot, prelude::*}; use parking_lot::Mutex; use sc_client_api::{ FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, - RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, StorageProof, ChangesProof, + RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, SimpleProof as StorageProof, + ChangesProof, }; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use 
sp_blockchain::Error as ClientError; @@ -38,7 +39,7 @@ use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll}; /// responsible for pulling elements out of that queue and fulfilling them. pub struct OnDemand { /// Objects that checks whether what has been retrieved is correct. - checker: Arc>, + checker: Arc>, /// Queue of requests. Set to `Some` at initialization, then extracted by the network. /// @@ -58,7 +59,7 @@ pub struct OnDemand { #[derive(Default, Clone)] pub struct AlwaysBadChecker; -impl FetchChecker for AlwaysBadChecker { +impl FetchChecker for AlwaysBadChecker { fn check_header_proof( &self, _request: &RemoteHeaderRequest, @@ -114,7 +115,7 @@ where B::Header: HeaderT, { /// Creates new on-demand service. - pub fn new(checker: Arc>) -> Self { + pub fn new(checker: Arc>) -> Self { let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); let requests_queue = Mutex::new(Some(requests_queue)); @@ -126,7 +127,7 @@ where } /// Get checker reference. 
- pub fn checker(&self) -> &Arc> { + pub fn checker(&self) -> &Arc> { &self.checker } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index f5923a1edd49e..e08c0b440eaaa 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -56,7 +56,7 @@ use std::sync::Arc; use std::fmt::Write; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; -use sc_client_api::{ChangesProof, StorageProof, StorageProofKind}; +use sc_client_api::{ChangesProof, StorageProof}; use util::LruHashSet; use wasm_timer::Instant; @@ -1460,7 +1460,6 @@ impl Protocol { &BlockId::Hash(request.block), &request.method, &request.data, - StorageProofKind::Flat, ) { Ok((_, proof)) => proof, Err(error) => { @@ -1481,7 +1480,7 @@ impl Protocol { None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof: proof.into_nodes(), }), ); } @@ -1607,8 +1606,7 @@ impl Protocol { request.id, who, keys_str(), request.block); let proof = match self.context_data.chain.read_proof( &BlockId::Hash(request.block), - &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flat, + &mut request.keys.iter().map(AsRef::as_ref) ) { Ok(proof) => proof, Err(error) => { @@ -1627,7 +1625,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof: proof.into_nodes(), }), ); } @@ -1664,7 +1662,6 @@ impl Protocol { &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), - StorageProofKind::Flat, )) { Ok(proof) => proof, Err(error) => { @@ -1684,7 +1681,7 @@ impl Protocol { None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, - proof: proof.expect_flatten_content(), + proof: proof.into_nodes(), }), ); } @@ -1714,7 +1711,7 @@ impl Protocol { 
GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, - proof: proof.expect_flatten_content(), + proof: proof.into_nodes(), }), ); } @@ -1777,7 +1774,7 @@ impl Protocol { max: proof.max_block, proof: proof.proof, roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof.expect_flatten_content(), + roots_proof: proof.roots_proof.into_nodes(), }), ); } diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 168dc3e0105a4..ec1fde75ad1e7 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -40,7 +40,7 @@ use self::error::{Error, FutureResult}; pub use sc_rpc_api::state::*; pub use sc_rpc_api::child_state::*; -use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend, ProofProvider}; +use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend, ProofProvider, SimpleProof}; use sp_blockchain::{HeaderMetadata, HeaderBackend}; const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000; @@ -175,7 +175,7 @@ pub fn new_full( where Block: BlockT + 'static, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + HeaderMetadata + BlockchainEvents + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index caf44bfb9d590..4627a9cfae712 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -36,12 +36,11 @@ use sp_version::RuntimeVersion; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion, CheckedSub}, }; - use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err, SimpleProof}; 
use std::marker::PhantomData; -use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider, ProofProvider, StorageProofKind}; +use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider, ProofProvider}; /// Ranges to query in state_queryStorage. struct QueryStorageRange { @@ -219,7 +218,7 @@ impl FullState impl StateBackend for FullState where Block: BlockT + 'static, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + HeaderMetadata + BlockchainEvents + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, @@ -363,10 +362,9 @@ impl StateBackend for FullState>>( + fn prove_at_proof_backend_state>>( &self, - trie_state: &sp_state_machine::TrieBackend>, + proof_backend: &P, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8], - kind: StorageProofKind, - ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _>( - trie_state, + call_data: &[u8] + ) -> Result<(Vec, ProofRegFor>), sp_blockchain::Error> { + sp_state_machine::prove_execution_on_proof_backend::<_, _, NumberFor, _>( + backend, overlay, &self.executor, self.spawn_handle.clone(), diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 41e3e8b8a6bfd..4d3415f91d13c 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -36,7 +36,7 @@ use sp_runtime::{ }; use futures::prelude::*; pub use sp_inherents::InherentData; -use sp_state_machine::StorageProof; +use sp_state_machine::SimpleProof as StorageProof; pub use sp_state_machine::StorageProofKind; pub mod block_validation; pub mod offline_tracker; @@ -56,7 +56,7 @@ pub use block_import::{ ImportResult, JustificationImport, FinalityProofImport, }; pub use select_chain::SelectChain; -pub use sp_state_machine::Backend as StateBackend; +pub use 
sp_state_machine::backend::Backend as StateBackend; /// Block status. #[derive(Debug, PartialEq, Eq)] @@ -108,7 +108,7 @@ pub struct Proposal { #[derive(Copy, Clone, PartialEq)] pub enum RecordProof { /// `Yes`, record a proof. - Yes(StorageProofKind), + Yes, /// `No`, don't record any proof. No, } @@ -117,40 +117,16 @@ impl RecordProof { /// Returns if `Self` == `Yes`. pub fn yes(&self) -> bool { match self { - Self::Yes(_) => true, + Self::Yes => true, Self::No => false, } } - - /// Returns storage proof kind. - pub fn kind(self) -> Option { - match self { - Self::Yes(kind) => Some(kind), - Self::No => None, - } - } -} - -impl From for RecordProof { - fn from(val: StorageProofKind) -> Self { - Self::Yes(val) - } -} - -impl From> for RecordProof { - fn from(val: Option) -> Self { - match val { - Some(kind) => Self::Yes(kind), - None => Self::No, - } - } } impl From for RecordProof { fn from(val: bool) -> Self { if val { - // default to a flatten proof. - Self::Yes(StorageProofKind::Flat) + Self::Yes } else { Self::No } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 5c11141f6c2d9..806576538b290 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -29,6 +29,10 @@ pub type ProofRegStateFor = <>::ProofRegBackend as ProofRe /// Access the state of the proof backend of a backend. pub type ProofRegFor = <>::StorageProof as BackendStorageProof>::StorageProofReg; +/// Access the state of the proof backend of a backend. +/// TODO should not be an alias +pub type ProofFor = >::StorageProof; + /// A state backend is used to read state data and can have changes committed /// to it. 
/// diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index f44ee23e38435..4442eeefff1fd 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -46,7 +46,8 @@ mod read_only; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, TrieNodesStorageProof, StorageProof, StorageProofKind, ChildrenProofMap, - ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RegStorageProof}; + ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RegStorageProof, + SimpleProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; @@ -93,7 +94,7 @@ pub type ChangesTrieTransaction = ( ); /// Trie backend with in-memory storage. -pub type InMemoryBackend = TrieBackend, H, sp_trie::SimpleProof>; +pub type InMemoryBackend = TrieBackend, H, SimpleProof>; /// Trie backend with in-memory storage and choice of proof. /// TODO EMCH consider renaming to ProofCheckBackend diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index ccc77c134df0d..1943569e0e637 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -26,6 +26,15 @@ pub struct Flat(pub(crate) ProofNodes); #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct Full(pub(crate) ChildrenProofMap); +impl Flat { + /// Access to inner proof node, + /// mainly needed for part of the + /// code that is not generic. 
+ pub fn into_nodes(self) -> ProofNodes { + self.0 + } +} + impl StorageProof for Flat { fn empty() -> Self { Flat(Default::default()) From 4711052606e89ab4d2dd4ffb561f49c7c8cee3d7 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 10 Jun 2020 17:40:20 +0200 Subject: [PATCH 154/185] the proof recorde struct is useless when we extract mergeable proof or input should move when fetching the recorder as associated content of proofRecorder directly (with access from ProofRecorder trait). --- client/api/src/call_executor.rs | 2 +- client/api/src/proof_provider.rs | 1 + client/db/src/bench.rs | 21 +++--- client/service/src/builder.rs | 6 +- client/service/src/client/call_executor.rs | 70 ++++++++----------- client/service/src/client/client.rs | 31 ++++---- client/service/src/client/light/backend.rs | 10 +-- .../service/src/client/light/call_executor.rs | 57 +++++++-------- client/service/src/client/light/fetcher.rs | 12 ++-- primitives/api/src/lib.rs | 1 + primitives/state-machine/src/lib.rs | 2 +- 11 files changed, 99 insertions(+), 114 deletions(-) diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 677719a6c7f94..c5e1145668a71 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -93,7 +93,7 @@ pub trait CallExecutor { initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, - proof_recorder: Option<&RefCell>::State, B>>>, + recorder: Option>::State, HashFor>>, extensions: Option, ) -> sp_blockchain::Result> where ExecutionManager: Clone; diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index bb537883ffec5..75fc1cfa1c82e 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -28,6 +28,7 @@ use sp_trie::StorageProof; /// Interface for providing block proving utilities. pub trait ProofProvider { /// Reads storage value at a given block + key, returning read proof. 
+ /// TODO EMCH consider returning Proof::ProofReg instead!!! : more flexible fn read_proof( &self, id: &BlockId, diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 99ce1edae00c5..521640776fdd6 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -27,12 +27,12 @@ use sp_trie::{MemoryDB, prefixed_key}; use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::Backend as StateBackend}; +use sp_state_machine::{DBValue, backend::{Backend as StateBackend, ProofRegStateFor}, SimpleProof}; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor + Arc>>, HashFor, SimpleProof, >; type State = CachingState, B>; @@ -118,7 +118,9 @@ fn state_err() -> String { impl StateBackend> for BenchmarkingState { type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; + type StorageProof = as StateBackend>>::StorageProof; + type ProofRegBackend = as StateBackend>>::ProofRegBackend; + type ProofCheckBackend = as StateBackend>>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key) @@ -205,7 +207,8 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, delta: impl Iterator)>, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) + self.state.borrow().as_ref() + .map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -224,12 +227,6 @@ impl StateBackend> for BenchmarkingState { self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) } - fn 
as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { - None - } - fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) -> Result<(), Self::Error> { @@ -282,6 +279,10 @@ impl StateBackend> for BenchmarkingState { fn usage_info(&self) -> sp_state_machine::UsageInfo { self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) } + + fn from_reg_state(self, previous: ProofRegStateFor>) -> Option { + self.state.borrow_mut().take().and_then(|s| s.from_reg_state(previous)) + } } impl std::fmt::Debug for BenchmarkingState { diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index d921606ea6b16..c8cc5fae22021 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -23,7 +23,8 @@ use crate::config::{Configuration, KeystoreConfig, PrometheusConfig, OffchainWor use crate::metrics::MetricsService; use sc_client_api::{ self, BlockchainEvents, backend::RemoteBackend, light::RemoteBlockchain, execution_extensions::ExtensionsFactory, - ExecutorProvider, CallExecutor, ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, + ExecutorProvider, CallExecutor, ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, ProofFor, SimpleProof, + StateBackend, }; use crate::client::{Client, ClientConfig}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; @@ -893,6 +894,8 @@ ServiceBuilder< TImpQu: 'static + ImportQueue, TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, TRpc: sc_rpc::RpcExtension, + // TODO EMCH this constraint should be lifted when client get generic over StateBackend and Proof + TBackend::State: StateBackend, StorageProof = SimpleProof>, { /// Set an ExecutionExtensionsFactory @@ -916,6 +919,7 @@ ServiceBuilder< >, >, Error> where TExec: CallExecutor, + TExec: CallExecutor, { let ServiceBuilder { marker: _, diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs 
index dd22dd64dcb31..be4fce8cfe22f 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -23,7 +23,7 @@ use sp_runtime::{ }; use sp_state_machine::{ self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::Backend as _, StorageProof, StorageProofKind, ProofInput, + backend::{Backend as _, ProofRegFor}, StorageProof, StorageProofKind, ProofInput, }; use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; @@ -140,7 +140,7 @@ where initialize_block: InitializeBlock<'a, Block>, execution_manager: ExecutionManager, native_call: Option, - recorder: Option<&RefCell>>, + recorder: Option<&RefCell>::State, Block>>>, extensions: Option, ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { match initialize_block { @@ -162,47 +162,36 @@ where match recorder { Some(recorder) => { - let ProofRecorder{ recorder, kind, input} = &mut *recorder.borrow_mut(); - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box - )?; - - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_state); + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); // It is important to extract the runtime code here before we create the proof // recorder. let runtime_code = state_runtime_code.runtime_code()?; - let input_backend = std::mem::replace(input, ProofInput::None); - let backend = sp_state_machine::ProvingBackend::new_with_recorder( - trie_state, - recorder.clone(), - *kind, - input_backend, + let state = self.backend.state_at(*at)?; + + // TODO EMCH we need to check if previously recording root are still in recorder, + // previous code did some input merging (if it is not we can remove the ProofRecorder + // struct). 
+ let backend = state.from_reg_state(recorder) + .ok_or_else(|| + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box + )?; + + let mut state_machine = StateMachine::new( + &backend, + changes_trie_state, + changes, + offchain_changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), ); - let result = { - use std::borrow::BorrowMut; - let changes = &mut *changes.borrow_mut(); - let mut state_machine = StateMachine::new( - &backend, - changes_trie_state, - changes, - offchain_changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ); - // TODO: https://github.com/paritytech/substrate/issues/4455 - // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) - }; - let (recorder_state, input_state) = backend.recording_state()?; - *recorder = recorder_state; - *input = input_state; - result + // TODO: https://github.com/paritytech/substrate/issues/4455 + // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) + state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); @@ -254,14 +243,13 @@ where call_data: &[u8] ) -> Result<(Vec, ProofRegFor>), sp_blockchain::Error> { sp_state_machine::prove_execution_on_proof_backend::<_, _, NumberFor, _>( - backend, + proof_backend, overlay, &self.executor, self.spawn_handle.clone(), method, call_data, - kind, - &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, + &sp_state_machine::backend::BackendRuntimeCode::new(proof_backend).runtime_code()?, ) .map_err(Into::into) } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 
68bb1fd4cfa20..63e575ce731ed 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -18,6 +18,7 @@ //! Substrate Client +use sc_client_api::backend::{ProofFor, ProofRegFor}; use std::{ marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, @@ -25,7 +26,7 @@ use std::{ }; use log::{info, trace, warn}; use parking_lot::{Mutex, RwLock}; -use codec::{Encode, Decode}; +use codec::{Codec, Encode, Decode}; use hash_db::Prefix; use sp_core::{ ChangesTrieConfiguration, convert_hash, NativeOrEncoded, @@ -41,10 +42,10 @@ use sp_runtime::{ }, }; use sp_state_machine::{ - DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, + DBValue, backend::Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, - ChangesTrieConfigurationRange, key_changes, key_changes_proof, StorageProof, - StorageProofKind, + ChangesTrieConfigurationRange, key_changes, key_changes_proof, SimpleProof as StorageProof, + StorageProofKind, StorageProof as StorageProofT, }; use sc_executor::RuntimeVersion; use sp_consensus::{ @@ -1175,8 +1176,9 @@ impl UsageProvider for Client where } } -impl ProofProvider for Client where +impl ProofProvider>> for Client where B: backend::Backend, +// HashFor: Ord + Codec, E: CallExecutor, Block: BlockT, { @@ -1184,10 +1186,9 @@ impl ProofProvider for Client where &self, id: &BlockId, keys: &mut dyn Iterator, - kind: StorageProofKind, - ) -> sp_blockchain::Result { + ) -> sp_blockchain::Result>> { self.state_at(id) - .and_then(|state| prove_read(state, keys, kind) + .and_then(|state| prove_read(state, keys) .map_err(Into::into)) } @@ -1196,10 +1197,9 @@ impl ProofProvider for Client where id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, - kind: StorageProofKind, - ) -> sp_blockchain::Result { + ) -> sp_blockchain::Result>> { self.state_at(id) - .and_then(|state| prove_child_read(state, child_info, keys, kind) + .and_then(|state| 
prove_child_read(state, child_info, keys) .map_err(Into::into)) } @@ -1208,9 +1208,7 @@ impl ProofProvider for Client where id: &BlockId, method: &str, call_data: &[u8], - kind: StorageProofKind, - ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let (merge_kind, prefer_full) = kind.mergeable_kind(); + ) -> sp_blockchain::Result<(Vec, ProofFor>)> { // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. // @@ -1218,7 +1216,6 @@ impl ProofProvider for Client where let code_proof = self.read_proof( id, &mut [well_known_keys::CODE, well_known_keys::HEAP_PAGES].iter().map(|v| *v), - merge_kind, )?; let state = self.state_at(id)?; @@ -1229,13 +1226,9 @@ impl ProofProvider for Client where &self.executor, method, call_data, - merge_kind, - true, ).and_then(|(r, p)| { Ok((r, StorageProof::merge::, _>( vec![p, code_proof], - prefer_full, - false, ).map_err(|e| format!("{}", e))?)) }) } diff --git a/client/service/src/client/light/backend.rs b/client/service/src/client/light/backend.rs index 2cf994d3f5993..ba393fa7708c9 100644 --- a/client/service/src/client/light/backend.rs +++ b/client/service/src/client/light/backend.rs @@ -29,7 +29,7 @@ use sp_core::ChangesTrieConfiguration; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ - Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, + backend::{Backend as StateBackend, ProofRegStateFor}, TrieBackend, InMemoryBackend, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; use sp_runtime::{generic::BlockId, Justification, Storage}; @@ -380,7 +380,9 @@ impl StateBackend for GenesisOrUnavailableState { type Error = ClientError; type Transaction = as StateBackend>::Transaction; - type TrieBackendStorage = as StateBackend>::TrieBackendStorage; + type StorageProof = as StateBackend>::StorageProof; + type ProofRegBackend = as StateBackend>::ProofRegBackend; + 
type ProofCheckBackend = as StateBackend>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> ClientResult>> { match *self { @@ -509,9 +511,9 @@ impl StateBackend for GenesisOrUnavailableState sp_state_machine::UsageInfo::empty() } - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + fn from_reg_state(self, previous: ProofRegStateFor) -> Option { match self { - GenesisOrUnavailableState::Genesis(ref mut state) => state.as_trie_backend(), + GenesisOrUnavailableState::Genesis(state) => state.from_reg_state(previous), GenesisOrUnavailableState::Unavailable => None, } } diff --git a/client/service/src/client/light/call_executor.rs b/client/service/src/client/light/call_executor.rs index 1f83f8c27f64b..3f73ace5878d1 100644 --- a/client/service/src/client/light/call_executor.rs +++ b/client/service/src/client/light/call_executor.rs @@ -29,11 +29,12 @@ use sp_runtime::{ }; use sp_externalities::Extensions; use sp_state_machine::{ - self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, - StorageProofKind, + self, OverlayedChanges, ExecutionStrategy, execution_proof_check_on_proof_backend, + ExecutionManager, CloneableSpawn, create_proof_check_backend, InMemoryBackend, }; +use sp_state_machine::backend::{Backend as StateBackend, ProofRegStateFor, ProofRegFor}; use hash_db::Hasher; +use sp_trie::{SimpleProof as StorageProof, StorageProof as StorageProofT, MergeableStorageProof}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; @@ -116,7 +117,7 @@ impl CallExecutor for initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, native_call: Option, - recorder: Option<&RefCell>>, + _recorder: Option<&RefCell>::State, Block>>>, extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { // there's no actual way/need to specify native/wasm execution strategy on light node @@ -143,7 +144,7 @@ impl 
CallExecutor for initialize_block, ExecutionManager::NativeWhenPossible, native_call, - recorder, + None, extensions, ).map_err(|e| ClientError::Execution(Box::new(e.to_string()))), false => Err(ClientError::NotAvailableOnLightClient), @@ -157,14 +158,13 @@ impl CallExecutor for } } - fn prove_at_trie_state>>( + fn prove_at_proof_backend_state>>( &self, - _state: &sp_state_machine::TrieBackend>, - _changes: &mut OverlayedChanges, + _proof_backend: &P, + _overlay: &mut OverlayedChanges, _method: &str, _call_data: &[u8], - _kind: StorageProofKind, - ) -> ClientResult<(Vec, StorageProof)> { + ) -> ClientResult<(Vec, ProofRegFor>)> { Err(ClientError::NotAvailableOnLightClient) } @@ -183,44 +183,37 @@ pub fn prove_execution( executor: &E, method: &str, call_data: &[u8], - kind: StorageProofKind, - proof_used_in_other: bool, -) -> ClientResult<(Vec, StorageProof)> +) -> ClientResult<(Vec, ProofRegFor>)> where Block: BlockT, S: StateBackend>, E: CallExecutor, { - let trie_state = state.as_trie_backend() + let proof_state = state.as_proof_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box )?; - let (merge_kind, prefer_full) = kind.mergeable_kind(); // prepare execution environment + record preparation proof let mut changes = Default::default(); - let (_, init_proof) = executor.prove_at_trie_state( - trie_state, + let (_, init_proof) = executor.prove_at_proof_backend_state( + &proof_state, &mut changes, "Core_initialize_block", &header.encode(), - merge_kind, )?; // execute method + record execution proof - let (result, exec_proof) = executor.prove_at_trie_state( - trie_state, + let (result, exec_proof) = executor.prove_at_proof_backend_state( + &proof_state, &mut changes, method, call_data, - merge_kind, )?; - let total_proof = StorageProof::merge::, _>( + let total_proof = >>::merge( vec![init_proof, exec_proof], - prefer_full, - proof_used_in_other, - ).map_err(|e| format!("{}", e))?; + ); Ok((result, total_proof)) } @@ 
-241,7 +234,8 @@ pub fn check_execution_proof( H: Hasher, H::Out: Ord + codec::Codec + 'static, { - check_execution_proof_with_make_header::( + + check_execution_proof_with_make_header::, Header, E, H, _>( executor, spawn_handle, request, @@ -260,14 +254,15 @@ pub fn check_execution_proof( /// /// Method is executed using passed header as environment' current block. /// Proof should include both environment preparation proof and method execution proof. -pub fn check_execution_proof_with_make_header( +pub fn check_execution_proof_with_make_header( executor: &E, spawn_handle: Box, request: &RemoteCallRequest

, - remote_proof: StorageProof, + remote_proof: P::StorageProof, make_next_header: MakeNextHeader, ) -> ClientResult> where + P: sp_state_machine::backend::ProofCheckBackend, E: CodeExecutor + Clone + 'static, H: Hasher, Header: HeaderT, @@ -279,14 +274,14 @@ pub fn check_execution_proof_with_make_header( // prepare execution environment + check preparation proof let mut changes = OverlayedChanges::default(); - let trie_backend = create_proof_check_backend(root, remote_proof)?; + let trie_backend = P::create_proof_check_backend(root, remote_proof)?; let next_header = make_next_header(&request.header); // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); let runtime_code = backend_runtime_code.runtime_code()?; - execution_proof_check_on_trie_backend::( + execution_proof_check_on_proof_backend::( &trie_backend, &mut changes, executor, @@ -297,7 +292,7 @@ pub fn check_execution_proof_with_make_header( )?; // execute method - execution_proof_check_on_trie_backend::( + execution_proof_check_on_proof_backend::( &trie_backend, &mut changes, executor, diff --git a/client/service/src/client/light/fetcher.rs b/client/service/src/client/light/fetcher.rs index ac083b61bfcbb..c39e86a83a634 100644 --- a/client/service/src/client/light/fetcher.rs +++ b/client/service/src/client/light/fetcher.rs @@ -33,9 +33,9 @@ use sp_runtime::traits::{ use sp_state_machine::{ ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, CloneableSpawn, + read_child_proof_check, CloneableSpawn, BackendStorageProof, InMemoryBackend, }; -pub use sp_state_machine::StorageProof; +pub use sp_state_machine::{SimpleProof as StorageProof, StorageProof as StorageProofT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; pub use 
sc_client_api::{ @@ -159,7 +159,7 @@ impl> LightDataChecker { H::Out: Ord + codec::Codec, { // all the checks are sharing the same storage - let storage = remote_roots_proof.into_partial_flat_db::() + let storage = remote_roots_proof.into_partial_db() .map_err(|e| format!("{}", e))?; // remote_roots.keys() are sorted => we can use this to group changes tries roots @@ -204,7 +204,7 @@ impl> LightDataChecker { } } -impl FetchChecker for LightDataChecker +impl FetchChecker for LightDataChecker where Block: BlockT, E: CodeExecutor + Clone + 'static, @@ -234,7 +234,7 @@ impl FetchChecker for LightDataChecker request: &RemoteReadRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - read_proof_check::( + read_proof_check::, H, _>( convert_hash(request.header.state_root()), remote_proof, request.keys.iter(), @@ -250,7 +250,7 @@ impl FetchChecker for LightDataChecker Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; - read_child_proof_check::( + read_child_proof_check::, H, _>( convert_hash(request.header.state_root()), remote_proof, &child_info, diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index cfddbe94176c9..6835ffd1c3b31 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -526,6 +526,7 @@ pub trait RuntimeApiInfo { pub struct ProofRecorder>, Block: BlockT> { /// The recorder to use over the db use by trie db. /// TODO EMCH is this the sync recorder, should not (there is something fishy with this sync rec) + /// in master-state-trait we use proofregrecorder so the sync one and no refcell on call executor pub recorder: sp_state_machine::RecordBackendFor>, /// The additional input needed for the proof. 
pub input: ProofInput, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 4442eeefff1fd..ab6880762bb02 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -47,7 +47,7 @@ mod read_only; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, TrieNodesStorageProof, StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RegStorageProof, - SimpleProof}; + SimpleProof, BackendStorageProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; From 3273887ea235b72e5b0e56d498dc02b7f38c095e Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 10 Jun 2020 18:45:49 +0200 Subject: [PATCH 155/185] Switch proof provider to return proofreg, we needed to merge --- client/api/src/call_executor.rs | 2 +- client/api/src/proof_provider.rs | 13 +++--- client/service/src/client/call_executor.rs | 44 +++++++++++-------- client/service/src/client/client.rs | 15 +++---- primitives/api/src/lib.rs | 6 +-- primitives/state-machine/src/backend.rs | 8 ++-- primitives/state-machine/src/lib.rs | 12 +++-- .../state-machine/src/proving_backend.rs | 7 +-- primitives/trie/src/lib.rs | 1 + 9 files changed, 58 insertions(+), 50 deletions(-) diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index c5e1145668a71..677719a6c7f94 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -93,7 +93,7 @@ pub trait CallExecutor { initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, - recorder: Option>::State, HashFor>>, + proof_recorder: Option<&RefCell>::State, B>>>, extensions: Option, ) -> sp_blockchain::Result> where ExecutionManager: Clone; diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 75fc1cfa1c82e..c8065e478b5e0 100644 --- 
a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -19,21 +19,20 @@ //! Proof utilities use sp_runtime::{ generic::BlockId, - traits::Block as BlockT, + traits::{Block as BlockT, HashFor}, }; use crate::{SimpleProof, ChangesProof}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; -use sp_trie::StorageProof; +use sp_trie::BackendStorageProof; /// Interface for providing block proving utilities. -pub trait ProofProvider { +pub trait ProofProvider>> { /// Reads storage value at a given block + key, returning read proof. - /// TODO EMCH consider returning Proof::ProofReg instead!!! : more flexible fn read_proof( &self, id: &BlockId, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result; + ) -> sp_blockchain::Result; /// Reads child storage value at a given block + storage_key + key, returning /// read proof. @@ -42,7 +41,7 @@ pub trait ProofProvider { id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result; + ) -> sp_blockchain::Result; /// Execute a call to a contract on top of state in a block of given hash /// AND returning execution proof. @@ -53,7 +52,7 @@ pub trait ProofProvider { id: &BlockId, method: &str, call_data: &[u8], - ) -> sp_blockchain::Result<(Vec, Proof)>; + ) -> sp_blockchain::Result<(Vec, Proof::StorageProofReg)>; /// Reads given header and generates CHT-based header proof. 
fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, SimpleProof)>; diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index be4fce8cfe22f..53967b632d495 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -163,35 +163,41 @@ where match recorder { Some(recorder) => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + + let ProofRecorder{ recorder, input } = &mut *recorder.borrow_mut(); // It is important to extract the runtime code here before we create the proof // recorder. let runtime_code = state_runtime_code.runtime_code()?; let state = self.backend.state_at(*at)?; - // TODO EMCH we need to check if previously recording root are still in recorder, - // previous code did some input merging (if it is not we can remove the ProofRecorder - // struct). - let backend = state.from_reg_state(recorder) + let backend = state.from_reg_state(std::mem::replace(recorder, Default::default())) .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box )?; - let mut state_machine = StateMachine::new( - &backend, - changes_trie_state, - changes, - offchain_changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ); - // TODO: https://github.com/paritytech/substrate/issues/4455 - // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + let result = { + let mut state_machine = StateMachine::new( + &backend, + changes_trie_state, + changes, + offchain_changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ); + // TODO: https://github.com/paritytech/substrate/issues/4455 + // 
.with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) + state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + }; + use sp_state_machine::backend::ProofRegBackend; + let (recorder_state, input_state) = backend.extract_recorder(); + *recorder = recorder_state; + input.consolidate(input_state).map_err(|e| format!("{:?}", e))?; + result }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 63e575ce731ed..f3e4a0bd35bb1 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -45,7 +45,7 @@ use sp_state_machine::{ DBValue, backend::Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, SimpleProof as StorageProof, - StorageProofKind, StorageProof as StorageProofT, + StorageProofKind, StorageProof as StorageProofT, MergeableStorageProof, }; use sc_executor::RuntimeVersion; use sp_consensus::{ @@ -526,8 +526,7 @@ impl Client where Ok(()) }, ())?; - Ok(StorageProof::merge::, _>(proofs, false, false) - .map_err(|e| format!("{}", e))?) + Ok(StorageProof::merge(proofs).into()) } /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). 
@@ -1186,7 +1185,7 @@ impl ProofProvider>> f &self, id: &BlockId, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result>> { + ) -> sp_blockchain::Result>> { self.state_at(id) .and_then(|state| prove_read(state, keys) .map_err(Into::into)) @@ -1197,7 +1196,7 @@ impl ProofProvider>> f id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result>> { + ) -> sp_blockchain::Result>> { self.state_at(id) .and_then(|state| prove_child_read(state, child_info, keys) .map_err(Into::into)) @@ -1208,7 +1207,7 @@ impl ProofProvider>> f id: &BlockId, method: &str, call_data: &[u8], - ) -> sp_blockchain::Result<(Vec, ProofFor>)> { + ) -> sp_blockchain::Result<(Vec, ProofRegFor>)> { // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. // @@ -1227,9 +1226,9 @@ impl ProofProvider>> f method, call_data, ).and_then(|(r, p)| { - Ok((r, StorageProof::merge::, _>( + Ok((r, ProofRegFor::>::merge( vec![p, code_proof], - ).map_err(|e| format!("{}", e))?)) + ))) }) } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 6835ffd1c3b31..d2989e759442d 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -525,9 +525,9 @@ pub trait RuntimeApiInfo { #[cfg(feature = "std")] pub struct ProofRecorder>, Block: BlockT> { /// The recorder to use over the db use by trie db. - /// TODO EMCH is this the sync recorder, should not (there is something fishy with this sync rec) - /// in master-state-trait we use proofregrecorder so the sync one and no refcell on call executor - pub recorder: sp_state_machine::RecordBackendFor>, + /// TODO EMCH this the sync recorder and we got a mechanism of extract / merge for it + /// when it should only be reusing it, but merge still needed for input. + pub recorder: sp_state_machine::backend::ProofRegStateFor>, /// The additional input needed for the proof. 
pub input: ProofInput, } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 806576538b290..1dc8b531cbdd1 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -21,7 +21,7 @@ use hash_db::Hasher; use codec::{Decode, Encode}; use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; use crate::{UsageInfo, StorageKey, StorageValue, StorageCollection}; -use sp_trie::{ProofInput, BackendStorageProof, RecordBackendFor}; +use sp_trie::{ProofInput, BackendStorageProof}; /// Access the state of the proof backend of a backend. pub type ProofRegStateFor = <>::ProofRegBackend as ProofRegBackend>::State; @@ -267,13 +267,15 @@ pub trait ProofRegBackend: crate::backend::Backend H: Hasher, { /// State of a backend. + /// TODO try to merge with RecordBackendFor (aka remove the arc rwlock in code) type State: Default + Send + Sync + Clone; - + // + Into> + // + From> /// Extract proof after running operation to prove. fn extract_proof(&self) -> Result<>::StorageProofReg, Box>; /// Get current recording state. - fn extract_recorder(self) -> (RecordBackendFor, ProofInput); + fn extract_recorder(self) -> (Self::State, ProofInput); } /// Backend used to produce proof. 
diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index ab6880762bb02..1c12e2f306f0f 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -47,7 +47,7 @@ mod read_only; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, TrieNodesStorageProof, StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RegStorageProof, - SimpleProof, BackendStorageProof}; + SimpleProof, BackendStorageProof, MergeableStorageProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; @@ -630,7 +630,7 @@ where pub fn prove_read_for_query_plan_check( backend: B, keys: I, -) -> Result<(sp_trie::RecordBackendFor, ProofInput), Box> +) -> Result<(crate::backend::ProofRegStateFor, ProofInput), Box> where B: Backend, H: Hasher, @@ -647,8 +647,7 @@ where .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - - Ok(proof_backend.extract_recorder()) + Ok(proof_backend.extract_recorder()) } @@ -1278,6 +1277,11 @@ mod tests { let remote_backend = trie_backend::tests::test_trie_proof::(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]).unwrap(); + let recorder = match std::sync::Arc::try_unwrap(recorder) { + Ok(r) => r.into_inner(), + Err(arc) => arc.read().clone(), + }; + let mut root_map = ChildrenProofMap::default(); root_map.insert(ChildInfo::top_trie().proof_info(), remote_root.encode()); assert!(ProofInput::ChildTrieRoots(root_map) == root_input); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 72d629a4a22fe..58dd1bf462e77 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -257,12 +257,9 @@ impl ProofRegBackend for 
ProvingBackend ).map_err(|e| Box::new(e) as Box) } - fn extract_recorder(self) -> (RecordBackendFor, ProofInput) { + fn extract_recorder(self) -> (ProofRegStateFor, ProofInput) { let input = self.trie_backend.extract_registered_roots(); - let recorder = match Arc::try_unwrap(self.trie_backend.into_storage().proof_recorder) { - Ok(r) => r.into_inner(), - Err(arc) => arc.read().clone(), - }; + let recorder = self.trie_backend.into_storage().proof_recorder; (recorder, input) } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 181fc04c23fd3..f262650418935 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -53,6 +53,7 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; /// Access record backend for a given backend storage proof. /// TODO EMCH check if can be use at other place (rg 'as BackendS') +/// TODO seems rather useless we use the reg one moste of the time, not exposing it ? pub type RecordBackendFor = <

>::StorageProofReg as RegStorageProof>::RecordBackend; #[derive(Default)] From d3907400d110300503544203146d44b90601777d Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 10 Jun 2020 21:15:59 +0200 Subject: [PATCH 156/185] test still needs a few updates, builds --- bin/node/bench/src/trie.rs | 5 +++-- client/api/src/backend.rs | 8 +++++++- .../basic-authorship/src/basic_authorship.rs | 5 +++-- client/block-builder/src/lib.rs | 7 ++++--- client/finality-grandpa/src/finality_proof.rs | 18 +++++++++--------- client/service/src/client/client.rs | 2 +- client/service/test/src/client/light.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 5 ++++- primitives/api/src/lib.rs | 2 +- primitives/consensus/common/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 5 +++++ .../state-machine/src/proving_backend.rs | 7 +++++++ primitives/trie/src/storage_proof/compact.rs | 2 +- primitives/trie/src/storage_proof/mod.rs | 10 ++++------ primitives/trie/src/storage_proof/simple.rs | 10 +++++++++- 15 files changed, 60 insertions(+), 30 deletions(-) diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 886dc6011492f..8eff2967a8b63 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -23,7 +23,8 @@ use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; use hash_db::Prefix; -use sp_state_machine::Backend as _; +use sp_state_machine::backend::Backend as _; +use sp_state_machine::SimpleProof; use sp_trie::{trie_types::TrieDBMut, TrieMut as _}; use node_primitives::Hash; @@ -181,7 +182,7 @@ impl core::Benchmark for TrieReadBenchmark { let storage: Arc> = Arc::new(Storage(db.open(self.database_type))); - let trie_backend = sp_state_machine::TrieBackend::new( + let trie_backend = sp_state_machine::TrieBackend::<_, _, SimpleProof>::new( storage, self.root, ); diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index bdc79f764803f..94cc9bc74c33b 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs 
@@ -42,12 +42,18 @@ use parking_lot::RwLock; pub use sp_state_machine::backend::Backend as StateBackend; pub use sp_state_machine::backend::ProofRegFor; -pub use sp_state_machine::backend::ProofFor; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. pub type StateBackendFor = >::State; +/// Extracts the proof for the given backend. +pub type ProofFor = as StateBackend>>::StorageProof; + +type ProofRegForSB = >>::ProofRegBackend; + +type ProofRegForB = ProofRegForSB, Block>; + /// Extracts the transaction for the given state backend. pub type TransactionForSB = >>::Transaction; diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index cd241f38849a1..ad4c524046766 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -22,7 +22,7 @@ use std::{time, sync::Arc}; use sc_client_api::backend; -use codec::Decode; +use codec::{Encode, Decode}; use sp_consensus::{evaluation, Proposal, RecordProof}; use sp_inherents::InherentData; use log::{error, info, debug, trace, warn}; @@ -320,7 +320,8 @@ impl Proposer error!("Failed to evaluate authored block: {:?}", err); } - Ok(Proposal { block, proof, storage_changes }) + let proof: Option> = proof.map(Into::into); + Ok(Proposal { block, encoded_proof: proof.map(|p| p.encode()), storage_changes }) } } diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 1f052dd79fbb7..bb7294f76ddb2 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -219,7 +219,8 @@ mod tests { use super::*; use sp_blockchain::HeaderBackend; use sp_core::Blake2Hasher; - use sp_state_machine::Backend; + use sp_state_machine::backend::Backend; + use sp_state_machine::SimpleProof; use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; #[test] @@ -232,14 +233,14 @@ mod tests { &client, client.info().best_hash, 
client.info().best_number, - RecordProof::Yes(sp_api::StorageProofKind::Flat), + RecordProof::Yes, Default::default(), &*backend, ).unwrap().build().unwrap(); let proof = block.proof.expect("Proof is build on request"); - let backend = sp_state_machine::create_proof_check_backend::( + let backend = sp_state_machine::create_proof_check_backend::( block.storage_changes.transaction_storage_root, proof, ).unwrap(); diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index f12dfbb16f050..7a00165ec67e6 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -41,9 +41,9 @@ use log::{trace, warn}; use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sc_client_api::{ - backend::Backend, StorageProof, StorageProofKind, + backend::Backend, SimpleProof as StorageProof, light::{FetchChecker, RemoteReadRequest}, - StorageProvider, ProofProvider, ProofNodes, + StorageProvider, ProofProvider, }; use parity_scale_codec::{Encode, Decode}; use finality_grandpa::BlockNumberOps; @@ -70,7 +70,7 @@ pub trait AuthoritySetForFinalityProver: Send + Sync { } /// Trait that combines `StorageProvider` and `ProofProvider` -pub trait StorageAndProofProvider: StorageProvider + ProofProvider + Send + Sync +pub trait StorageAndProofProvider: StorageProvider + ProofProvider + Send + Sync where Block: BlockT, BE: Backend + Send + Sync, @@ -81,7 +81,7 @@ impl StorageAndProofProvider for P where Block: BlockT, BE: Backend + Send + Sync, - P: StorageProvider + ProofProvider + Send + Sync, + P: StorageProvider + ProofProvider + Send + Sync, {} /// Implementation of AuthoritySetForFinalityProver. 
@@ -98,7 +98,7 @@ impl AuthoritySetForFinalityProver for Arc) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY), StorageProofKind::Flat) + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY)) } } @@ -114,7 +114,7 @@ pub trait AuthoritySetForFinalityChecker: Send + Sync { } /// FetchChecker-based implementation of AuthoritySetForFinalityChecker. -impl AuthoritySetForFinalityChecker for Arc> { +impl AuthoritySetForFinalityChecker for Arc> { fn check_authorities_proof( &self, hash: Block::Hash, @@ -229,7 +229,7 @@ pub(crate) struct FinalityProofFragment { /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. pub unknown_headers: Vec

, /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option, + pub authorities_proof: Option, } /// Proof of finality is the ordered set of finality fragments, where: @@ -345,7 +345,7 @@ pub(crate) fn prove_finality, J>( block: current, justification, unknown_headers: ::std::mem::take(&mut unknown_headers), - authorities_proof: new_authorities_proof.map(StorageProof::expect_flatten_content), + authorities_proof: new_authorities_proof, }; // append justification to finality proof if required @@ -512,7 +512,7 @@ fn check_finality_proof_fragment( current_authorities = authorities_provider.check_authorities_proof( proof_fragment.block, header, - StorageProof::Flat(new_authorities_proof), + new_authorities_proof, )?; current_set_id += 1; diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index f3e4a0bd35bb1..8639df6c6c0f2 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1175,7 +1175,7 @@ impl UsageProvider for Client where } } -impl ProofProvider>> for Client where +impl ProofProvider> for Client where B: backend::Backend, // HashFor: Ord + Codec, E: CallExecutor, diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 59fc233874447..e0087ee57b677 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -40,7 +40,7 @@ use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOv use sp_consensus::{BlockOrigin}; use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; use sp_core::{H256, tasks::executor as tasks_executor, NativeOrEncoded}; -use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, 
RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, StorageProofKind}; +use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest}; use sp_externalities::Extensions; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::{ diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index d4e19d98183cf..88639d4e4ad2b 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -292,7 +292,10 @@ fn generate_runtime_api_base_structures() -> Result { .and_then(|recorder| { let #crate_::ProofRecorder{ recorder, input } = &mut *recorder.borrow_mut(); let input = std::mem::replace(input, #crate_::ProofInput::None); - <#crate_::ProofRegFor>>::extract_proof(recorder, input).ok() + <>>::ProofRegBackend as #crate_::ProofRegBackend<#crate_::HashFor>>::extract_proof_reg( + &recorder, + input, + ).ok() }) } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index d2989e759442d..0cfdb4daf6578 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -40,7 +40,7 @@ extern crate self as sp_api; #[cfg(feature = "std")] pub use sp_state_machine::{ OverlayedChanges, StorageProof, StorageProofKind, backend::Backend as StateBackend, ChangesTrieState, InMemoryBackend, - ProofInput, backend::ProofRegFor, RegStorageProof, + ProofInput, backend::{ProofRegFor, ProofRegBackend}, RegStorageProof, }; #[doc(hidden)] #[cfg(feature = "std")] diff --git 
a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 4d3415f91d13c..b75671876cf11 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -95,7 +95,7 @@ pub struct Proposal { /// The block that was build. pub block: Block, /// Optional proof that was recorded while building the block. - pub proof: Option, + pub encoded_proof: Option>, /// The storage changes while building this block. pub storage_changes: sp_state_machine::StorageChanges, NumberFor>, } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 1dc8b531cbdd1..bdc0b67f70387 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -46,6 +46,7 @@ pub trait Backend: Sized + std::fmt::Debug { /// The actual proof produced. type StorageProof: BackendStorageProof; + // + sp_trie::WithRegStorageProof; /// Type of proof backend. @@ -276,6 +277,10 @@ pub trait ProofRegBackend: crate::backend::Backend /// Get current recording state. fn extract_recorder(self) -> (Self::State, ProofInput); + + /// Extract from the state and input. + /// TODO EMCH fusing state and record could avoid this + fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result<>::StorageProofReg, Box>; } /// Backend used to produce proof. 
diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 58dd1bf462e77..1415c7c7c3cf5 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -262,6 +262,13 @@ impl ProofRegBackend for ProvingBackend let recorder = self.trie_backend.into_storage().proof_recorder; (recorder, input) } + + fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result<>::StorageProofReg, Box> { + <>::StorageProofReg>::extract_proof( + & recorder_state.read(), + input, + ).map_err(|e| Box::new(e) as Box) + } } impl Backend for ProvingBackend diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 561ea13d69877..83d1f2d93f809 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -12,11 +12,11 @@ use super::*; use super::simple::ProofNodes; use codec::{Codec, Encode, Decode}; use crate::TrieLayout; -#[cfg(feature = "std")] use crate::TrieHash; use sp_storage::ChildType; use sp_std::marker::PhantomData; use sp_std::convert::TryInto; +use sp_std::{vec, vec::Vec}; /// A collection on encoded and compacted trie nodes. 
/// Nodes are sorted by trie node iteration order, and some hash diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 5e74179dd8cc7..5f1adbb64d6da 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -8,8 +8,6 @@ use sp_std::collections::{btree_map::BTreeMap, btree_map, btree_map::Entry}; use sp_std::collections::btree_set::BTreeSet; -#[cfg(feature = "std")] -use std::collections::hash_map::Entry as HEntry; use sp_std::vec::Vec; use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput, Error as CodecError}; use hash_db::{Hasher, HashDBRef}; @@ -27,10 +25,10 @@ pub mod multiple; // usage is restricted here to proof. // In practice it is already use internally by no_std trie_db. #[cfg(not(feature = "std"))] -use hashbrown::HashMap; +use hashbrown::{hash_map::Entry as HEntry, HashMap}; #[cfg(feature = "std")] -use std::collections::HashMap; +use std::collections::{hash_map::Entry as HEntry, HashMap}; type Result = sp_std::result::Result; type CodecResult = sp_std::result::Result; @@ -330,7 +328,7 @@ impl RecordBackend for FullRecorder { } fn merge(&mut self, mut other: Self) -> bool { - for (child_info, other) in std::mem::replace(&mut other.0, Default::default()) { + for (child_info, other) in sp_std::mem::replace(&mut other.0, Default::default()) { match self.0.entry(child_info) { Entry::Occupied(mut entry) => { for (key, value) in other.0 { @@ -365,7 +363,7 @@ impl RecordBackend for FlatRecorder { } fn merge(&mut self, mut other: Self) -> bool { - for (key, value) in std::mem::replace(&mut other.0, Default::default()).0 { + for (key, value) in sp_std::mem::replace(&mut other.0, Default::default()).0 { match self.0.entry(key) { HEntry::Occupied(entry) => { if entry.get() != &value { diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index 1943569e0e637..7200a5a1fe47f 100644 --- 
a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -33,8 +33,16 @@ impl Flat { pub fn into_nodes(self) -> ProofNodes { self.0 } + /// Instantiate from inner proof node, + /// mainly needed for part of the + /// code that is not generic. + pub fn from_nodes(nodes: ProofNodes) -> Self { + Flat(nodes) + } } - + +// TODO EMCH tets that proof nodes encode to the same as flat (to validate change in grandpa) + impl StorageProof for Flat { fn empty() -> Self { Flat(Default::default()) From a8017b824120d10380bc3a7738719023ba29875b Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Jun 2020 15:19:13 +0200 Subject: [PATCH 157/185] test fixed --- bin/node/cli/src/service.rs | 4 +- client/api/src/call_executor.rs | 1 + client/consensus/aura/src/lib.rs | 2 +- client/consensus/babe/src/tests.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 14 +- client/finality-grandpa/src/light_import.rs | 4 +- client/finality-grandpa/src/tests.rs | 11 +- client/service/src/builder.rs | 2 +- client/service/src/client/call_executor.rs | 4 +- client/service/src/client/client.rs | 4 +- client/service/src/client/light/backend.rs | 2 +- .../service/src/client/light/call_executor.rs | 8 +- client/service/test/src/client/light.rs | 163 +++++++----------- primitives/api/test/tests/runtime_calls.rs | 6 +- primitives/consensus/common/src/lib.rs | 2 - primitives/state-machine/src/lib.rs | 2 +- primitives/trie/src/storage_proof/simple.rs | 4 +- 17 files changed, 104 insertions(+), 131 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 5a531a1073eec..05f168bd8bad9 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -409,7 +409,7 @@ mod tests { use sc_consensus_epochs::descendent_query; use sp_consensus::{ Environment, Proposer, BlockImportParams, BlockOrigin, ForkChoiceStrategy, BlockImport, - RecordProof, StorageProofKind, + RecordProof, }; use node_primitives::{Block, DigestItem, 
Signature}; use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; @@ -527,7 +527,7 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), - RecordProof::Yes(StorageProofKind::Flat), + RecordProof::Yes, ).await }).expect("Error making test block").block; diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 677719a6c7f94..bbf174f9acf13 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -123,6 +123,7 @@ pub trait CallExecutor { /// Execute a call to a contract on top of given trie state, gathering execution proof. /// /// No changes are made. + /// TODO EMCH try to remove P param and use the associated backend type? fn prove_at_proof_backend_state>>( &self, proof_backend: &P, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 818bb563484be..23e5f29bd002b 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -914,7 +914,7 @@ mod tests { future::ready(r.map(|b| Proposal { block: b.block, - proof: b.proof, + encoded_proof: b.proof.as_ref().map(Encode::encode), storage_changes: b.storage_changes, })) } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index ada1332295d46..bd64060b7228e 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -149,7 +149,7 @@ impl DummyProposer { // mutate the block header according to the mutator. 
(self.factory.mutator)(&mut block.header, Stage::PreSeal); - future::ready(Ok(Proposal { block, proof: None, storage_changes: Default::default() })) + future::ready(Ok(Proposal { block, encoded_proof: None, storage_changes: Default::default() })) } } diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 7a00165ec67e6..5603826ca9f25 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -837,8 +837,8 @@ pub(crate) mod tests { _ => unreachable!("no other authorities should be fetched: {:?}", block_id), }, |block_id| match block_id { - BlockId::Number(5) => Ok(StorageProof::Flat(vec![vec![50]])), - BlockId::Number(7) => Ok(StorageProof::Flat(vec![vec![70]])), + BlockId::Number(5) => Ok(StorageProof::from_nodes(vec![vec![50]])), + BlockId::Number(7) => Ok(StorageProof::from_nodes(vec![vec![70]])), _ => unreachable!("no other authorities should be proved: {:?}", block_id), }, ), @@ -854,14 +854,14 @@ pub(crate) mod tests { block: header(5).hash(), justification: just5, unknown_headers: Vec::new(), - authorities_proof: Some(vec![vec![50]]), + authorities_proof: Some(StorageProof::from_nodes(vec![vec![50]])), }, // last fragment provides justification for #7 && unknown#7 FinalityProofFragment { block: header(7).hash(), justification: just7.clone(), unknown_headers: vec![header(7)], - authorities_proof: Some(vec![vec![70]]), + authorities_proof: Some(StorageProof::from_nodes(vec![vec![70]])), }, ]); @@ -875,7 +875,7 @@ pub(crate) mod tests { 0, auth3, &ClosureAuthoritySetForFinalityChecker( - |hash, _header, proof: StorageProof| match proof.clone().iter_nodes_flatten().next().map(|x| x[0]) { + |hash, _header, proof: StorageProof| match proof.clone().into_nodes().into_iter().next().map(|x| x[0]) { Some(50) => Ok(auth5.clone()), Some(70) => Ok(auth7.clone()), _ => unreachable!("no other proofs should be checked: {}", hash), @@ -936,7 +936,7 @@ pub(crate) mod 
tests { block: header(4).hash(), justification: TestJustification((0, authorities.clone()), vec![7]).encode(), unknown_headers: vec![header(4)], - authorities_proof: Some(vec![vec![42]]), + authorities_proof: Some(StorageProof::from_nodes(vec![vec![42]])), }, FinalityProofFragment { block: header(5).hash(), justification: TestJustification((0, authorities), vec![8]).encode(), @@ -986,7 +986,7 @@ pub(crate) mod tests { block: header(2).hash(), justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(vec![vec![42]]), + authorities_proof: Some(StorageProof::from_nodes(vec![vec![42]])), }, FinalityProofFragment { block: header(4).hash(), justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 25389f8e2dae3..dfc4913a21b7b 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -573,7 +573,7 @@ pub mod tests { use sp_consensus::{import_queue::CacheKeyId, ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; - use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore}; + use sc_client_api::{in_mem::Blockchain as InMemoryAuxStore, SimpleProof}; use substrate_test_runtime_client::runtime::{Block, Header}; use crate::tests::TestApi; use crate::finality_proof::{ @@ -867,7 +867,7 @@ pub mod tests { Vec::new(), ).encode(), unknown_headers: Vec::new(), - authorities_proof: Some(vec![]), + authorities_proof: Some(SimpleProof::from_nodes(vec![])), }, ].encode(), &mut verifier, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index dede7837425f4..37870bdf7656f 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -31,7 +31,7 @@ use tokio::runtime::{Runtime, Handle}; use 
sp_keyring::Ed25519Keyring; use sc_client_api::backend::TransactionFor; use sp_blockchain::Result; -use sp_api::{ApiRef, StorageProof, StorageProofKind, ProvideRuntimeApi}; +use sp_api::{ApiRef, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; use sp_consensus::{ BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, @@ -43,7 +43,7 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor}; use sp_runtime::generic::{BlockId, DigestItem}; use sp_core::{H256, crypto::Public}; use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof}; -use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check}; +use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check, MemoryDB}; use authorities::AuthoritySet; use finality_proof::{ @@ -52,6 +52,9 @@ use finality_proof::{ use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; use sc_consensus::LongestChain; +use sp_state_machine::SimpleProof as StorageProof; + +type ProvingBackend = sp_state_machine::TrieBackend, H, StorageProof>; type PeerData = Mutex< @@ -249,7 +252,7 @@ impl AuthoritySetForFinalityProver for TestApi { let backend = >>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); - let proof = prove_read(backend, vec![b"authorities"], StorageProofKind::Flat) + let proof = prove_read(backend, vec![b"authorities"]) .expect("failure proving read from in-memory storage backend"); Ok(proof) } @@ -262,7 +265,7 @@ impl AuthoritySetForFinalityChecker for TestApi { header: ::Header, proof: StorageProof, ) -> Result { - let results = read_proof_check::, _>( + let results = read_proof_check::>, HashFor, _>( *header.state_root(), proof, vec![b"authorities"] ) .expect("failure checking read proof for authorities"); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 
c8cc5fae22021..5739e65f9f502 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -23,7 +23,7 @@ use crate::config::{Configuration, KeystoreConfig, PrometheusConfig, OffchainWor use crate::metrics::MetricsService; use sc_client_api::{ self, BlockchainEvents, backend::RemoteBackend, light::RemoteBlockchain, execution_extensions::ExtensionsFactory, - ExecutorProvider, CallExecutor, ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, ProofFor, SimpleProof, + ExecutorProvider, CallExecutor, ForkBlocks, BadBlocks, CloneableSpawn, UsageProvider, SimpleProof, StateBackend, }; use crate::client::{Client, ClientConfig}; diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 53967b632d495..ebdf0b18813b4 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -23,7 +23,7 @@ use sp_runtime::{ }; use sp_state_machine::{ self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::{Backend as _, ProofRegFor}, StorageProof, StorageProofKind, ProofInput, + backend::{Backend as _, ProofRegFor}, }; use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; @@ -155,7 +155,7 @@ where let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - let mut state = self.backend.state_at(*at)?; + let state = self.backend.state_at(*at)?; let changes = &mut *changes.borrow_mut(); let offchain_changes = &mut *offchain_changes.borrow_mut(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 8639df6c6c0f2..d478c84ae8417 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -26,7 +26,7 @@ use std::{ }; use log::{info, trace, warn}; use parking_lot::{Mutex, RwLock}; -use codec::{Codec, 
Encode, Decode}; +use codec::{Encode, Decode}; use hash_db::Prefix; use sp_core::{ ChangesTrieConfiguration, convert_hash, NativeOrEncoded, @@ -45,7 +45,7 @@ use sp_state_machine::{ DBValue, backend::Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, SimpleProof as StorageProof, - StorageProofKind, StorageProof as StorageProofT, MergeableStorageProof, + MergeableStorageProof, }; use sc_executor::RuntimeVersion; use sp_consensus::{ diff --git a/client/service/src/client/light/backend.rs b/client/service/src/client/light/backend.rs index ba393fa7708c9..165fa3711abb8 100644 --- a/client/service/src/client/light/backend.rs +++ b/client/service/src/client/light/backend.rs @@ -29,7 +29,7 @@ use sp_core::ChangesTrieConfiguration; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ - backend::{Backend as StateBackend, ProofRegStateFor}, TrieBackend, InMemoryBackend, ChangesTrieTransaction, + backend::{Backend as StateBackend, ProofRegStateFor}, InMemoryBackend, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; use sp_runtime::{generic::BlockId, Justification, Storage}; diff --git a/client/service/src/client/light/call_executor.rs b/client/service/src/client/light/call_executor.rs index 3f73ace5878d1..fc9dd6bfcc308 100644 --- a/client/service/src/client/light/call_executor.rs +++ b/client/service/src/client/light/call_executor.rs @@ -30,11 +30,11 @@ use sp_runtime::{ use sp_externalities::Extensions; use sp_state_machine::{ self, OverlayedChanges, ExecutionStrategy, execution_proof_check_on_proof_backend, - ExecutionManager, CloneableSpawn, create_proof_check_backend, InMemoryBackend, + ExecutionManager, CloneableSpawn, InMemoryBackend, }; -use sp_state_machine::backend::{Backend as StateBackend, ProofRegStateFor, ProofRegFor}; +use 
sp_state_machine::backend::{Backend as StateBackend, ProofRegFor}; use hash_db::Hasher; -use sp_trie::{SimpleProof as StorageProof, StorageProof as StorageProofT, MergeableStorageProof}; +use sp_trie::{SimpleProof as StorageProof, MergeableStorageProof}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; @@ -178,7 +178,7 @@ impl CallExecutor for /// Method is executed using passed header as environment' current block. /// Proof includes both environment preparation proof and method execution proof. pub fn prove_execution( - mut state: S, + state: S, header: Block::Header, executor: &E, method: &str, diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index e0087ee57b677..463fd7910255c 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -40,7 +40,12 @@ use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOv use sp_consensus::{BlockOrigin}; use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; use sp_core::{H256, tasks::executor as tasks_executor, NativeOrEncoded}; -use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest}; +use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientBackend, + ProofProvider, in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, + AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, BlockImportOperation, + RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, + RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, 
RemoteHeaderRequest, + SimpleProof as StorageProof, StorageProof as _}; use sp_externalities::Extensions; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::{ @@ -58,7 +63,13 @@ use substrate_test_runtime_client::{ use sp_core::{blake2_256, ChangesTrieConfiguration}; use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; -use sp_state_machine::Backend as _; +use sp_state_machine::backend::{ProofRegFor, Backend as _}; + +type ProvingBackend = sp_state_machine::TrieBackend< + sp_trie::MemoryDB, + BlakeTwo256, + StorageProof, +>; pub type DummyBlockchain = Blockchain; @@ -226,7 +237,7 @@ impl CallExecutor for DummyCallExecutor { _initialize_block: InitializeBlock<'a, Block>, _execution_manager: ExecutionManager, _native_call: Option, - _proof_recorder: Option<&RefCell>>, + _proof_recorder: Option<&RefCell>::State, Block>>>, _extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { unreachable!() @@ -236,14 +247,13 @@ impl CallExecutor for DummyCallExecutor { unreachable!() } - fn prove_at_trie_state>>( + fn prove_at_proof_backend_state>>( &self, - _trie_state: &sp_state_machine::TrieBackend>, + _proof_backend: &P, _overlay: &mut OverlayedChanges, _method: &str, _call_data: &[u8], - _kind: StorageProofKind, - ) -> Result<(Vec, StorageProof), ClientError> { + ) -> Result<(Vec, ProofRegFor>), ClientError> { unreachable!() } @@ -299,11 +309,7 @@ fn light_aux_store_is_updated_via_non_importing_op() { #[test] fn execution_proof_is_generated_and_checked() { - fn execute( - remote_client: &TestClient, - at: u64, method: &'static str, - kind: StorageProofKind, - ) -> (Vec, Vec) { + fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); @@ -312,7 +318,6 @@ fn execution_proof_is_generated_and_checked() { &remote_block_id, method, &[], - kind, ).unwrap(); // check remote execution proof locally 
@@ -332,12 +337,7 @@ fn execution_proof_is_generated_and_checked() { (remote_result, local_result) } - fn execute_with_proof_failure( - remote_client: &TestClient, - at: u64, - method: &'static str, - kind: StorageProofKind, - ) { + fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); @@ -346,11 +346,10 @@ fn execution_proof_is_generated_and_checked() { &remote_block_id, method, &[], - kind, ).unwrap(); // check remote execution proof locally - let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( + let execution_result = check_execution_proof_with_make_header::( &local_executor(), tasks_executor(), &RemoteCallRequest { @@ -387,35 +386,28 @@ fn execution_proof_is_generated_and_checked() { ).unwrap(); } - let kinds = [ - StorageProofKind::Flat, - StorageProofKind::TrieSkipHashes, - ]; - - for kind in &kinds { - // check method that doesn't requires environment - let (remote, local) = execute(&remote_client, 0, "Core_version", *kind); - assert_eq!(remote, local); + // check method that doesn't requires environment + let (remote, local) = execute(&remote_client, 0, "Core_version"); + assert_eq!(remote, local); - let (remote, local) = execute(&remote_client, 2, "Core_version", *kind); - assert_eq!(remote, local); + let (remote, local) = execute(&remote_client, 2, "Core_version"); + assert_eq!(remote, local); - // check method that requires environment - let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block", *kind); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 1); + // check method that requires environment + let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 1); - let (_, block) 
= execute(&remote_client, 2, "BlockBuilder_finalize_block", *kind); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 3); + let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 3); - // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set - execute_with_proof_failure(&remote_client, 2, "Core_version", *kind); + // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set + execute_with_proof_failure(&remote_client, 2, "Core_version"); - // check that proof check doesn't panic even if proof is incorrect AND panic handler is set - sp_panic_handler::set("TEST", "1.2.3"); - execute_with_proof_failure(&remote_client, 2, "Core_version", *kind); - } + // check that proof check doesn't panic even if proof is incorrect AND panic handler is set + sp_panic_handler::set("TEST", "1.2.3"); + execute_with_proof_failure(&remote_client, 2, "Core_version"); } #[test] @@ -455,12 +447,6 @@ fn code_is_executed_at_genesis_only() { } } -const KINDS: [StorageProofKind; 4] = [ - StorageProofKind::Flat, - StorageProofKind::Full, - StorageProofKind::TrieSkipHashes, - StorageProofKind::TrieSkipHashesFull, -]; type TestChecker = LightDataChecker< NativeExecutor, @@ -469,8 +455,7 @@ type TestChecker = LightDataChecker< DummyStorage, >; -fn prepare_for_read_proof_check(kind: StorageProofKind) - -> (TestChecker, Header, StorageProof, u32) { +fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { // prepare remote client let remote_client = substrate_test_runtime_client::new(); let remote_block_id = BlockId::Number(0); @@ -486,7 +471,6 @@ fn prepare_for_read_proof_check(kind: StorageProofKind) let remote_read_proof = remote_client.read_proof( &remote_block_id, &mut std::iter::once(well_known_keys::HEAP_PAGES), - kind, 
).unwrap(); // check remote read proof locally @@ -506,8 +490,7 @@ fn prepare_for_read_proof_check(kind: StorageProofKind) (local_checker, remote_block_header, remote_read_proof, heap_pages) } -fn prepare_for_read_child_proof_check(kind: StorageProofKind) - -> (TestChecker, Header, StorageProof, Vec) { +fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; let child_info = ChildInfo::new_default(b"child1"); @@ -536,7 +519,6 @@ fn prepare_for_read_child_proof_check(kind: StorageProofKind) &remote_block_id, child_info, &mut std::iter::once("key1".as_bytes()), - kind, ).unwrap(); // check locally @@ -598,51 +580,40 @@ fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { #[test] fn storage_read_proof_is_generated_and_checked() { - for kind in &KINDS { - let ( - local_checker, - remote_block_header, - remote_read_proof, - heap_pages, - ) = prepare_for_read_proof_check(*kind); - assert_eq!((&local_checker as &dyn FetchChecker) - .check_read_proof(&RemoteReadRequest::
{ - block: remote_block_header.hash(), - header: remote_block_header, - keys: vec![well_known_keys::HEAP_PAGES.to_vec()], - retry_count: None, - }, remote_read_proof).unwrap() - .remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); - } + let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); + assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); } #[test] fn storage_child_read_proof_is_generated_and_checked() { let child_info = ChildInfo::new_default(&b"child1"[..]); - for kind in &KINDS { - let ( - local_checker, - remote_block_header, - remote_read_proof, - result, - ) = prepare_for_read_child_proof_check(*kind); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( - &RemoteReadChildRequest::
{ - block: remote_block_header.hash(), - header: remote_block_header, - storage_key: child_info.prefixed_storage_key(), - keys: vec![b"key1".to_vec()], - retry_count: None, - }, - remote_read_proof - ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); - } + let ( + local_checker, + remote_block_header, + remote_read_proof, + result, + ) = prepare_for_read_child_proof_check(); + assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( + &RemoteReadChildRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + storage_key: child_info.prefixed_storage_key(), + keys: vec![b"key1".to_vec()], + retry_count: None, + }, + remote_read_proof + ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); } #[test] fn header_proof_is_generated_and_checked() { let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: local_cht_root, block: 1, retry_count: None, @@ -653,7 +624,7 @@ fn header_proof_is_generated_and_checked() { fn check_header_proof_fails_if_cht_root_is_invalid() { let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: Default::default(), block: 1, retry_count: None, @@ -664,7 +635,7 @@ fn check_header_proof_fails_if_cht_root_is_invalid() { fn check_header_proof_fails_if_invalid_header_provided() { let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: local_cht_root, block: 1, retry_count: None, @@ -679,7 +650,7 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { local_executor(), tasks_executor(), ); - let local_checker = &local_checker as &dyn FetchChecker; + let local_checker = &local_checker as &dyn FetchChecker; let max = remote_client.chain_info().best_number; let max_hash = remote_client.chain_info().best_hash; @@ -789,7 +760,7 @@ fn check_changes_proof_fails_if_proof_is_wrong() { local_executor(), tasks_executor(), ); - let local_checker = &local_checker as &dyn FetchChecker; + let local_checker = &local_checker as &dyn FetchChecker; let max = remote_client.chain_info().best_number; let max_hash = remote_client.chain_info().best_hash; diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 555104446ae2e..476563b3271bd 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -24,7 +24,7 @@ use substrate_test_runtime_client::{ use sp_runtime::{generic::BlockId, traits::{Header as HeaderT, HashFor}}; use sp_state_machine::{ ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, + execution_proof_check_on_proof_backend, }; use sp_consensus::SelectChain; @@ -185,7 +185,7 @@ fn record_proof_works() { builder.push(transaction.clone()).unwrap(); let (block, _, proof) = builder.build().expect("Bake block").into_inner(); - let backend = create_proof_check_backend::>( + let backend = create_proof_check_backend::, sp_state_machine::SimpleProof>( storage_root, proof.expect("Proof was generated"), ).expect("Creates proof backend."); @@ -197,7 +197,7 @@ fn record_proof_works() { None, 8, ); - execution_proof_check_on_trie_backend::<_, u64, _>( + execution_proof_check_on_proof_backend::<_, _, u64, _>( &backend, &mut overlay, &executor, diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 
b75671876cf11..17d3737e02346 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -36,8 +36,6 @@ use sp_runtime::{ }; use futures::prelude::*; pub use sp_inherents::InherentData; -use sp_state_machine::SimpleProof as StorageProof; -pub use sp_state_machine::StorageProofKind; pub mod block_validation; pub mod offline_tracker; pub mod error; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 1c12e2f306f0f..da0a7ffeb5c78 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -47,7 +47,7 @@ mod read_only; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, TrieNodesStorageProof, StorageProof, StorageProofKind, ChildrenProofMap, ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RegStorageProof, - SimpleProof, BackendStorageProof, MergeableStorageProof}; + SimpleProof, CompactProof, BackendStorageProof, MergeableStorageProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index 7200a5a1fe47f..09091ffed0b97 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -29,13 +29,13 @@ pub struct Full(pub(crate) ChildrenProofMap); impl Flat { /// Access to inner proof node, /// mainly needed for part of the - /// code that is not generic. + /// code that is not generic or test. pub fn into_nodes(self) -> ProofNodes { self.0 } /// Instantiate from inner proof node, /// mainly needed for part of the - /// code that is not generic. + /// code that is not generic or test. 
pub fn from_nodes(nodes: ProofNodes) -> Self { Flat(nodes) } From 9a56fd67a14e1f82a319e84dab8c68450966dc5c Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Jun 2020 17:39:47 +0200 Subject: [PATCH 158/185] completed query plan test. --- primitives/state-machine/src/lib.rs | 143 ++++++++++++++++------- primitives/trie/src/storage_proof/mod.rs | 56 +++++++++ 2 files changed, 154 insertions(+), 45 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index da0a7ffeb5c78..78f75e41f5554 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -710,6 +710,44 @@ where proving_backend.extract_proof() } +/// Generate storage child read proof for query plan verification. +pub fn prove_child_read_for_query_plan_check( + backend: B, + top_keys: I, + child_keys: I3, +) -> Result<(crate::backend::ProofRegStateFor, ProofInput), Box> +where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, + I2: IntoIterator, + I2::Item: AsRef<[u8]>, + I3: IntoIterator, +{ + let proof_backend = backend.as_proof_backend() + .ok_or_else( + || Box::new(ExecutionError::UnableToGenerateProof) as Box + )?; + for key in top_keys.into_iter() { + proof_backend + .storage(key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + for (child_info, keys) in child_keys.into_iter() { + for key in keys.into_iter() { + proof_backend + .child_storage(&child_info, key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + } + + Ok(proof_backend.extract_recorder()) +} + + + /// Check storage read proof, generated by `prove_read` call. 
pub fn read_proof_check( root: H::Out, @@ -1270,68 +1308,83 @@ mod tests { fn prove_read_and_proof_check_works_query_plan() { use sp_trie::{CheckableStorageProof, ProofInput}; + fn extract_recorder(recorder: std::sync::Arc>) -> T { + match std::sync::Arc::try_unwrap(recorder) { + Ok(r) => r.into_inner(), + Err(arc) => arc.read().clone(), + } + } + let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; // fetch read proof from 'remote' full node. // Using compact proof to get record backend and proofs. let remote_backend = trie_backend::tests::test_trie_proof::(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_root = remote_backend.storage_root(std::iter::empty()).0; + let remote_root_child = remote_backend.child_storage_root(child_info, std::iter::empty()).0; let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]).unwrap(); - let recorder = match std::sync::Arc::try_unwrap(recorder) { - Ok(r) => r.into_inner(), - Err(arc) => arc.read().clone(), - }; - + let recorder = extract_recorder(recorder); let mut root_map = ChildrenProofMap::default(); root_map.insert(ChildInfo::top_trie().proof_info(), remote_root.encode()); assert!(ProofInput::ChildTrieRoots(root_map) == root_input); - // TODO EMCH could do a primitive function to avoid building the input manually. 
- let mut query_plan = ChildrenProofMap::default(); - query_plan.insert( - ChildInfo::top_trie().proof_info(), - (remote_root.encode(), vec![(b"value2".to_vec(), Some(vec![24u8]))]), - ); - let input_check = ProofInput::QueryPlanWithValues(query_plan); - let mut query_plan = ChildrenProofMap::default(); - query_plan.insert( - ChildInfo::top_trie().proof_info(), - (remote_root.encode(), vec![b"value2".to_vec()]), + let input = ProofInput::query_plan( + remote_root.encode(), + vec![b"value2".to_vec()].into_iter(), + std::iter::empty::<(_, _, std::iter::Empty<_>)>(), + true, ); - let input = ProofInput::QueryPlan(query_plan); let remote_proof = >::extract_proof(&recorder, input).unwrap(); + + let input_check = ProofInput::query_plan_with_values( + remote_root.encode(), + vec![(b"value2".to_vec(), Some(vec![24u8]))].into_iter(), + std::iter::empty::<(_, _, std::iter::Empty<_>)>(), + true, + ); + assert!(remote_proof.verify(&input_check).unwrap()); - /* + // on child trie - let remote_backend = trie_backend::tests::test_trie_proof::(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let remote_proof = prove_child_read( + let remote_backend = trie_backend::tests::test_trie_proof::(); + + let (recorder, root_input) = prove_child_read_for_query_plan_check( remote_backend, - child_info, - &[b"value3"], - ).unwrap(); - let local_result1 = read_child_proof_check::, BlakeTwo256, _>( - remote_root, - remote_proof.clone().into(), - child_info, - &[b"value3"], - ).unwrap(); - let local_result2 = read_child_proof_check::, BlakeTwo256, _>( - remote_root, - remote_proof.clone().into(), - child_info, &[b"value2"], + vec![(child_info.clone(), &[b"value3"])], ).unwrap(); - assert_eq!( - local_result1.into_iter().collect::>(), - vec![(b"value3".to_vec(), Some(vec![142]))], - ); - assert_eq!( - local_result2.into_iter().collect::>(), - vec![(b"value2".to_vec(), None)], - ); -*/ - // TODO test with no child trie ref + let recorder = extract_recorder(recorder); + 
+ let test_with_roots = |include_roots: bool| { + let input = ProofInput::query_plan( + remote_root.encode(), + vec![b"value2".to_vec()].into_iter(), + vec![(child_info.clone(), remote_root_child.encode(), vec![b"value3".to_vec()].into_iter())].into_iter(), + include_roots, + ); + let remote_proof = >::extract_proof(&recorder, input).unwrap(); + + let input_check = ProofInput::query_plan_with_values( + remote_root.encode(), + vec![(b"value2".to_vec(), Some(vec![24u8]))].into_iter(), + vec![(child_info.clone(), remote_root_child.encode(), vec![(b"value3".to_vec(), Some(vec![142u8]))].into_iter())].into_iter(), + include_roots, + ); + + assert!(remote_proof.clone().verify(&input_check).unwrap()); + + let input_check = ProofInput::query_plan_with_values( + remote_root.encode(), + vec![(b"value2".to_vec(), Some(vec![24u8]))].into_iter(), + vec![(child_info.clone(), remote_root_child.encode(), vec![(b"value3".to_vec(), Some(vec![142u8]))].into_iter())].into_iter(), + !include_roots, // not including child root in parent breaks extract + ); + + assert!(!remote_proof.verify(&input_check).unwrap()); + }; + + test_with_roots(true); + test_with_roots(false); } #[test] diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 5f1adbb64d6da..63fdcaeb23e27 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -182,6 +182,62 @@ impl Input { } Ok(()) } + + /// Build a query plan with values. + /// All tuples are key and optional value. + /// Children iterator also contains children encoded root. + /// If `include_child_root` is set to true, we add the child trie query to the top + /// trie, that is usually what we want (unless we only want to prove something + /// local to a child trie. 
+ pub fn query_plan_with_values( + top_encoded_root: Vec, + top: impl Iterator, Option>)>, + children: impl Iterator, impl Iterator, Option>)>)>, + include_child_root: bool, + ) -> Input { + let mut result = ChildrenProofMap::default(); + let mut additional_roots = Vec::new(); + for (child_info, encoded_root, key_values) in children { + if include_child_root { + additional_roots.push((child_info.prefixed_storage_key().into_inner(), Some(encoded_root.clone()))); + } + result.insert(child_info.proof_info(), (encoded_root, key_values.collect())); + } + let mut top_values: Vec<_> = top.collect(); + top_values.extend(additional_roots); + result.insert(ChildInfo::top_trie().proof_info(), (top_encoded_root, top_values)); + + Input::QueryPlanWithValues(result) + } + + /// Build a query plan. + /// Iterator contains key. + /// Children iterator also contains children encoded root. + /// If `include_child_root` is set to true, we add the child trie query to the top + /// trie, that is usually what we want (unless we only want to prove something + /// local to a child trie. + pub fn query_plan( + top_encoded_root: Vec, + top: impl Iterator>, + children: impl Iterator, impl Iterator>)>, + include_child_root: bool, + ) -> Input { + let mut result = ChildrenProofMap::default(); + let mut additional_roots = Vec::new(); + for (child_info, encoded_root, keys) in children { + if include_child_root { + additional_roots.push(child_info.prefixed_storage_key().into_inner()); + } + result.insert(child_info.proof_info(), (encoded_root, keys.collect())); + } + let mut top_keys: Vec<_> = top.collect(); + top_keys.extend(additional_roots); + result.insert(ChildInfo::top_trie().proof_info(), (top_encoded_root, top_keys)); + + Input::QueryPlan(result) + } + + } /// Kind for a `Input` variant. From d496011dfa4420428ec29a9004d1fe3d36c94361 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Jun 2020 18:08:43 +0200 Subject: [PATCH 159/185] using another proof to register. 
--- primitives/state-machine/src/lib.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 78f75e41f5554..1c4f5d7e2697f 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1346,9 +1346,9 @@ mod tests { assert!(remote_proof.verify(&input_check).unwrap()); // on child trie - let remote_backend = trie_backend::tests::test_trie_proof::(); + let remote_backend = trie_backend::tests::test_trie_proof::(); - let (recorder, root_input) = prove_child_read_for_query_plan_check( + let (recorder, _root_input) = prove_child_read_for_query_plan_check( remote_backend, &[b"value2"], vec![(child_info.clone(), &[b"value3"])], @@ -1467,8 +1467,6 @@ mod tests { P: FullBackendStorageProof, P::StorageProofReg: Clone, { - let child_info = ChildInfo::new_default(b"sub1"); - let child_info = &child_info; // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; From f4f21bdccab321875637a50205848884b8c551e4 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Jun 2020 18:31:15 +0200 Subject: [PATCH 160/185] quick renaming --- client/api/src/cht.rs | 2 +- client/finality-grandpa/src/tests.rs | 24 +++++++++---------- client/service/test/src/client/light.rs | 8 ++----- primitives/state-machine/src/lib.rs | 20 +++++++--------- .../state-machine/src/proving_backend.rs | 12 +++++----- primitives/state-machine/src/trie_backend.rs | 6 ++--- 6 files changed, 33 insertions(+), 39 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 36d8e2c5584b5..aafa332de8a21 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -38,7 +38,7 @@ use sp_state_machine::{ }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -type ProofCheckBackend = sp_state_machine::TrieBackend, H, SimpleProof>; +type ProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; /// The size of each CHT. This value is passed to every CHT-related function from /// production code. Other values are passed from tests. 
diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 37870bdf7656f..c994bf7cd9110 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -54,18 +54,18 @@ use sc_block_builder::BlockBuilderProvider; use sc_consensus::LongestChain; use sp_state_machine::SimpleProof as StorageProof; -type ProvingBackend = sp_state_machine::TrieBackend, H, StorageProof>; +type ProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; type PeerData = - Mutex< - Option< - LinkHalf< - Block, - PeersFullClient, - LongestChain - > - > - >; +Mutex< +Option< +LinkHalf< +Block, +PeersFullClient, +LongestChain +> +> +>; type GrandpaPeer = Peer; struct GrandpaTestNet { @@ -254,7 +254,7 @@ impl AuthoritySetForFinalityProver for TestApi { ]); let proof = prove_read(backend, vec![b"authorities"]) .expect("failure proving read from in-memory storage backend"); - Ok(proof) + Ok(proof) } } @@ -265,7 +265,7 @@ impl AuthoritySetForFinalityChecker for TestApi { header: ::Header, proof: StorageProof, ) -> Result { - let results = read_proof_check::>, HashFor, _>( + let results = read_proof_check::>, HashFor, _>( *header.state_root(), proof, vec![b"authorities"] ) .expect("failure checking read proof for authorities"); diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 463fd7910255c..76e72793da6d2 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -65,11 +65,7 @@ use sp_core::{blake2_256, ChangesTrieConfiguration}; use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; use sp_state_machine::backend::{ProofRegFor, Backend as _}; -type ProvingBackend = sp_state_machine::TrieBackend< - sp_trie::MemoryDB, - BlakeTwo256, - StorageProof, ->; +type InMemoryProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; pub type DummyBlockchain = Blockchain; @@ -349,7 +345,7 @@ fn 
execution_proof_is_generated_and_checked() { ).unwrap(); // check remote execution proof locally - let execution_result = check_execution_proof_with_make_header::( + let execution_result = check_execution_proof_with_make_header::( &local_executor(), tasks_executor(), &RemoteCallRequest { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 1c4f5d7e2697f..39e3daed3a060 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -97,13 +97,11 @@ pub type ChangesTrieTransaction = ( pub type InMemoryBackend = TrieBackend, H, SimpleProof>; /// Trie backend with in-memory storage and choice of proof. -/// TODO EMCH consider renaming to ProofCheckBackend -pub type InMemoryBackendWithProof = TrieBackend, H, P>; +pub type InMemoryProofCheckBackend = TrieBackend, H, P>; /// Trie backend with in-memory storage and choice of proof running over /// separate child backends. -/// TODO EMCH consider renaming to ProofCheckBackend -pub type InMemoryBackendWithFullProof = TrieBackend>, H, P>; +pub type InMemoryFullProofCheckBackend = TrieBackend>, H, P>; /// Strategy for executing a call into the runtime. 
#[derive(Copy, Clone, Eq, PartialEq, Debug)] @@ -1036,7 +1034,7 @@ mod tests { ).unwrap(); // check proof locally - let local_result = execution_proof_check::, BlakeTwo256, u64, _>( + let local_result = execution_proof_check::, BlakeTwo256, u64, _>( remote_root, remote_proof.into(), &mut Default::default(), @@ -1407,12 +1405,12 @@ mod tests { let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally - let local_result1 = read_proof_check::, BlakeTwo256, _>( + let local_result1 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[b"value2"], ).unwrap(); - let local_result2 = read_proof_check::, BlakeTwo256, _>( + let local_result2 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[&[0xff]], @@ -1432,13 +1430,13 @@ mod tests { child_info, &[b"value3"], ).unwrap(); - let local_result1 = read_child_proof_check::, BlakeTwo256, _>( + let local_result1 = read_child_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), child_info, &[b"value3"], ).unwrap(); - let local_result2 = read_child_proof_check::, BlakeTwo256, _>( + let local_result2 = read_child_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), child_info, @@ -1472,12 +1470,12 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally - let local_result1 = read_proof_check::, BlakeTwo256, _>( + let local_result1 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[b"value2"], ).unwrap(); - let local_result2 = read_proof_check::, BlakeTwo256, _>( + let local_result2 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[&[0xff]], diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 1415c7c7c3cf5..08a46c8dd9a38 100644 --- 
a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -282,7 +282,7 @@ impl Backend for ProvingBackend type Transaction = S::Overlay; type StorageProof = P; type ProofRegBackend = Self; - type ProofCheckBackend = TrieBackend, H, P>; + type ProofCheckBackend = crate::InMemoryProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.trie_backend.storage(key) @@ -395,7 +395,7 @@ impl Backend for ProvingBackend pub fn create_proof_check_backend( root: H::Out, proof: P, -) -> Result, H, P>, Box> +) -> Result, Box> where H: Hasher, H::Out: Codec, @@ -414,7 +414,7 @@ where pub fn create_full_proof_check_backend( root: H::Out, proof: P, -) -> Result>, H, P>, Box> +) -> Result, Box> where H: Hasher, H::Out: Codec, @@ -434,7 +434,7 @@ where #[cfg(test)] mod tests { - use crate::InMemoryBackendWithProof; + use crate::InMemoryProofCheckBackend; use crate::trie_backend::tests::test_trie_proof; use super::*; use crate::proving_backend::create_proof_check_backend; @@ -511,7 +511,7 @@ mod tests { } fn proof_recorded_and_checked_inner>() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); - let in_memory = InMemoryBackendWithProof::::default(); + let in_memory = InMemoryProofCheckBackend::::default(); let in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -547,7 +547,7 @@ mod tests { (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; - let in_memory = InMemoryBackendWithProof::::default(); + let in_memory = InMemoryProofCheckBackend::::default(); let in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory.full_storage_root( diff --git a/primitives/state-machine/src/trie_backend.rs 
b/primitives/state-machine/src/trie_backend.rs index 2ad5c40b34b79..894c2cfe143cc 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -124,7 +124,7 @@ impl Backend for TrieBackend where H, Self::StorageProof, >; - type ProofCheckBackend = TrieBackend, H, P>; + type ProofCheckBackend = crate::InMemoryProofCheckBackend; fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) @@ -300,7 +300,7 @@ impl Backend for TrieBackend where } } -impl ProofCheckBackend for TrieBackend, H, P> +impl ProofCheckBackend for crate::InMemoryProofCheckBackend where H::Out: Ord + Codec, P: BackendStorageProof, @@ -315,7 +315,7 @@ impl ProofCheckBackend for TrieBackend, H, P> } } -impl ProofCheckBackend for TrieBackend>, H, P> +impl ProofCheckBackend for crate::InMemoryFullProofCheckBackend where H::Out: Ord + Codec, P: FullBackendStorageProof, From f8185e9f360ca33b315b5fdb8b90a33295dba760 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Jun 2020 18:36:55 +0200 Subject: [PATCH 161/185] clean warnings --- primitives/state-machine/src/proving_backend.rs | 2 +- primitives/state-machine/src/trie_backend.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 08a46c8dd9a38..807cafce63787 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -23,7 +23,7 @@ use codec::{Decode, Codec}; use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, RecordBackendFor, + empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, RecordBackendFor, ProofInput, RecordBackend, RegStorageProof, BackendStorageProof, record_all_keys, ProofInputKind, FullBackendStorageProof, }; diff --git 
a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 894c2cfe143cc..ed8bf731be7af 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -29,7 +29,6 @@ use crate::{ StorageKey, StorageValue, Backend, backend::ProofCheckBackend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, }; -use sp_trie::MemoryDB; use parking_lot::RwLock; use std::marker::PhantomData; From f7b2fba356fd5e318bdbdf97dada9ab55141d06c Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Jun 2020 18:38:41 +0200 Subject: [PATCH 162/185] more warnings --- client/api/src/cht.rs | 2 +- client/finality-grandpa/src/tests.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index aafa332de8a21..193ae43148b44 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -32,7 +32,7 @@ use sp_trie; use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; use sp_state_machine::{ - MemoryDB, backend::Backend as StateBackend, SimpleProof as StorageProof, + backend::Backend as StateBackend, SimpleProof as StorageProof, prove_read_on_proof_backend, read_proof_check, read_proof_check_on_proving_backend, SimpleProof, InMemoryBackend, }; diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index c994bf7cd9110..b043223cce540 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -43,7 +43,7 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor}; use sp_runtime::generic::{BlockId, DigestItem}; use sp_core::{H256, crypto::Public}; use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof}; -use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check, MemoryDB}; +use sp_state_machine::{InMemoryBackend, prove_read, 
read_proof_check}; use authorities::AuthoritySet; use finality_proof::{ From a6e421459817b50d5bf66e5bdfcac9ccd5800a3e Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 11:28:40 +0200 Subject: [PATCH 163/185] Renamings. --- client/api/src/backend.rs | 8 +-- client/api/src/call_executor.rs | 6 +- client/api/src/lib.rs | 2 +- client/api/src/light.rs | 4 +- client/api/src/proof_provider.rs | 10 ++-- client/block-builder/src/lib.rs | 4 +- client/db/src/bench.rs | 6 +- client/db/src/lib.rs | 6 +- client/db/src/storage_cache.rs | 10 ++-- client/light/src/backend.rs | 6 +- client/light/src/call_executor.rs | 12 ++-- client/light/src/fetcher.rs | 4 +- client/network/src/light_client_handler.rs | 4 +- client/network/src/protocol.rs | 12 ++-- client/service/src/client/call_executor.rs | 8 +-- client/service/src/client/client.rs | 12 ++-- client/service/test/src/client/light.rs | 8 +-- .../api/proc-macro/src/impl_runtime_apis.rs | 6 +- .../proc-macro/src/mock_impl_runtime_apis.rs | 2 +- primitives/api/src/lib.rs | 8 +-- primitives/state-machine/src/backend.rs | 50 +++++------------ primitives/state-machine/src/lib.rs | 50 +++++++++-------- .../state-machine/src/proving_backend.rs | 56 +++++++++---------- primitives/state-machine/src/trie_backend.rs | 16 +++--- primitives/trie/src/lib.rs | 10 ++-- primitives/trie/src/storage_proof/compact.rs | 27 ++++----- primitives/trie/src/storage_proof/mod.rs | 43 ++++++-------- primitives/trie/src/storage_proof/multiple.rs | 9 ++- .../trie/src/storage_proof/query_plan.rs | 6 +- primitives/trie/src/storage_proof/simple.rs | 22 ++++---- 30 files changed, 197 insertions(+), 230 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 8edf94b4b4132..2a1ea9bb31988 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -41,18 +41,18 @@ use sp_consensus::BlockOrigin; use parking_lot::RwLock; pub use sp_state_machine::backend::Backend as StateBackend; -pub use 
sp_state_machine::backend::ProofRegFor; +pub use sp_state_machine::backend::ProofRawFor; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. pub type StateBackendFor = >::State; /// Extracts the proof for the given backend. -pub type ProofFor = as StateBackend>>::StorageProof; +pub type ProofFor = as StateBackend>>::StorageProof; -type ProofRegForSB = >>::ProofRegBackend; +type RegProofForSB = >>::RegProofBackend; -type ProofRegForB = ProofRegForSB, Block>; +type RegProofForB = RegProofForSB, Block>; /// Extracts the transaction for the given state backend. pub type TransactionForSB = >>::Transaction; diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index bbf174f9acf13..0e6fac785df27 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -111,7 +111,7 @@ pub trait CallExecutor { overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] - ) -> Result<(Vec, sp_state_machine::backend::ProofRegFor>), sp_blockchain::Error> { + ) -> Result<(Vec, sp_state_machine::backend::ProofRawFor>), sp_blockchain::Error> { let proof_state = state.as_proof_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) @@ -124,13 +124,13 @@ pub trait CallExecutor { /// /// No changes are made. /// TODO EMCH try to remove P param and use the associated backend type? - fn prove_at_proof_backend_state>>( + fn prove_at_proof_backend_state>>( &self, proof_backend: &P, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8], - ) -> Result<(Vec, sp_state_machine::backend::ProofRegFor>), sp_blockchain::Error>; + ) -> Result<(Vec, sp_state_machine::backend::ProofRawFor>), sp_blockchain::Error>; /// Get runtime version if supported. 
fn native_runtime_version(&self) -> Option<&NativeVersion>; diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index e9a036a6b3f63..15170c4d84dd1 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -37,7 +37,7 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{SimpleProof, StorageProof, ExecutionStrategy, CloneableSpawn, ProofNodes}; +pub use sp_state_machine::{ProofCommon, SimpleProof, ExecutionStrategy, CloneableSpawn, ProofNodes}; /// Usage Information Provider interface /// diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 79f020c78e333..906d30d242abb 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -27,7 +27,7 @@ use sp_runtime::{ generic::BlockId }; use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey}; -use sp_state_machine::{SimpleProof, StorageProof}; +use sp_state_machine::SimpleProof; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, Error as ClientError, Result as ClientResult, @@ -190,7 +190,7 @@ pub trait Fetcher: Send + Sync { /// /// Implementations of this trait should not use any prunable blockchain data /// except that is passed to its methods. -pub trait FetchChecker: Send + Sync { +pub trait FetchChecker: Send + Sync { /// Check remote header proof. fn check_header_proof( &self, diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index c8065e478b5e0..2720eb208d9b1 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -23,16 +23,16 @@ use sp_runtime::{ }; use crate::{SimpleProof, ChangesProof}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; -use sp_trie::BackendStorageProof; +use sp_trie::BackendProof; /// Interface for providing block proving utilities. -pub trait ProofProvider>> { +pub trait ProofProvider>> { /// Reads storage value at a given block + key, returning read proof. 
fn read_proof( &self, id: &BlockId, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result; + ) -> sp_blockchain::Result; /// Reads child storage value at a given block + storage_key + key, returning /// read proof. @@ -41,7 +41,7 @@ pub trait ProofProvider id: &BlockId, child_info: &ChildInfo, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result; + ) -> sp_blockchain::Result; /// Execute a call to a contract on top of state in a block of given hash /// AND returning execution proof. @@ -52,7 +52,7 @@ pub trait ProofProvider id: &BlockId, method: &str, call_data: &[u8], - ) -> sp_blockchain::Result<(Vec, Proof::StorageProofReg)>; + ) -> sp_blockchain::Result<(Vec, Proof::ProofRaw)>; /// Reads given header and generates CHT-based header proof. fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, SimpleProof)>; diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index bb7294f76ddb2..af6c8478a8eb1 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -53,12 +53,12 @@ pub struct BuiltBlock, /// An optional proof that was recorded while building the block. - pub proof: Option>>, + pub proof: Option>>, } impl>> BuiltBlock { /// Convert into the inner values. 
- pub fn into_inner(self) -> (Block, StorageChanges, Option>>) { + pub fn into_inner(self) -> (Block, StorageChanges, Option>>) { (self.block, self.storage_changes, self.proof) } } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 521640776fdd6..3b0ae3f94a6f4 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -27,7 +27,7 @@ use sp_trie::{MemoryDB, prefixed_key}; use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::{Backend as StateBackend, ProofRegStateFor}, SimpleProof}; +use sp_state_machine::{DBValue, backend::{Backend as StateBackend, RegProofStateFor}, SimpleProof}; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; @@ -119,7 +119,7 @@ impl StateBackend> for BenchmarkingState { type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; type StorageProof = as StateBackend>>::StorageProof; - type ProofRegBackend = as StateBackend>>::ProofRegBackend; + type RegProofBackend = as StateBackend>>::RegProofBackend; type ProofCheckBackend = as StateBackend>>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -280,7 +280,7 @@ impl StateBackend> for BenchmarkingState { self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) } - fn from_reg_state(self, previous: ProofRegStateFor>) -> Option { + fn from_reg_state(self, previous: RegProofStateFor>) -> Option { self.state.borrow_mut().take().and_then(|s| s.from_reg_state(previous)) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 71a9f6f582931..7c0dff11930c0 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -77,7 +77,7 @@ use sp_runtime::traits::{ use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, StorageCollection, ChildStorageCollection, 
SimpleProof, - backend::{Backend as StateBackend, ProofRegStateFor}, StateMachineStats, + backend::{Backend as StateBackend, RegProofStateFor}, StateMachineStats, }; use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; @@ -154,7 +154,7 @@ impl StateBackend> for RefTrackingState { type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; type StorageProof = as StateBackend>>::StorageProof; - type ProofRegBackend = as StateBackend>>::ProofRegBackend; + type RegProofBackend = as StateBackend>>::RegProofBackend; type ProofCheckBackend = as StateBackend>>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -253,7 +253,7 @@ impl StateBackend> for RefTrackingState { self.state().child_keys(child_info, prefix) } - fn from_reg_state(mut self, previous: ProofRegStateFor>) -> Option { + fn from_reg_state(mut self, previous: RegProofStateFor>) -> Option { let state = std::mem::replace(&mut self.state, Default::default()).expect("Non dropped state"); state.from_reg_state(previous) } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 70de8f0a79dd6..d829be2f9f3e3 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -26,7 +26,7 @@ use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; use sp_core::hexdisplay::HexDisplay; use sp_core::storage::ChildInfo; use sp_state_machine::{ - backend::{Backend as StateBackend, ProofRegStateFor}, StorageKey, StorageValue, + backend::{Backend as StateBackend, RegProofStateFor}, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; use log::trace; @@ -496,7 +496,7 @@ impl>, B: BlockT> StateBackend> for Cachin type Error = S::Error; type Transaction = S::Transaction; type StorageProof = S::StorageProof; - type ProofRegBackend = S::ProofRegBackend; + type RegProofBackend = 
S::RegProofBackend; type ProofCheckBackend = S::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -654,7 +654,7 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.child_keys(child_info, prefix) } - fn from_reg_state(self, previous: ProofRegStateFor>) -> Option { + fn from_reg_state(self, previous: RegProofStateFor>) -> Option { self.state.from_reg_state(previous) } @@ -739,7 +739,7 @@ impl>, B: BlockT> StateBackend> for Syncin type Error = S::Error; type Transaction = S::Transaction; type StorageProof = S::StorageProof; - type ProofRegBackend = S::ProofRegBackend; + type RegProofBackend = S::RegProofBackend; type ProofCheckBackend = S::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -846,7 +846,7 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().usage_info() } - fn from_reg_state(mut self, previous: ProofRegStateFor>) -> Option { + fn from_reg_state(mut self, previous: RegProofStateFor>) -> Option { self.sync().and_then(|s| s.from_reg_state(previous)) } } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 165fa3711abb8..e7236bd32545d 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -29,7 +29,7 @@ use sp_core::ChangesTrieConfiguration; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ - backend::{Backend as StateBackend, ProofRegStateFor}, InMemoryBackend, ChangesTrieTransaction, + backend::{Backend as StateBackend, RegProofStateFor}, InMemoryBackend, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; use sp_runtime::{generic::BlockId, Justification, Storage}; @@ -381,7 +381,7 @@ impl StateBackend for GenesisOrUnavailableState type Error = ClientError; type Transaction = as StateBackend>::Transaction; type StorageProof = as StateBackend>::StorageProof; - type ProofRegBackend = as StateBackend>::ProofRegBackend; + type RegProofBackend 
= as StateBackend>::RegProofBackend; type ProofCheckBackend = as StateBackend>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> ClientResult>> { @@ -511,7 +511,7 @@ impl StateBackend for GenesisOrUnavailableState sp_state_machine::UsageInfo::empty() } - fn from_reg_state(self, previous: ProofRegStateFor) -> Option { + fn from_reg_state(self, previous: RegProofStateFor) -> Option { match self { GenesisOrUnavailableState::Genesis(state) => state.from_reg_state(previous), GenesisOrUnavailableState::Unavailable => None, diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index a0d13c68f587d..c22874330aca4 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -32,9 +32,9 @@ use sp_state_machine::{ self, OverlayedChanges, ExecutionStrategy, execution_proof_check_on_proof_backend, ExecutionManager, CloneableSpawn, InMemoryBackend, }; -use sp_state_machine::backend::{Backend as StateBackend, ProofRegFor}; +use sp_state_machine::backend::{Backend as StateBackend, ProofRawFor}; use hash_db::Hasher; -use sp_state_machine::{SimpleProof as StorageProof, MergeableStorageProof}; +use sp_state_machine::{SimpleProof as StorageProof, MergeableProof}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; @@ -158,13 +158,13 @@ impl CallExecutor for } } - fn prove_at_proof_backend_state>>( + fn prove_at_proof_backend_state>>( &self, _proof_backend: &P, _overlay: &mut OverlayedChanges, _method: &str, _call_data: &[u8], - ) -> ClientResult<(Vec, ProofRegFor>)> { + ) -> ClientResult<(Vec, ProofRawFor>)> { Err(ClientError::NotAvailableOnLightClient) } @@ -183,7 +183,7 @@ pub fn prove_execution( executor: &E, method: &str, call_data: &[u8], -) -> ClientResult<(Vec, ProofRegFor>)> +) -> ClientResult<(Vec, ProofRawFor>)> where Block: BlockT, S: StateBackend>, @@ -211,7 +211,7 @@ pub fn prove_execution( method, call_data, )?; - let total_proof = >>::merge( + let total_proof = >>::merge( vec![init_proof, 
exec_proof], ); diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 02a48d8d172dd..436be0aa8840b 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -33,9 +33,9 @@ use sp_runtime::traits::{ use sp_state_machine::{ ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, CloneableSpawn, BackendStorageProof, InMemoryBackend, + read_child_proof_check, CloneableSpawn, BackendProof, InMemoryBackend, }; -pub use sp_state_machine::{SimpleProof as StorageProof, StorageProof as StorageProofT}; +pub use sp_state_machine::{SimpleProof as StorageProof, ProofCommon}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; pub use sc_client_api::{ diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index a6d9a06468fd7..276fa4c88da94 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -57,7 +57,7 @@ use nohash_hasher::IntMap; use prost::Message; use sc_client_api::{ SimpleProof as StorageProof, - StorageProof as StorageProofT, + ProofCommon, light::{ self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, @@ -1330,7 +1330,7 @@ mod tests { swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, yamux }; - use sc_client_api::{StorageProof as StorageProofT, RemoteReadChildRequest, FetchChecker, SimpleProof as StorageProof}; + use sc_client_api::{ProofCommon, RemoteReadChildRequest, FetchChecker, SimpleProof as StorageProof}; use sp_blockchain::{Error as ClientError}; use sp_core::storage::ChildInfo; use std::{ diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index e89c5d28fd9c1..df94a7ae6b066 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ 
-56,7 +56,7 @@ use std::sync::Arc; use std::fmt::Write; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; -use sc_client_api::{ChangesProof, StorageProof}; +use sc_client_api::{ChangesProof, ProofCommon}; use util::LruHashSet; use wasm_timer::Instant; @@ -1473,7 +1473,7 @@ impl Protocol { error ); self.peerset_handle.report_peer(who.clone(), rep::RPC_FAILED); - StorageProof::empty() + ProofCommon::empty() } }; @@ -1619,7 +1619,7 @@ impl Protocol { request.block, error ); - StorageProof::empty() + ProofCommon::empty() } }; self.send_message( @@ -1675,7 +1675,7 @@ impl Protocol { request.block, error ); - StorageProof::empty() + ProofCommon::empty() } }; self.send_message( @@ -1704,7 +1704,7 @@ impl Protocol { request.block, error ); - (Default::default(), StorageProof::empty()) + (Default::default(), ProofCommon::empty()) } }; self.send_message( @@ -1764,7 +1764,7 @@ impl Protocol { max_block: Zero::zero(), proof: vec![], roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), + roots_proof: ProofCommon::empty(), } } }; diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index ebdf0b18813b4..a2cc457e72e18 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -23,7 +23,7 @@ use sp_runtime::{ }; use sp_state_machine::{ self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::{Backend as _, ProofRegFor}, + backend::{Backend as _, ProofRawFor}, }; use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; @@ -193,7 +193,7 @@ where // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) }; - use sp_state_machine::backend::ProofRegBackend; + use sp_state_machine::backend::RegProofBackend; let (recorder_state, 
input_state) = backend.extract_recorder(); *recorder = recorder_state; input.consolidate(input_state).map_err(|e| format!("{:?}", e))?; @@ -241,13 +241,13 @@ where .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } - fn prove_at_proof_backend_state>>( + fn prove_at_proof_backend_state>>( &self, proof_backend: &P, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] - ) -> Result<(Vec, ProofRegFor>), sp_blockchain::Error> { + ) -> Result<(Vec, ProofRawFor>), sp_blockchain::Error> { sp_state_machine::prove_execution_on_proof_backend::<_, _, NumberFor, _>( proof_backend, overlay, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index b610af8e98ba3..d0e5b7c5a469a 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -18,7 +18,7 @@ //! Substrate Client -use sc_client_api::backend::{ProofFor, ProofRegFor}; +use sc_client_api::backend::{ProofFor, ProofRawFor}; use std::{ marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, @@ -45,7 +45,7 @@ use sp_state_machine::{ DBValue, backend::Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, SimpleProof as StorageProof, - MergeableStorageProof, + MergeableProof, }; use sc_executor::RuntimeVersion; use sp_consensus::{ @@ -1183,7 +1183,7 @@ impl ProofProvider> for Client, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result>> { + ) -> sp_blockchain::Result>> { self.state_at(id) .and_then(|state| prove_read(state, keys) .map_err(Into::into)) @@ -1194,7 +1194,7 @@ impl ProofProvider> for Client, child_info: &ChildInfo, keys: &mut dyn Iterator, - ) -> sp_blockchain::Result>> { + ) -> sp_blockchain::Result>> { self.state_at(id) .and_then(|state| prove_child_read(state, child_info, keys) .map_err(Into::into)) @@ -1205,7 +1205,7 @@ impl ProofProvider> for Client, method: &str, 
call_data: &[u8], - ) -> sp_blockchain::Result<(Vec, ProofRegFor>)> { + ) -> sp_blockchain::Result<(Vec, ProofRawFor>)> { // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. // @@ -1224,7 +1224,7 @@ impl ProofProvider> for Client>::merge( + Ok((r, ProofRawFor::>::merge( vec![p, code_proof], ))) }) diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 3742f6fc681c6..58373c2130139 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -45,7 +45,7 @@ use sc_client_api::{blockchain::Info, backend::NewBlockState, Backend as ClientB AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, BlockImportOperation, RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, - SimpleProof as StorageProof, StorageProof as _}; + SimpleProof as StorageProof, ProofCommon}; use sp_externalities::Extensions; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::{ @@ -63,7 +63,7 @@ use substrate_test_runtime_client::{ use sp_core::{blake2_256, ChangesTrieConfiguration}; use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; -use sp_state_machine::backend::{ProofRegFor, Backend as _}; +use sp_state_machine::backend::{ProofRawFor, Backend as _}; type InMemoryProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; @@ -243,13 +243,13 @@ impl CallExecutor for DummyCallExecutor { unreachable!() } - fn prove_at_proof_backend_state>>( + fn prove_at_proof_backend_state>>( &self, _proof_backend: &P, _overlay: &mut OverlayedChanges, _method: &str, _call_data: &[u8], - ) -> Result<(Vec, ProofRegFor>), ClientError> { + ) -> Result<(Vec, ProofRawFor>), ClientError> { unreachable!() } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 
88639d4e4ad2b..2bab789a2be75 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -285,14 +285,14 @@ fn generate_runtime_api_base_structures() -> Result { self.recorder = Some(std::cell::RefCell::new(Default::default())); } - fn extract_proof(&mut self) -> Option<#crate_::ProofRegFor>> { - use #crate_::RegStorageProof; + fn extract_proof(&mut self) -> Option<#crate_::ProofRawFor>> { + use #crate_::RecordableProof; self.recorder .take() .and_then(|recorder| { let #crate_::ProofRecorder{ recorder, input } = &mut *recorder.borrow_mut(); let input = std::mem::replace(input, #crate_::ProofInput::None); - <>>::ProofRegBackend as #crate_::ProofRegBackend<#crate_::HashFor>>::extract_proof_reg( + <>>::RegProofBackend as #crate_::RegProofBackend<#crate_::HashFor>>::extract_proof_reg( &recorder, input, ).ok() diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 727c33931c40f..758e5defe810b 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -99,7 +99,7 @@ fn implement_common_api_traits( unimplemented!("`record_proof` not implemented for runtime api mocks") } - fn extract_proof(&mut self) -> Option<#crate_::ProofRegFor>> { + fn extract_proof(&mut self) -> Option<#crate_::ProofRawFor>> { unimplemented!("`extract_proof` not implemented for runtime api mocks") } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 0cfdb4daf6578..9a2754bad1df2 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -39,8 +39,8 @@ extern crate self as sp_api; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - OverlayedChanges, StorageProof, StorageProofKind, backend::Backend as StateBackend, ChangesTrieState, InMemoryBackend, - ProofInput, backend::{ProofRegFor, ProofRegBackend}, RegStorageProof, + 
OverlayedChanges, ProofCommon, backend::Backend as StateBackend, ChangesTrieState, InMemoryBackend, + ProofInput, backend::{ProofRawFor, RegProofBackend}, RecordableProof, }; #[doc(hidden)] #[cfg(feature = "std")] @@ -381,7 +381,7 @@ pub trait ApiExt: ApiErrorExt { /// This stops the proof recording. /// /// If `record_proof` was not called before, this will return `None`. - fn extract_proof(&mut self) -> Option>>; + fn extract_proof(&mut self) -> Option>>; /// Convert the api object into the storage changes that were done while executing runtime /// api functions. @@ -527,7 +527,7 @@ pub struct ProofRecorder>, Block: BlockT> { /// The recorder to use over the db use by trie db. /// TODO EMCH this the sync recorder and we got a mechanism of extract / merge for it /// when it should only be reusing it, but merge still needed for input. - pub recorder: sp_state_machine::backend::ProofRegStateFor>, + pub recorder: sp_state_machine::backend::RegProofStateFor>, /// The additional input needed for the proof. pub input: ProofInput, } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index bdc0b67f70387..c705466c4d06c 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -21,16 +21,15 @@ use hash_db::Hasher; use codec::{Decode, Encode}; use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; use crate::{UsageInfo, StorageKey, StorageValue, StorageCollection}; -use sp_trie::{ProofInput, BackendStorageProof}; +use sp_trie::{ProofInput, BackendProof}; /// Access the state of the proof backend of a backend. -pub type ProofRegStateFor = <>::ProofRegBackend as ProofRegBackend>::State; +pub type RegProofStateFor = <>::RegProofBackend as RegProofBackend>::State; -/// Access the state of the proof backend of a backend. -pub type ProofRegFor = <>::StorageProof as BackendStorageProof>::StorageProofReg; +/// Access the raw proof of a backend. 
+pub type ProofRawFor = <>::StorageProof as BackendProof>::ProofRaw; /// Access the state of the proof backend of a backend. -/// TODO should not be an alias pub type ProofFor = >::StorageProof; /// A state backend is used to read state data and can have changes committed @@ -45,12 +44,10 @@ pub trait Backend: Sized + std::fmt::Debug { type Transaction: Consolidate + Default + Send; /// The actual proof produced. - type StorageProof: BackendStorageProof; - -// + sp_trie::WithRegStorageProof; + type StorageProof: BackendProof; /// Type of proof backend. - type ProofRegBackend: ProofRegBackend; + type RegProofBackend: RegProofBackend; /// Type of proof backend. type ProofCheckBackend: ProofCheckBackend; @@ -169,14 +166,15 @@ pub trait Backend: Sized + std::fmt::Debug { } /// Try convert into a proof backend. - fn as_proof_backend(self) -> Option { + fn as_proof_backend(self) -> Option { self.from_reg_state(Default::default()) } /// Try convert into a proof backend. /// We can optionally use a previous proof backend to avoid having to merge /// proof later. - fn from_reg_state(self, previous: ProofRegStateFor) -> Option; + /// TODO EMCH consider adding previous input. + fn from_reg_state(self, previous: RegProofStateFor) -> Option; /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. @@ -237,22 +235,6 @@ pub trait Backend: Sized + std::fmt::Debug { } } -/// Backend that can be instantiated from its state. -/// TODO EMCH does not seem use at this point -pub trait InstantiableStateBackend: Backend - where - H: Hasher, -{ - /// Storage to use to instantiate. - type Storage; - - /// Instantiation method. - fn new(storage: Self::Storage, state: H::Out) -> Self; - - /// Extract state out of the backend. - fn extract_state(self) -> (Self::Storage, H::Out); -} - /// Backend that can be instantiated from intital content. 
pub trait GenesisStateBackend: Backend where @@ -263,24 +245,22 @@ pub trait GenesisStateBackend: Backend } /// Backend used to register a proof record. -pub trait ProofRegBackend: crate::backend::Backend +pub trait RegProofBackend: crate::backend::Backend where H: Hasher, { /// State of a backend. /// TODO try to merge with RecordBackendFor (aka remove the arc rwlock in code) type State: Default + Send + Sync + Clone; - // + Into> - // + From> + /// Extract proof after running operation to prove. - fn extract_proof(&self) -> Result<>::StorageProofReg, Box>; + fn extract_proof(&self) -> Result<>::ProofRaw, Box>; /// Get current recording state. fn extract_recorder(self) -> (Self::State, ProofInput); /// Extract from the state and input. - /// TODO EMCH fusing state and record could avoid this - fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result<>::StorageProofReg, Box>; + fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result<>::ProofRaw, Box>; } /// Backend used to produce proof. @@ -303,7 +283,7 @@ impl<'a, T, H> Backend for &'a T type Error = T::Error; type Transaction = T::Transaction; type StorageProof = T::StorageProof; - type ProofRegBackend = T::ProofRegBackend; + type RegProofBackend = T::RegProofBackend; type ProofCheckBackend = T::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -380,7 +360,7 @@ impl<'a, T, H> Backend for &'a T (*self).usage_info() } - fn from_reg_state(self, _previous: ProofRegStateFor) -> Option { + fn from_reg_state(self, _previous: RegProofStateFor) -> Option { // cannot move out of reference, consider cloning when needed. 
None } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 39e3daed3a060..6a0c58978f59d 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -45,9 +45,9 @@ mod stats; mod read_only; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, - TrieNodesStorageProof, StorageProof, StorageProofKind, ChildrenProofMap, - ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RegStorageProof, - SimpleProof, CompactProof, BackendStorageProof, MergeableStorageProof}; + TrieNodesStorageProof, ProofCommon, StorageProofKind, ChildrenProofMap, + ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RecordableProof, + SimpleProof, CompactProof, BackendProof, MergeableProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; pub use read_only::{ReadOnlyExternalities, InspectState}; @@ -80,7 +80,7 @@ pub use in_memory_backend::new_in_mem; pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; pub use sp_core::traits::CloneableSpawn; -use backend::{Backend, ProofRegBackend, ProofCheckBackend, ProofRegFor}; +use backend::{Backend, RegProofBackend, ProofCheckBackend, ProofRawFor}; type CallResult = Result, E>; @@ -470,7 +470,7 @@ pub fn prove_execution( method: &str, call_data: &[u8], runtime_code: &RuntimeCode, -) -> Result<(Vec, ProofRegFor), Box> +) -> Result<(Vec, ProofRawFor), Box> where B: Backend, H: Hasher, @@ -508,9 +508,9 @@ pub fn prove_execution_on_proof_backend( method: &str, call_data: &[u8], runtime_code: &RuntimeCode, -) -> Result<(Vec, ProofRegFor), Box> +) -> Result<(Vec, ProofRawFor), Box> where - P: ProofRegBackend, + P: RegProofBackend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, @@ -609,7 +609,7 @@ where pub fn prove_read( backend: B, keys: I, -) -> Result, Box> +) -> Result, Box> where B: Backend, H: Hasher, @@ -628,7 +628,7 @@ where pub fn prove_read_for_query_plan_check( backend: B, 
keys: I, -) -> Result<(crate::backend::ProofRegStateFor, ProofInput), Box> +) -> Result<(crate::backend::RegProofStateFor, ProofInput), Box> where B: Backend, H: Hasher, @@ -654,7 +654,7 @@ pub fn prove_child_read( backend: B, child_info: &ChildInfo, keys: I, -) -> Result, Box> +) -> Result, Box> where B: Backend, H: Hasher, @@ -671,9 +671,9 @@ where pub fn prove_read_on_proof_backend( proving_backend: &P, keys: I, -) -> Result, Box> +) -> Result, Box> where - P: ProofRegBackend, + P: RegProofBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, @@ -692,9 +692,9 @@ pub fn prove_child_read_on_proof_backend( proving_backend: &P, child_info: &ChildInfo, keys: I, -) -> Result, Box> +) -> Result, Box> where - P: ProofRegBackend, + P: RegProofBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, @@ -713,7 +713,7 @@ pub fn prove_child_read_for_query_plan_check( backend: B, top_keys: I, child_keys: I3, -) -> Result<(crate::backend::ProofRegStateFor, ProofInput), Box> +) -> Result<(crate::backend::RegProofStateFor, ProofInput), Box> where B: Backend, H: Hasher, @@ -832,7 +832,7 @@ mod tests { use super::changes_trie::Configuration as ChangesTrieConfig; use sp_core::{map, traits::{Externalities, RuntimeCode}}; use sp_runtime::traits::BlakeTwo256; - use sp_trie::{Layout, SimpleProof, SimpleFullProof, BackendStorageProof, FullBackendStorageProof}; + use sp_trie::{Layout, SimpleProof, SimpleFullProof, BackendProof, FullBackendProof}; type CompactProof = sp_trie::CompactProof>; type CompactFullProof = sp_trie::CompactFullProof>; @@ -1012,7 +1012,7 @@ mod tests { prove_execution_and_proof_check_works_inner::(); prove_execution_and_proof_check_works_inner::(); } - fn prove_execution_and_proof_check_works_inner>() { + fn prove_execution_and_proof_check_works_inner>() { let executor = DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1304,7 +1304,7 @@ mod tests { #[test] fn prove_read_and_proof_check_works_query_plan() { - use 
sp_trie::{CheckableStorageProof, ProofInput}; + use sp_trie::{VerifiableProof, ProofInput}; fn extract_recorder(recorder: std::sync::Arc>) -> T { match std::sync::Arc::try_unwrap(recorder) { @@ -1332,7 +1332,7 @@ mod tests { std::iter::empty::<(_, _, std::iter::Empty<_>)>(), true, ); - let remote_proof = >::extract_proof(&recorder, input).unwrap(); + let remote_proof = >::extract_proof(&recorder, input).unwrap(); let input_check = ProofInput::query_plan_with_values( remote_root.encode(), @@ -1360,7 +1360,7 @@ mod tests { vec![(child_info.clone(), remote_root_child.encode(), vec![b"value3".to_vec()].into_iter())].into_iter(), include_roots, ); - let remote_proof = >::extract_proof(&recorder, input).unwrap(); + let remote_proof = >::extract_proof(&recorder, input).unwrap(); let input_check = ProofInput::query_plan_with_values( remote_root.encode(), @@ -1394,8 +1394,8 @@ mod tests { } fn prove_read_and_proof_check_works_inner

() where - P: BackendStorageProof, - P::StorageProofReg: Clone, + P: BackendProof, + P::ProofRaw: Clone, { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; @@ -1462,19 +1462,21 @@ mod tests { } fn prove_read_and_proof_on_fullbackend_works_inner

() where - P: FullBackendStorageProof, - P::StorageProofReg: Clone, + P: FullBackendProof, + P::ProofRaw: Clone, { // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); +// TODO EMCH use InMemoryFullProofCheckBackend // check proof locally let local_result1 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[b"value2"], ).unwrap(); +// TODO EMCH use InMemoryFullProofCheckBackend let local_result2 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 807cafce63787..be0b6f7aa5250 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -24,14 +24,14 @@ use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, RecordBackendFor, - ProofInput, RecordBackend, RegStorageProof, BackendStorageProof, - record_all_keys, ProofInputKind, FullBackendStorageProof, + ProofInput, RecordBackend, RecordableProof, BackendProof, + record_all_keys, ProofInputKind, FullBackendProof, }; pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, DBValue}; -use crate::backend::{Backend, ProofRegStateFor, ProofRegBackend}; +use crate::backend::{Backend, RegProofStateFor, RegProofBackend}; use sp_core::storage::{ChildInfo, ChildInfoProof}; use std::marker::PhantomData; @@ -123,7 +123,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> pub struct ProvingBackend< S: TrieBackendStorage, H: Hasher, - P: BackendStorageProof, + P: BackendProof, > { trie_backend: TrieBackend>, H, P>, _ph: PhantomData

, @@ -141,7 +141,7 @@ impl<'a, S, H, P> ProvingBackend<&'a S, H, P> S: TrieBackendStorage, H: Hasher, H::Out: Codec, - P: BackendStorageProof, + P: BackendProof, { /// Create new proving backend. pub fn new(backend: &'a TrieBackend) -> Self { @@ -160,7 +160,7 @@ impl<'a, S, H, P> ProvingBackend<&'a S, H, P> proof_recorder, _ph: PhantomData, }; - match P::StorageProofReg::INPUT_KIND { + match P::ProofRaw::INPUT_KIND { ProofInputKind::ChildTrieRoots => { ProvingBackend { trie_backend: TrieBackend::new_with_roots(recorder, root), @@ -184,7 +184,7 @@ impl ProvingBackend S: TrieBackendStorage, H: Hasher, H::Out: Codec, - P: BackendStorageProof, + P: BackendProof, { /// Create new proving backend with the given recorder. pub fn from_backend_with_recorder( @@ -197,7 +197,7 @@ impl ProvingBackend proof_recorder, _ph: PhantomData, }; - match P::StorageProofReg::INPUT_KIND { + match P::ProofRaw::INPUT_KIND { ProofInputKind::ChildTrieRoots => { ProvingBackend { trie_backend: TrieBackend::new_with_roots(recorder, root), @@ -232,7 +232,7 @@ impl, H: Hasher, R: RecordBackend> TrieBackendStorag } } -impl, H: Hasher, P: BackendStorageProof> std::fmt::Debug +impl, H: Hasher, P: BackendProof> std::fmt::Debug for ProvingBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -240,31 +240,31 @@ impl, H: Hasher, P: BackendStorageProof> std::fmt::D } } -impl ProofRegBackend for ProvingBackend +impl RegProofBackend for ProvingBackend where S: TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, - P: BackendStorageProof, + P: BackendProof, { type State = SyncRecordBackendFor; - fn extract_proof(&self) -> Result<>::StorageProofReg, Box> { + fn extract_proof(&self) -> Result<>::ProofRaw, Box> { let input = self.trie_backend.extract_registered_roots(); - >::StorageProofReg::extract_proof( + >::ProofRaw::extract_proof( &self.trie_backend.essence().backend_storage().proof_recorder.read(), input, ).map_err(|e| Box::new(e) as Box) } - fn extract_recorder(self) -> 
(ProofRegStateFor, ProofInput) { + fn extract_recorder(self) -> (RegProofStateFor, ProofInput) { let input = self.trie_backend.extract_registered_roots(); let recorder = self.trie_backend.into_storage().proof_recorder; (recorder, input) } - fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result<>::StorageProofReg, Box> { - <>::StorageProofReg>::extract_proof( + fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result<>::ProofRaw, Box> { + <>::ProofRaw>::extract_proof( & recorder_state.read(), input, ).map_err(|e| Box::new(e) as Box) @@ -276,12 +276,12 @@ impl Backend for ProvingBackend S: TrieBackendStorage, H: Hasher, H::Out: Ord + Codec, - P: BackendStorageProof, + P: BackendProof, { type Error = String; type Transaction = S::Overlay; type StorageProof = P; - type ProofRegBackend = Self; + type RegProofBackend = Self; type ProofCheckBackend = crate::InMemoryProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -370,7 +370,7 @@ impl Backend for ProvingBackend self.trie_backend.usage_info() } - fn from_reg_state(self, previous_recorder: ProofRegStateFor) -> Option { + fn from_reg_state(self, previous_recorder: RegProofStateFor) -> Option { let root = self.trie_backend.essence().root().clone(); let storage = self.trie_backend.into_storage(); let current_recorder = storage.proof_recorder; @@ -399,7 +399,7 @@ pub fn create_proof_check_backend( where H: Hasher, H::Out: Codec, - P: BackendStorageProof, + P: BackendProof, { let db = proof.into_partial_db() .map_err(|e| Box::new(format!("{}", e)) as Box)?; @@ -418,7 +418,7 @@ pub fn create_full_proof_check_backend( where H: Hasher, H::Out: Codec, - P: FullBackendStorageProof, + P: FullBackendProof, { use std::ops::Deref; let db = proof.into_partial_full_db() @@ -439,12 +439,12 @@ mod tests { use super::*; use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - use sp_trie::{SimpleProof, StorageProof as _}; + use 
sp_trie::{SimpleProof, ProofCommon}; use sp_runtime::traits::BlakeTwo256; type CompactProof = sp_trie::CompactProof>; - fn test_proving>( + fn test_proving>( trie_backend: &TrieBackend, BlakeTwo256, P>, ) -> ProvingBackend<&PrefixedMemoryDB, BlakeTwo256, P> { ProvingBackend::new(trie_backend) @@ -455,7 +455,7 @@ mod tests { proof_is_empty_until_value_is_read_inner::(); proof_is_empty_until_value_is_read_inner::(); } - fn proof_is_empty_until_value_is_read_inner>() { + fn proof_is_empty_until_value_is_read_inner>() { let trie_backend = test_trie_proof::

(); assert!(test_proving(&trie_backend).extract_proof().unwrap().is_empty()); } @@ -465,7 +465,7 @@ mod tests { proof_is_non_empty_after_value_is_read_inner::(); proof_is_non_empty_after_value_is_read_inner::(); } - fn proof_is_non_empty_after_value_is_read_inner>() { + fn proof_is_non_empty_after_value_is_read_inner>() { let trie_backend = test_trie_proof::

(); let backend = test_proving(&trie_backend); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); @@ -492,7 +492,7 @@ mod tests { passes_through_backend_calls_inner::(); passes_through_backend_calls_inner::(); } - fn passes_through_backend_calls_inner>() { + fn passes_through_backend_calls_inner>() { let trie_backend = test_trie_proof::

(); let proving_backend = test_proving(&trie_backend); assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); @@ -509,7 +509,7 @@ mod tests { proof_recorded_and_checked_inner::(); proof_recorded_and_checked_inner::(); } - fn proof_recorded_and_checked_inner>() { + fn proof_recorded_and_checked_inner>() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryProofCheckBackend::::default(); let in_memory = in_memory.update(vec![(None, contents)]); @@ -535,7 +535,7 @@ mod tests { proof_recorded_and_checked_with_child_inner::(); proof_recorded_and_checked_with_child_inner::(); } - fn proof_recorded_and_checked_with_child_inner>() { + fn proof_recorded_and_checked_with_child_inner>() { let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index ed8bf731be7af..8e8ef593b6d94 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,9 +20,9 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, - ChildrenProofMap, ProofInput, BackendStorageProof, FullBackendStorageProof}; + ChildrenProofMap, ProofInput, BackendProof, FullBackendProof}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use crate::backend::{ProofRegStateFor}; +use crate::backend::{RegProofStateFor}; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; use crate::{ @@ -113,12 +113,12 @@ impl Backend for TrieBackend where H: Hasher, S: TrieBackendStorage, H::Out: Ord + Codec, - P: BackendStorageProof, + P: BackendProof, { type Error = String; type Transaction = S::Overlay; type StorageProof = P; - type ProofRegBackend = crate::proving_backend::ProvingBackend< + type 
RegProofBackend = crate::proving_backend::ProvingBackend< S, H, Self::StorageProof, @@ -279,7 +279,7 @@ impl Backend for TrieBackend where (root, is_default, write_overlay) } - fn from_reg_state(self, recorder: ProofRegStateFor) -> Option { + fn from_reg_state(self, recorder: RegProofStateFor) -> Option { let root = self.essence.root().clone(); Some(crate::proving_backend::ProvingBackend::from_backend_with_recorder( self.essence.into_storage(), @@ -302,7 +302,7 @@ impl Backend for TrieBackend where impl ProofCheckBackend for crate::InMemoryProofCheckBackend where H::Out: Ord + Codec, - P: BackendStorageProof, + P: BackendProof, { fn create_proof_check_backend( root: H::Out, @@ -317,7 +317,7 @@ impl ProofCheckBackend for crate::InMemoryProofCheckBackend ProofCheckBackend for crate::InMemoryFullProofCheckBackend where H::Out: Ord + Codec, - P: FullBackendStorageProof, + P: FullBackendProof, { fn create_proof_check_backend( root: H::Out, @@ -368,7 +368,7 @@ pub mod tests { (mdb, root) } - pub(crate) fn test_trie_proof>() + pub(crate) fn test_trie_proof>() -> TrieBackend, BlakeTwo256, P> { let (mdb, root) = test_db(); TrieBackend::new(mdb, root) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index f262650418935..58c6975e7447d 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -34,11 +34,11 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; -pub use storage_proof::{StorageProof, ChildrenProofMap, simple::ProofNodes, compact::FullForMerge, +pub use storage_proof::{Common as ProofCommon, ChildrenProofMap, simple::ProofNodes, compact::FullForMerge, compact::Flat as CompactProof, simple::Full as SimpleFullProof, compact::Full as CompactFullProof, - query_plan::KnownQueryPlanAndValues as QueryPlanProof, CheckableStorageProof, - Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, RegStorageProof, FullBackendStorageProof, - BackendStorageProof, MergeableStorageProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, + query_plan::KnownQueryPlanAndValues as QueryPlanProof, Verifiable as VerifiableProof, + Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, Recordable as RecordableProof, FullBackendProof, + BackendProof, Mergeable as MergeableProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, simple::Flat as SimpleProof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ @@ -54,7 +54,7 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; /// Access record backend for a given backend storage proof. /// TODO EMCH check if can be use at other place (rg 'as BackendS') /// TODO seems rather useless we use the reg one moste of the time, not exposing it ? -pub type RecordBackendFor = <

>::StorageProofReg as RegStorageProof>::RecordBackend; +pub type RecordBackendFor = <

>::ProofRaw as RecordableProof>::RecordBackend; #[derive(Default)] /// substrate trie layout diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 83d1f2d93f809..3820b5ac3fe8d 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -81,7 +81,7 @@ impl sp_std::fmt::Debug for Full { pub struct FullForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>); -impl StorageProof for Flat { +impl Common for Flat { fn empty() -> Self { Flat(Default::default(), PhantomData) } @@ -91,7 +91,7 @@ impl StorageProof for Flat { } } -impl StorageProof for Full { +impl Common for Full { fn empty() -> Self { Full(Default::default(), PhantomData) } @@ -101,7 +101,7 @@ impl StorageProof for Full { } } -impl StorageProof for FullForMerge { +impl Common for FullForMerge { fn empty() -> Self { FullForMerge(Default::default()) } @@ -112,7 +112,7 @@ impl StorageProof for FullForMerge { } /// Note that this implementation assumes all proof are from a same state. 
-impl MergeableStorageProof for FullForMerge { +impl Mergeable for FullForMerge { fn merge(proofs: I) -> Self where I: IntoIterator { // TODO EMCH optimize all merge to init to first element let mut child_sets = ChildrenProofMap::<(ProofMapTrieNodes, Vec)>::default(); @@ -131,8 +131,7 @@ impl MergeableStorageProof for FullForMerge { } } -// TODO EMCH can remove Default bound with manual impl on recorder -impl RegStorageProof for Flat +impl Recordable for Flat where T: TrieLayout, TrieHash: Decode, @@ -158,7 +157,7 @@ impl RegStorageProof for Flat } } -impl RegStorageProof for Full +impl Recordable for Full where T: TrieLayout, TrieHash: Decode, @@ -184,7 +183,7 @@ impl RegStorageProof for Full } } -impl RegStorageProof for FullForMerge +impl Recordable for FullForMerge where H: Hasher, H::Out: Encode, @@ -212,12 +211,12 @@ impl RegStorageProof for FullForMerge } } -impl BackendStorageProof for Flat +impl BackendProof for Flat where T: TrieLayout, TrieHash: Codec, { - type StorageProofReg = FullForMerge; + type ProofRaw = FullForMerge; fn into_partial_db(self) -> Result> { let mut db = MemoryDB::default(); @@ -235,12 +234,12 @@ impl BackendStorageProof for Flat } } -impl BackendStorageProof for Full +impl BackendProof for Full where T: TrieLayout, TrieHash: Codec, { - type StorageProofReg = FullForMerge; + type ProofRaw = FullForMerge; fn into_partial_db(self) -> Result> { let mut db = MemoryDB::default(); @@ -258,7 +257,7 @@ impl BackendStorageProof for Full } } -impl FullBackendStorageProof for Full +impl FullBackendProof for Full where T: TrieLayout, TrieHash: Codec, @@ -401,8 +400,6 @@ impl TryInto for Full { } /// Container recording trie nodes and their encoded hash. -/// TODO remove Encode by relieving mergeable storage proof from the -/// constraint to bring back btreemap? 
#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct ProofMapTrieNodes(pub BTreeMap, DBValue>); diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 63fdcaeb23e27..7f310996a97d6 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -236,8 +236,6 @@ impl Input { Input::QueryPlan(result) } - - } /// Kind for a `Input` variant. @@ -253,8 +251,8 @@ pub enum InputKind { QueryPlanWithValues, } -/// Trait for proofs that can be use as a partial backend for verification. -pub trait StorageProof: sp_std::fmt::Debug + Sized { +/// Basic trait for proofs. +pub trait Common: sp_std::fmt::Debug + Sized { /// Returns a new empty proof. /// /// An empty proof is capable of only proving trivial statements (ie. that an empty set of @@ -266,7 +264,7 @@ pub trait StorageProof: sp_std::fmt::Debug + Sized { } /// Trait for proofs that can be merged. -pub trait MergeableStorageProof: StorageProof { +pub trait Mergeable: Common { /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. @@ -274,7 +272,7 @@ pub trait MergeableStorageProof: StorageProof { } /// Trait for proofs that can be recorded against a trie backend. -pub trait RegStorageProof: StorageProof { +pub trait Recordable: Common { /// Variant of enum input to use. const INPUT_KIND: InputKind; @@ -286,38 +284,30 @@ pub trait RegStorageProof: StorageProof { /// (usually to compact the proof). fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result; } -/* -/// Associate a different proof kind for recording proof. -/// The recorded proof will need to be convertible to this type. -/// -/// This trait is not strictly needed but ensure simple proof construction -/// rules (a single possible registration proof). 
-/// -/// TODO EMCH really consider removing. -pub trait WithRegStorageProof: Sized { - /// Associated proof to register. - type RegStorageProof: Into + RegStorageProof; -} -*/ -pub trait BackendStorageProof: Codec + StorageProof { - /// The proof format use while registering proof. - type StorageProofReg: RegStorageProof - + MergeableStorageProof - + Into; // TODO EMCH consider removing this conv or make it a try into?? + +/// Proof that could be use as a backend to execute action +/// other a `MemoryDB`. +pub trait BackendProof: Codec + Common { + /// Intermediate proof format before getting finalize + type ProofRaw: Recordable + + Mergeable + + Into; /// Extract a flat trie db from the proof. /// Fail on invalid proof content. fn into_partial_db(self) -> Result>; } -pub trait FullBackendStorageProof: BackendStorageProof { +/// Proof that could be use as a backend to execute action +/// other one `MemoryDB` per child proofs. +pub trait FullBackendProof: BackendProof { /// Extract a trie db with children info from the proof. /// Fail on invalid proof content. fn into_partial_full_db(self) -> Result>>; } /// Trait for proofs that can use to create a partial trie backend. -pub trait CheckableStorageProof: Codec + StorageProof { +pub trait Verifiable: Codec + Common { /// Run proof validation when the proof allows immediate /// verification. fn verify(self, input: &Input) -> Result; @@ -484,7 +474,6 @@ impl IntoIterator for ChildrenProofMap { } /// Container recording trie nodes. 
-/// TODO EMCH looks unused pub struct RecordMapTrieNodes(HashMap>); impl sp_std::default::Default for RecordMapTrieNodes { diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index 884060cc98b28..9ac8985e5e8bb 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -134,7 +134,7 @@ impl Encode for MultipleStorageProof { } } -impl StorageProof for MultipleStorageProof { +impl Common for MultipleStorageProof { fn empty() -> Self { match D::KIND { StorageProofKind::Flat => @@ -240,8 +240,7 @@ impl RecordBackend for MultipleRecorder { } } -// TODO EMCH can remove Default bound with manual impl on recorder -impl RegStorageProof for MultipleStorageProof +impl Recordable for MultipleStorageProof where H: Hasher, H::Out: Codec, @@ -288,13 +287,13 @@ impl RegStorageProof for MultipleStorageProof } } -impl BackendStorageProof for MultipleStorageProof +impl BackendProof for MultipleStorageProof where H: Hasher, H::Out: Codec, D: DefaultKind, { - type StorageProofReg = super::compact::FullForMerge; + type ProofRaw = super::compact::FullForMerge; fn into_partial_db(self) -> Result> { match self { diff --git a/primitives/trie/src/storage_proof/query_plan.rs b/primitives/trie/src/storage_proof/query_plan.rs index 9a353772506a3..9912498be608c 100644 --- a/primitives/trie/src/storage_proof/query_plan.rs +++ b/primitives/trie/src/storage_proof/query_plan.rs @@ -46,7 +46,7 @@ impl Clone for KnownQueryPlanAndValues { } } -impl StorageProof for KnownQueryPlanAndValues { +impl Common for KnownQueryPlanAndValues { fn empty() -> Self { KnownQueryPlanAndValues(Default::default(), PhantomData) } @@ -56,7 +56,7 @@ impl StorageProof for KnownQueryPlanAndValues { } } -impl RegStorageProof for KnownQueryPlanAndValues +impl Recordable for KnownQueryPlanAndValues where T: TrieConfiguration, TrieHash: Decode, @@ -91,7 +91,7 @@ impl RegStorageProof for KnownQueryPlanAndValues } } -impl 
CheckableStorageProof for KnownQueryPlanAndValues +impl Verifiable for KnownQueryPlanAndValues where T: TrieConfiguration, TrieHash: Decode, diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index 09091ffed0b97..e8d2da3d05e8d 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -43,7 +43,7 @@ impl Flat { // TODO EMCH tets that proof nodes encode to the same as flat (to validate change in grandpa) -impl StorageProof for Flat { +impl Common for Flat { fn empty() -> Self { Flat(Default::default()) } @@ -53,7 +53,7 @@ impl StorageProof for Flat { } } -impl StorageProof for Full { +impl Common for Full { fn empty() -> Self { Full(Default::default()) } @@ -63,7 +63,7 @@ impl StorageProof for Full { } } -impl MergeableStorageProof for Flat { +impl Mergeable for Flat { fn merge(proofs: I) -> Self where I: IntoIterator { let mut unique_set = BTreeSet::>::default(); for proof in proofs { @@ -73,7 +73,7 @@ impl MergeableStorageProof for Flat { } } -impl MergeableStorageProof for Full { +impl Mergeable for Full { fn merge(proofs: I) -> Self where I: IntoIterator { let mut child_sets = ChildrenProofMap::>>::default(); for children in proofs { @@ -89,7 +89,7 @@ impl MergeableStorageProof for Full { } } -impl RegStorageProof for Flat { +impl Recordable for Flat { const INPUT_KIND: InputKind = InputKind::None; type RecordBackend = super::FlatRecorder; @@ -103,7 +103,7 @@ impl RegStorageProof for Flat { } } -impl RegStorageProof for Full { +impl Recordable for Full { const INPUT_KIND: InputKind = InputKind::None; type RecordBackend = super::FullRecorder; @@ -121,8 +121,8 @@ impl RegStorageProof for Full { } } -impl BackendStorageProof for Flat { - type StorageProofReg = Self; +impl BackendProof for Flat { + type ProofRaw = Self; fn into_partial_db(self) -> Result> { use hash_db::HashDB; @@ -134,8 +134,8 @@ impl BackendStorageProof for Flat { } } -impl BackendStorageProof 
for Full { - type StorageProofReg = Self; +impl BackendProof for Full { + type ProofRaw = Self; fn into_partial_db(self) -> Result> { use hash_db::HashDB; @@ -150,7 +150,7 @@ impl BackendStorageProof for Full { } } -impl FullBackendStorageProof for Full { +impl FullBackendProof for Full { fn into_partial_full_db(self) -> Result>> { use hash_db::HashDB; let mut result = ChildrenProofMap::default(); From 2e372d37d9ac08a73ef8a1fe2e2bceca5f6e1cd8 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 15:19:14 +0200 Subject: [PATCH 164/185] switch collect of input to be in trait instead of hacky consolidate. --- client/db/src/bench.rs | 11 +++-- client/db/src/lib.rs | 8 +++- client/db/src/storage_cache.rs | 16 +++++-- client/light/src/backend.rs | 8 +++- client/service/src/client/call_executor.rs | 12 ++--- .../api/proc-macro/src/impl_runtime_apis.rs | 5 ++- primitives/state-machine/src/backend.rs | 21 +++++---- .../state-machine/src/proving_backend.rs | 20 ++++++--- primitives/state-machine/src/trie_backend.rs | 44 +++++++++++++++++-- primitives/storage/src/lib.rs | 9 +++- primitives/trie/src/storage_proof/mod.rs | 6 +++ 11 files changed, 123 insertions(+), 37 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 3b0ae3f94a6f4..4ee77709d7f1c 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -27,7 +27,8 @@ use sp_trie::{MemoryDB, prefixed_key}; use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::{Backend as StateBackend, RegProofStateFor}, SimpleProof}; +use sp_state_machine::{DBValue, backend::{Backend as StateBackend, RegProofStateFor}, + SimpleProof, ProofInput}; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; @@ -280,8 +281,12 @@ impl StateBackend> for BenchmarkingState { self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| 
s.usage_info()) } - fn from_reg_state(self, previous: RegProofStateFor>) -> Option { - self.state.borrow_mut().take().and_then(|s| s.from_reg_state(previous)) + fn from_reg_state( + self, + previous: RegProofStateFor>, + previous_input: ProofInput, + ) -> Option { + self.state.borrow_mut().take().and_then(|s| s.from_reg_state(previous, previous_input)) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 7c0dff11930c0..92384f955901e 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -253,9 +253,13 @@ impl StateBackend> for RefTrackingState { self.state().child_keys(child_info, prefix) } - fn from_reg_state(mut self, previous: RegProofStateFor>) -> Option { + fn from_reg_state( + mut self, + previous: RegProofStateFor>, + previous_input: sp_state_machine::ProofInput, + ) -> Option { let state = std::mem::replace(&mut self.state, Default::default()).expect("Non dropped state"); - state.from_reg_state(previous) + state.from_reg_state(previous, previous_input) } fn register_overlay_stats(&mut self, stats: &StateMachineStats) { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index d829be2f9f3e3..24bd3c7c8bb59 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -654,8 +654,12 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.child_keys(child_info, prefix) } - fn from_reg_state(self, previous: RegProofStateFor>) -> Option { - self.state.from_reg_state(previous) + fn from_reg_state( + self, + previous: RegProofStateFor>, + previous_input: sp_state_machine::ProofInput, + ) -> Option { + self.state.from_reg_state(previous, previous_input) } fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { @@ -846,8 +850,12 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().usage_info() } - fn from_reg_state(mut self, previous: RegProofStateFor>) -> Option { - self.sync().and_then(|s| s.from_reg_state(previous)) + fn from_reg_state( + mut self, + 
previous: RegProofStateFor>, + previous_input: sp_state_machine::ProofInput, + ) -> Option { + self.sync().and_then(|s| s.from_reg_state(previous, previous_input)) } } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index e7236bd32545d..43e0998ffddf2 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -511,9 +511,13 @@ impl StateBackend for GenesisOrUnavailableState sp_state_machine::UsageInfo::empty() } - fn from_reg_state(self, previous: RegProofStateFor) -> Option { + fn from_reg_state( + self, + previous: RegProofStateFor, + previous_input: sp_state_machine::ProofInput, + ) -> Option { match self { - GenesisOrUnavailableState::Genesis(state) => state.from_reg_state(previous), + GenesisOrUnavailableState::Genesis(state) => state.from_reg_state(previous, previous_input), GenesisOrUnavailableState::Unavailable => None, } } diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index a2cc457e72e18..1e7ff8033d559 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -171,10 +171,12 @@ where let state = self.backend.state_at(*at)?; - let backend = state.from_reg_state(std::mem::replace(recorder, Default::default())) - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box - )?; + let backend = state.from_reg_state( + std::mem::replace(recorder, Default::default()), + std::mem::replace(input, Default::default()), + ).ok_or_else(|| + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box + )?; let result = { let mut state_machine = StateMachine::new( @@ -196,7 +198,7 @@ where use sp_state_machine::backend::RegProofBackend; let (recorder_state, input_state) = backend.extract_recorder(); *recorder = recorder_state; - input.consolidate(input_state).map_err(|e| format!("{:?}", e))?; + *input = input_state; result }, None => { diff --git 
a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 2bab789a2be75..257f7e6e0e2c9 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -292,7 +292,10 @@ fn generate_runtime_api_base_structures() -> Result { .and_then(|recorder| { let #crate_::ProofRecorder{ recorder, input } = &mut *recorder.borrow_mut(); let input = std::mem::replace(input, #crate_::ProofInput::None); - <>>::RegProofBackend as #crate_::RegProofBackend<#crate_::HashFor>>::extract_proof_reg( + < + >>::RegProofBackend + as #crate_::RegProofBackend<#crate_::HashFor> + >::extract_proof_reg( &recorder, input, ).ok() diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index c705466c4d06c..738df012cd262 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -46,10 +46,10 @@ pub trait Backend: Sized + std::fmt::Debug { /// The actual proof produced. type StorageProof: BackendProof; - /// Type of proof backend. + /// Type of backend for recording proof. type RegProofBackend: RegProofBackend; - /// Type of proof backend. + /// Type of backend for using a proof. type ProofCheckBackend: ProofCheckBackend; /// Get keyed storage or None if there is nothing associated. @@ -167,14 +167,17 @@ pub trait Backend: Sized + std::fmt::Debug { /// Try convert into a proof backend. fn as_proof_backend(self) -> Option { - self.from_reg_state(Default::default()) + self.from_reg_state(Default::default(), Default::default()) } /// Try convert into a proof backend. /// We can optionally use a previous proof backend to avoid having to merge /// proof later. - /// TODO EMCH consider adding previous input. 
- fn from_reg_state(self, previous: RegProofStateFor) -> Option; + fn from_reg_state( + self, + previous: RegProofStateFor, + previous_input: ProofInput, + ) -> Option; /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. @@ -251,16 +254,16 @@ pub trait RegProofBackend: crate::backend::Backend { /// State of a backend. /// TODO try to merge with RecordBackendFor (aka remove the arc rwlock in code) - type State: Default + Send + Sync + Clone; + type State: Default; /// Extract proof after running operation to prove. - fn extract_proof(&self) -> Result<>::ProofRaw, Box>; + fn extract_proof(&self) -> Result, Box>; /// Get current recording state. fn extract_recorder(self) -> (Self::State, ProofInput); /// Extract from the state and input. - fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result<>::ProofRaw, Box>; + fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result, Box>; } /// Backend used to produce proof. @@ -360,7 +363,7 @@ impl<'a, T, H> Backend for &'a T (*self).usage_info() } - fn from_reg_state(self, _previous: RegProofStateFor) -> Option { + fn from_reg_state(self, _previous: RegProofStateFor, _input: ProofInput) -> Option { // cannot move out of reference, consider cloning when needed. 
None } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index be0b6f7aa5250..b939bd36d39db 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -31,7 +31,7 @@ pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, DBValue}; -use crate::backend::{Backend, RegProofStateFor, RegProofBackend}; +use crate::backend::{Backend, RegProofStateFor, RegProofBackend, ProofRawFor}; use sp_core::storage::{ChildInfo, ChildInfoProof}; use std::marker::PhantomData; @@ -125,7 +125,7 @@ pub struct ProvingBackend< H: Hasher, P: BackendProof, > { - trie_backend: TrieBackend>, H, P>, + pub(crate) trie_backend: TrieBackend>, H, P>, _ph: PhantomData

, } @@ -249,7 +249,7 @@ impl RegProofBackend for ProvingBackend { type State = SyncRecordBackendFor; - fn extract_proof(&self) -> Result<>::ProofRaw, Box> { + fn extract_proof(&self) -> Result, Box> { let input = self.trie_backend.extract_registered_roots(); >::ProofRaw::extract_proof( &self.trie_backend.essence().backend_storage().proof_recorder.read(), @@ -263,7 +263,7 @@ impl RegProofBackend for ProvingBackend (recorder, input) } - fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result<>::ProofRaw, Box> { + fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result, Box> { <>::ProofRaw>::extract_proof( & recorder_state.read(), input, @@ -370,7 +370,7 @@ impl Backend for ProvingBackend self.trie_backend.usage_info() } - fn from_reg_state(self, previous_recorder: RegProofStateFor) -> Option { + fn from_reg_state(self, previous_recorder: RegProofStateFor, previous_input: ProofInput) -> Option { let root = self.trie_backend.essence().root().clone(); let storage = self.trie_backend.into_storage(); let current_recorder = storage.proof_recorder; @@ -387,7 +387,15 @@ impl Backend for ProvingBackend } else { None } - } + }.filter(|backend| { + match previous_input { + ProofInput::ChildTrieRoots(roots) => { + backend.trie_backend.push_registered_roots(roots) + }, + ProofInput::None => true, + _ => false, + } + }) } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 8e8ef593b6d94..aa5548abed809 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -76,7 +76,29 @@ impl, H: Hasher, P> TrieBackend where H::Out: ProofInput::None } } - + /// Set previously registered roots. + /// Return false if conflict. 
+ pub fn push_registered_roots(&self, previous: ChildrenProofMap>) -> bool { + if let Some(register_roots) = self.essence.register_roots() { + let mut roots = register_roots.write(); + for (child_info_proof, encoded_root) in previous { + if let Some(child_info) = child_info_proof.as_child_info() { + if let Some(existing_root) = roots.get(&child_info) { + if Some(&encoded_root) != existing_root.as_ref() { + return false; + } + } + roots.insert(child_info, Some(encoded_root)); + } else { + return false; + } + } + true + } else { + false + } + } + /// Get backend essence reference. pub fn essence(&self) -> &TrieBackendEssence { &self.essence @@ -279,13 +301,27 @@ impl Backend for TrieBackend where (root, is_default, write_overlay) } - fn from_reg_state(self, recorder: RegProofStateFor) -> Option { + fn from_reg_state( + self, + recorder: RegProofStateFor, + previous_input: ProofInput, + ) -> Option { let root = self.essence.root().clone(); - Some(crate::proving_backend::ProvingBackend::from_backend_with_recorder( + let backend = crate::proving_backend::ProvingBackend::from_backend_with_recorder( self.essence.into_storage(), root, recorder, - )) + ); + match previous_input { + ProofInput::ChildTrieRoots(roots) => { + if !backend.trie_backend.push_registered_roots(roots) { + return None; + } + }, + ProofInput::None => (), + _ => return None, + } + Some(backend) } fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index be3350be71311..770f2efe4b59c 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -167,7 +167,7 @@ pub mod well_known_keys { /// Child information needed for proof construction. /// -/// It contains `ChildInfo` strictly needed for proofs. +/// It contains only `ChildInfo` content that is strictly needed for proofs. /// /// It could also be use for specific proof usage. 
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] @@ -302,6 +302,13 @@ impl ChildInfoProof { ChildInfoProof::Default(..) => ChildType::ParentKeyId, } } + + /// Get child info if it can be resolve without additional context. + pub fn as_child_info(self) -> Option { + match self { + ChildInfoProof::Default(parent_key) => Some(ChildInfo::ParentKeyId(parent_key)), + } + } } /// Type of child. diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 7f310996a97d6..ee0fdb07e3bb7 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -133,6 +133,11 @@ pub enum Input { QueryPlan(ChildrenProofMap<(Vec, Vec>)>), } +impl Default for Input { + fn default() -> Self { + Input::None + } +} impl Input { /// Get input kind for a given input. pub fn kind(&self) -> InputKind { @@ -151,6 +156,7 @@ impl Input { /// /// Merging query plan inputs is not allowed (unimplemented), /// but could be. + /// TODO EMCH Unused?? 
#[must_use] pub fn consolidate(&mut self, other: Self) -> Result<()> { let incompatible_types = || Err(error("Incompatible types for consolidating proofs")); From e414fc525f408f7068dcccdbf5ddf06a22b36cb3 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 15:46:23 +0200 Subject: [PATCH 165/185] Same state as rec, will remove reg backend state --- primitives/state-machine/src/lib.rs | 8 +++--- .../state-machine/src/proving_backend.rs | 26 +++++++++++++------ 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 6a0c58978f59d..17a698b27e86c 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1306,12 +1306,12 @@ mod tests { fn prove_read_and_proof_check_works_query_plan() { use sp_trie::{VerifiableProof, ProofInput}; - fn extract_recorder(recorder: std::sync::Arc>) -> T { +/* fn extract_recorder(recorder: std::sync::Arc>) -> T { match std::sync::Arc::try_unwrap(recorder) { Ok(r) => r.into_inner(), Err(arc) => arc.read().clone(), } - } + }*/ let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; @@ -1321,7 +1321,7 @@ mod tests { let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_root_child = remote_backend.child_storage_root(child_info, std::iter::empty()).0; let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]).unwrap(); - let recorder = extract_recorder(recorder); +// let recorder = extract_recorder(recorder); let mut root_map = ChildrenProofMap::default(); root_map.insert(ChildInfo::top_trie().proof_info(), remote_root.encode()); assert!(ProofInput::ChildTrieRoots(root_map) == root_input); @@ -1351,7 +1351,7 @@ mod tests { &[b"value2"], vec![(child_info.clone(), &[b"value3"])], ).unwrap(); - let recorder = extract_recorder(recorder); +// let recorder = extract_recorder(recorder); let test_with_roots = |include_roots: bool| { let input = 
ProofInput::query_plan( diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index b939bd36d39db..7da68172b182c 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -36,7 +36,8 @@ use sp_core::storage::{ChildInfo, ChildInfoProof}; use std::marker::PhantomData; /// Clonable recorder backend with inner mutability. -type SyncRecordBackendFor = Arc>>; +type SyncRecordBackendFor = RecordBackendFor; +//type SyncRecordBackendFor = Arc>>; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -157,7 +158,7 @@ impl<'a, S, H, P> ProvingBackend<&'a S, H, P> let root = essence.root().clone(); let recorder = ProofRecorderBackend { backend: essence.backend_storage(), - proof_recorder, + proof_recorder: Arc::new(RwLock::new(proof_recorder)), _ph: PhantomData, }; match P::ProofRaw::INPUT_KIND { @@ -194,7 +195,7 @@ impl ProvingBackend ) -> Self { let recorder = ProofRecorderBackend { backend, - proof_recorder, + proof_recorder: Arc::new(RwLock::new(proof_recorder)), _ph: PhantomData, }; match P::ProofRaw::INPUT_KIND { @@ -259,13 +260,16 @@ impl RegProofBackend for ProvingBackend fn extract_recorder(self) -> (RegProofStateFor, ProofInput) { let input = self.trie_backend.extract_registered_roots(); - let recorder = self.trie_backend.into_storage().proof_recorder; + let recorder = match std::sync::Arc::try_unwrap(self.trie_backend.into_storage().proof_recorder) { + Ok(r) => r.into_inner(), + Err(arc) => arc.read().clone(), // TODO EMCH this should have only one handle (refcell should work in fact) -> try panic qed + }; (recorder, input) } fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result, Box> { <>::ProofRaw>::extract_proof( - & recorder_state.read(), + recorder_state, input, ).map_err(|e| Box::new(e) as Box) } @@ -375,18 +379,24 @@ impl 
Backend for ProvingBackend let storage = self.trie_backend.into_storage(); let current_recorder = storage.proof_recorder; let backend = storage.backend; - if std::sync::Arc::ptr_eq(¤t_recorder, &previous_recorder) { + // TODO EMCH can also replace if current is empty +/* if std::sync::Arc::ptr_eq(¤t_recorder, &previous_recorder) { Some(ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder)) } else { let previous_recorder = match Arc::try_unwrap(previous_recorder) { Ok(r) => r.into_inner(), Err(arc) => arc.read().clone(), - }; + };*/ if current_recorder.write().merge(previous_recorder) { + let current_recorder = match Arc::try_unwrap(current_recorder) { + Ok(r) => r.into_inner(), + Err(arc) => arc.read().clone(), // TODO EMCH panic here + }; + Some(ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder)) } else { None - } +// } }.filter(|backend| { match previous_input { ProofInput::ChildTrieRoots(roots) => { From f5c525b4efba2bdec7eb7774cfe10e7b8a080998 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 16:21:15 +0200 Subject: [PATCH 166/185] Removal of associated state type. 
--- client/db/src/bench.rs | 4 +- client/db/src/lib.rs | 4 +- client/db/src/storage_cache.rs | 6 +-- client/light/src/backend.rs | 4 +- primitives/api/src/lib.rs | 4 +- primitives/state-machine/src/backend.rs | 17 ++++--- primitives/state-machine/src/lib.rs | 15 ++----- .../state-machine/src/proving_backend.rs | 45 +++++++------------ primitives/state-machine/src/trie_backend.rs | 4 +- 9 files changed, 40 insertions(+), 63 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 4ee77709d7f1c..a6df8644097b7 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -27,7 +27,7 @@ use sp_trie::{MemoryDB, prefixed_key}; use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::{Backend as StateBackend, RegProofStateFor}, +use sp_state_machine::{DBValue, backend::{Backend as StateBackend, RecordBackendFor}, SimpleProof, ProofInput}; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; @@ -283,7 +283,7 @@ impl StateBackend> for BenchmarkingState { fn from_reg_state( self, - previous: RegProofStateFor>, + previous: RecordBackendFor>, previous_input: ProofInput, ) -> Option { self.state.borrow_mut().take().and_then(|s| s.from_reg_state(previous, previous_input)) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 92384f955901e..5eaaaecd6dcbe 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -77,7 +77,7 @@ use sp_runtime::traits::{ use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, StorageCollection, ChildStorageCollection, SimpleProof, - backend::{Backend as StateBackend, RegProofStateFor}, StateMachineStats, + backend::{Backend as StateBackend, RecordBackendFor}, StateMachineStats, }; use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; use 
crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; @@ -255,7 +255,7 @@ impl StateBackend> for RefTrackingState { fn from_reg_state( mut self, - previous: RegProofStateFor>, + previous: RecordBackendFor>, previous_input: sp_state_machine::ProofInput, ) -> Option { let state = std::mem::replace(&mut self.state, Default::default()).expect("Non dropped state"); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 24bd3c7c8bb59..183b35e6e92f4 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -26,7 +26,7 @@ use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; use sp_core::hexdisplay::HexDisplay; use sp_core::storage::ChildInfo; use sp_state_machine::{ - backend::{Backend as StateBackend, RegProofStateFor}, StorageKey, StorageValue, + backend::{Backend as StateBackend, RecordBackendFor}, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, }; use log::trace; @@ -656,7 +656,7 @@ impl>, B: BlockT> StateBackend> for Cachin fn from_reg_state( self, - previous: RegProofStateFor>, + previous: RecordBackendFor>, previous_input: sp_state_machine::ProofInput, ) -> Option { self.state.from_reg_state(previous, previous_input) @@ -852,7 +852,7 @@ impl>, B: BlockT> StateBackend> for Syncin fn from_reg_state( mut self, - previous: RegProofStateFor>, + previous: RecordBackendFor>, previous_input: sp_state_machine::ProofInput, ) -> Option { self.sync().and_then(|s| s.from_reg_state(previous, previous_input)) diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 43e0998ffddf2..f413f552eb347 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -29,7 +29,7 @@ use sp_core::ChangesTrieConfiguration; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ - backend::{Backend as StateBackend, RegProofStateFor}, InMemoryBackend, 
ChangesTrieTransaction, + backend::{Backend as StateBackend, RecordBackendFor}, InMemoryBackend, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; use sp_runtime::{generic::BlockId, Justification, Storage}; @@ -513,7 +513,7 @@ impl StateBackend for GenesisOrUnavailableState fn from_reg_state( self, - previous: RegProofStateFor, + previous: RecordBackendFor, previous_input: sp_state_machine::ProofInput, ) -> Option { match self { diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 9a2754bad1df2..31584b413214e 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -525,9 +525,7 @@ pub trait RuntimeApiInfo { #[cfg(feature = "std")] pub struct ProofRecorder>, Block: BlockT> { /// The recorder to use over the db use by trie db. - /// TODO EMCH this the sync recorder and we got a mechanism of extract / merge for it - /// when it should only be reusing it, but merge still needed for input. - pub recorder: sp_state_machine::backend::RegProofStateFor>, + pub recorder: sp_state_machine::backend::RecordBackendFor>, /// The additional input needed for the proof. pub input: ProofInput, } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 738df012cd262..37563015550b4 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -24,7 +24,7 @@ use crate::{UsageInfo, StorageKey, StorageValue, StorageCollection}; use sp_trie::{ProofInput, BackendProof}; /// Access the state of the proof backend of a backend. -pub type RegProofStateFor = <>::RegProofBackend as RegProofBackend>::State; +pub type RecordBackendFor = sp_trie::RecordBackendFor<>::StorageProof, H>; /// Access the raw proof of a backend. pub type ProofRawFor = <>::StorageProof as BackendProof>::ProofRaw; @@ -175,7 +175,7 @@ pub trait Backend: Sized + std::fmt::Debug { /// proof later. 
fn from_reg_state( self, - previous: RegProofStateFor, + previous: RecordBackendFor, previous_input: ProofInput, ) -> Option; @@ -252,18 +252,17 @@ pub trait RegProofBackend: crate::backend::Backend where H: Hasher, { - /// State of a backend. - /// TODO try to merge with RecordBackendFor (aka remove the arc rwlock in code) - type State: Default; - /// Extract proof after running operation to prove. fn extract_proof(&self) -> Result, Box>; /// Get current recording state. - fn extract_recorder(self) -> (Self::State, ProofInput); + fn extract_recorder(self) -> (RecordBackendFor, ProofInput); /// Extract from the state and input. - fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result, Box>; + fn extract_proof_reg( + recorder_state: &RecordBackendFor, + input: ProofInput, + ) -> Result, Box>; } /// Backend used to produce proof. @@ -363,7 +362,7 @@ impl<'a, T, H> Backend for &'a T (*self).usage_info() } - fn from_reg_state(self, _previous: RegProofStateFor, _input: ProofInput) -> Option { + fn from_reg_state(self, _previous: RecordBackendFor, _input: ProofInput) -> Option { // cannot move out of reference, consider cloning when needed. 
None } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 17a698b27e86c..b25dda34d6c9a 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -46,7 +46,7 @@ mod read_only; pub use sp_trie::{trie_types::{Layout, TrieDBMut}, TrieMut, DBValue, MemoryDB, TrieNodesStorageProof, ProofCommon, StorageProofKind, ChildrenProofMap, - ProofInput, ProofInputKind, ProofNodes, RecordBackendFor, RecordableProof, + ProofInput, ProofInputKind, ProofNodes, RecordableProof, SimpleProof, CompactProof, BackendProof, MergeableProof}; pub use testing::TestExternalities; pub use basic::BasicExternalities; @@ -628,7 +628,7 @@ where pub fn prove_read_for_query_plan_check( backend: B, keys: I, -) -> Result<(crate::backend::RegProofStateFor, ProofInput), Box> +) -> Result<(crate::backend::RecordBackendFor, ProofInput), Box> where B: Backend, H: Hasher, @@ -713,7 +713,7 @@ pub fn prove_child_read_for_query_plan_check( backend: B, top_keys: I, child_keys: I3, -) -> Result<(crate::backend::RegProofStateFor, ProofInput), Box> +) -> Result<(crate::backend::RecordBackendFor, ProofInput), Box> where B: Backend, H: Hasher, @@ -1306,13 +1306,6 @@ mod tests { fn prove_read_and_proof_check_works_query_plan() { use sp_trie::{VerifiableProof, ProofInput}; -/* fn extract_recorder(recorder: std::sync::Arc>) -> T { - match std::sync::Arc::try_unwrap(recorder) { - Ok(r) => r.into_inner(), - Err(arc) => arc.read().clone(), - } - }*/ - let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; // fetch read proof from 'remote' full node. 
@@ -1321,7 +1314,6 @@ mod tests { let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_root_child = remote_backend.child_storage_root(child_info, std::iter::empty()).0; let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]).unwrap(); -// let recorder = extract_recorder(recorder); let mut root_map = ChildrenProofMap::default(); root_map.insert(ChildInfo::top_trie().proof_info(), remote_root.encode()); assert!(ProofInput::ChildTrieRoots(root_map) == root_input); @@ -1351,7 +1343,6 @@ mod tests { &[b"value2"], vec![(child_info.clone(), &[b"value3"])], ).unwrap(); -// let recorder = extract_recorder(recorder); let test_with_roots = |include_roots: bool| { let input = ProofInput::query_plan( diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 7da68172b182c..f5b762481ae96 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -31,14 +31,10 @@ pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, DBValue}; -use crate::backend::{Backend, RegProofStateFor, RegProofBackend, ProofRawFor}; +use crate::backend::{Backend, RegProofBackend, ProofRawFor}; use sp_core::storage::{ChildInfo, ChildInfoProof}; use std::marker::PhantomData; -/// Clonable recorder backend with inner mutability. -type SyncRecordBackendFor = RecordBackendFor; -//type SyncRecordBackendFor = Arc>>; - /// Patricia trie-based backend specialized in get value proofs. 
pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { pub(crate) backend: &'a TrieBackendEssence, @@ -152,7 +148,7 @@ impl<'a, S, H, P> ProvingBackend<&'a S, H, P> fn new_with_recorder( backend: &'a TrieBackend, - proof_recorder: SyncRecordBackendFor, + proof_recorder: RecordBackendFor, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); @@ -191,7 +187,7 @@ impl ProvingBackend pub fn from_backend_with_recorder( backend: S, root: H::Out, - proof_recorder: SyncRecordBackendFor, + proof_recorder: RecordBackendFor, ) -> Self { let recorder = ProofRecorderBackend { backend, @@ -248,8 +244,6 @@ impl RegProofBackend for ProvingBackend H::Out: Ord + Codec, P: BackendProof, { - type State = SyncRecordBackendFor; - fn extract_proof(&self) -> Result, Box> { let input = self.trie_backend.extract_registered_roots(); >::ProofRaw::extract_proof( @@ -258,7 +252,7 @@ impl RegProofBackend for ProvingBackend ).map_err(|e| Box::new(e) as Box) } - fn extract_recorder(self) -> (RegProofStateFor, ProofInput) { + fn extract_recorder(self) -> (RecordBackendFor, ProofInput) { let input = self.trie_backend.extract_registered_roots(); let recorder = match std::sync::Arc::try_unwrap(self.trie_backend.into_storage().proof_recorder) { Ok(r) => r.into_inner(), @@ -267,7 +261,7 @@ impl RegProofBackend for ProvingBackend (recorder, input) } - fn extract_proof_reg(recorder_state: &Self::State, input: ProofInput) -> Result, Box> { + fn extract_proof_reg(recorder_state: &RecordBackendFor, input: ProofInput) -> Result, Box> { <>::ProofRaw>::extract_proof( recorder_state, input, @@ -374,29 +368,24 @@ impl Backend for ProvingBackend self.trie_backend.usage_info() } - fn from_reg_state(self, previous_recorder: RegProofStateFor, previous_input: ProofInput) -> Option { + fn from_reg_state( + self, + previous_recorder: crate::backend::RecordBackendFor, + previous_input: ProofInput, + ) -> Option { let root = self.trie_backend.essence().root().clone(); 
let storage = self.trie_backend.into_storage(); let current_recorder = storage.proof_recorder; let backend = storage.backend; - // TODO EMCH can also replace if current is empty -/* if std::sync::Arc::ptr_eq(¤t_recorder, &previous_recorder) { + if current_recorder.write().merge(previous_recorder) { + let current_recorder = match Arc::try_unwrap(current_recorder) { + Ok(r) => r.into_inner(), + Err(arc) => arc.read().clone(), // TODO EMCH panic here + }; + Some(ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder)) } else { - let previous_recorder = match Arc::try_unwrap(previous_recorder) { - Ok(r) => r.into_inner(), - Err(arc) => arc.read().clone(), - };*/ - if current_recorder.write().merge(previous_recorder) { - let current_recorder = match Arc::try_unwrap(current_recorder) { - Ok(r) => r.into_inner(), - Err(arc) => arc.read().clone(), // TODO EMCH panic here - }; - - Some(ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder)) - } else { - None -// } + None }.filter(|backend| { match previous_input { ProofInput::ChildTrieRoots(roots) => { diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index aa5548abed809..76bb603cfd2ca 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -22,7 +22,7 @@ use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, ChildrenProofMap, ProofInput, BackendProof, FullBackendProof}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use crate::backend::{RegProofStateFor}; +use crate::backend::RecordBackendFor; use sp_core::storage::{ChildInfo, ChildInfoProof, ChildType}; use codec::{Codec, Decode, Encode}; use crate::{ @@ -303,7 +303,7 @@ impl Backend for TrieBackend where fn from_reg_state( self, - recorder: RegProofStateFor, + recorder: RecordBackendFor, previous_input: ProofInput, ) -> Option { let root = 
self.essence.root().clone(); From 9ef94dad3804ddf97fa159cdc176b16a4ef97ae2 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 16:35:52 +0200 Subject: [PATCH 167/185] Removing arc and code that could clone a record --- primitives/state-machine/src/proving_backend.rs | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index f5b762481ae96..e50659ab10a79 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -17,7 +17,6 @@ //! Proving state machine backend. -use std::sync::Arc; use parking_lot::RwLock; use codec::{Decode, Codec}; use log::debug; @@ -129,7 +128,7 @@ pub struct ProvingBackend< /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend, H: Hasher, R: RecordBackend> { backend: S, - proof_recorder: Arc>, + proof_recorder: RwLock, _ph: PhantomData, } @@ -154,7 +153,7 @@ impl<'a, S, H, P> ProvingBackend<&'a S, H, P> let root = essence.root().clone(); let recorder = ProofRecorderBackend { backend: essence.backend_storage(), - proof_recorder: Arc::new(RwLock::new(proof_recorder)), + proof_recorder: RwLock::new(proof_recorder), _ph: PhantomData, }; match P::ProofRaw::INPUT_KIND { @@ -191,7 +190,7 @@ impl ProvingBackend ) -> Self { let recorder = ProofRecorderBackend { backend, - proof_recorder: Arc::new(RwLock::new(proof_recorder)), + proof_recorder: RwLock::new(proof_recorder), _ph: PhantomData, }; match P::ProofRaw::INPUT_KIND { @@ -254,10 +253,7 @@ impl RegProofBackend for ProvingBackend fn extract_recorder(self) -> (RecordBackendFor, ProofInput) { let input = self.trie_backend.extract_registered_roots(); - let recorder = match std::sync::Arc::try_unwrap(self.trie_backend.into_storage().proof_recorder) { - Ok(r) => r.into_inner(), - Err(arc) => arc.read().clone(), // TODO EMCH this should have only one handle (refcell should work in 
fact) -> try panic qed - }; + let recorder = self.trie_backend.into_storage().proof_recorder.into_inner(); (recorder, input) } @@ -378,10 +374,7 @@ impl Backend for ProvingBackend let current_recorder = storage.proof_recorder; let backend = storage.backend; if current_recorder.write().merge(previous_recorder) { - let current_recorder = match Arc::try_unwrap(current_recorder) { - Ok(r) => r.into_inner(), - Err(arc) => arc.read().clone(), // TODO EMCH panic here - }; + let current_recorder = current_recorder.into_inner(); Some(ProvingBackend::::from_backend_with_recorder(backend, root, current_recorder)) } else { From ad72850e680287e30876be12ff23f86404c6c48b Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 16:56:47 +0200 Subject: [PATCH 168/185] comment --- client/api/src/call_executor.rs | 1 - client/service/src/builder.rs | 4 ++-- primitives/state-machine/src/lib.rs | 6 +++--- primitives/state-machine/src/proving_backend.rs | 2 ++ 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 0e6fac785df27..5e94027f11d1c 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -123,7 +123,6 @@ pub trait CallExecutor { /// Execute a call to a contract on top of given trie state, gathering execution proof. /// /// No changes are made. - /// TODO EMCH try to remove P param and use the associated backend type? 
fn prove_at_proof_backend_state>>( &self, proof_backend: &P, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 201320a5b5bbe..afe51a00d2fa3 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -951,7 +951,7 @@ ServiceBuilder< TImpQu: 'static + ImportQueue, TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, TRpc: sc_rpc::RpcExtension, - // TODO EMCH this constraint should be lifted when client get generic over StateBackend and Proof + // This constraint should be lifted when client get generic over StateBackend and Proof TBackend::State: StateBackend, StorageProof = SimpleProof>, { @@ -1473,7 +1473,7 @@ ServiceBuilder< MallocSizeOfWasm + 'static, TRpc: sc_rpc::RpcExtension, - // TODO EMCH removable bound when generic + // This constraint should be lifted when client get generic over StateBackend and Proof TBackend::State: StateBackend, StorageProof = SimpleProof>, { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index b25dda34d6c9a..3b8bc83bd89bf 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1316,7 +1316,7 @@ mod tests { let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]).unwrap(); let mut root_map = ChildrenProofMap::default(); root_map.insert(ChildInfo::top_trie().proof_info(), remote_root.encode()); - assert!(ProofInput::ChildTrieRoots(root_map) == root_input); + assert!(ProofInput::ChildTrieRoots(root_map) == root_input); let input = ProofInput::query_plan( remote_root.encode(), @@ -1385,7 +1385,7 @@ mod tests { } fn prove_read_and_proof_check_works_inner

() where - P: BackendProof, + P: BackendProof, P::ProofRaw: Clone, { let child_info = ChildInfo::new_default(b"sub1"); @@ -1453,7 +1453,7 @@ mod tests { } fn prove_read_and_proof_on_fullbackend_works_inner

() where - P: FullBackendProof, + P: FullBackendProof, P::ProofRaw: Clone, { // fetch read proof from 'remote' full node diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index e50659ab10a79..be7db2d22b17d 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -128,6 +128,8 @@ pub struct ProvingBackend< /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend, H: Hasher, R: RecordBackend> { backend: S, + // Inner mutability require sync here due to sync constraint on TrieBackendStorage (itself + // related to HashDB). proof_recorder: RwLock, _ph: PhantomData, } From cb8583d38936280eb06efb0909da7afcb291e11e Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 16:57:34 +0200 Subject: [PATCH 169/185] Fix full backend test --- primitives/state-machine/src/lib.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 3b8bc83bd89bf..ca3f3bdd30731 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1460,15 +1460,13 @@ mod tests { let remote_backend = trie_backend::tests::test_trie_proof::

(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); -// TODO EMCH use InMemoryFullProofCheckBackend // check proof locally - let local_result1 = read_proof_check::, BlakeTwo256, _>( + let local_result1 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[b"value2"], ).unwrap(); -// TODO EMCH use InMemoryFullProofCheckBackend - let local_result2 = read_proof_check::, BlakeTwo256, _>( + let local_result2 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[&[0xff]], From 9a009ab49cd1492c3441ae9f5878d86826cd3f0c Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 17:16:39 +0200 Subject: [PATCH 170/185] assert previous encoding is the same as SimpleProof. --- primitives/trie/src/lib.rs | 6 +-- primitives/trie/src/storage_proof/compact.rs | 3 -- primitives/trie/src/storage_proof/mod.rs | 56 +++----------------- primitives/trie/src/storage_proof/simple.rs | 9 +++- 4 files changed, 17 insertions(+), 57 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 58c6975e7447d..83a64a6c6aeab 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -52,9 +52,9 @@ pub use memory_db::prefixed_key; pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; /// Access record backend for a given backend storage proof. -/// TODO EMCH check if can be use at other place (rg 'as BackendS') -/// TODO seems rather useless we use the reg one moste of the time, not exposing it ? -pub type RecordBackendFor = <

>::ProofRaw as RecordableProof>::RecordBackend; +pub type RecordBackendFor = < +

>::ProofRaw as RecordableProof +>::RecordBackend; #[derive(Default)] /// substrate trie layout diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 3820b5ac3fe8d..8fdabf1c005f8 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -75,12 +75,9 @@ impl sp_std::fmt::Debug for Full { /// This is needed mainly for technical reasons (merge then compact proofs). /// (though if possible user should rather use a flat record /// backend in the different context and avoid merge). -/// TODO EMCH try no backend in this case. -/// TODO could move to simple #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct FullForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>); - impl Common for Flat { fn empty() -> Self { Flat(Default::default(), PhantomData) diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index ee0fdb07e3bb7..825ec89d9ab69 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -6,7 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use sp_std::collections::{btree_map::BTreeMap, btree_map, btree_map::Entry}; +use sp_std::collections::{btree_map::BTreeMap, btree_map::Entry}; use sp_std::collections::btree_set::BTreeSet; use sp_std::vec::Vec; use codec::{Codec, Encode, Decode, Input as CodecInput, Output as CodecOutput, Error as CodecError}; @@ -149,46 +149,6 @@ impl Input { } } - /// Updates input with new content. - /// Return false on failure. - /// Fails when the input type differs, except for `None` input - /// that is always reassignable. - /// - /// Merging query plan inputs is not allowed (unimplemented), - /// but could be. - /// TODO EMCH Unused?? 
- #[must_use] - pub fn consolidate(&mut self, other: Self) -> Result<()> { - let incompatible_types = || Err(error("Incompatible types for consolidating proofs")); - match self { - Input::None => { - *self = other; - }, - Input::ChildTrieRoots(children) => { - match other { - Input::None => (), - Input::ChildTrieRoots(children_other) => { - for (child_info, root) in children_other { - match children.entry(child_info) { - btree_map::Entry::Occupied(v) => if v.get() != &root { - return Err(error("Incompatible children root when consolidating proofs")); - }, - btree_map::Entry::Vacant(v) => { - v.insert(root); - }, - } - } - }, - Input::QueryPlan(..) => return incompatible_types(), - Input::QueryPlanWithValues(..) => return incompatible_types(), - } - }, - Input::QueryPlan(..) => return incompatible_types(), - Input::QueryPlanWithValues(..) => return incompatible_types(), - } - Ok(()) - } - /// Build a query plan with values. /// All tuples are key and optional value. /// Children iterator also contains children encoded root. @@ -198,7 +158,7 @@ impl Input { pub fn query_plan_with_values( top_encoded_root: Vec, top: impl Iterator, Option>)>, - children: impl Iterator, impl Iterator, Option>)>)>, + children: impl Iterator, impl Iterator, Option>)>)>, include_child_root: bool, ) -> Input { let mut result = ChildrenProofMap::default(); @@ -225,7 +185,7 @@ impl Input { pub fn query_plan( top_encoded_root: Vec, top: impl Iterator>, - children: impl Iterator, impl Iterator>)>, + children: impl Iterator, impl Iterator>)>, include_child_root: bool, ) -> Input { let mut result = ChildrenProofMap::default(); @@ -320,16 +280,14 @@ pub trait Verifiable: Codec + Common { } /// Trie encoded node recorder. -/// TODO EMCH consider using &mut and change reg storage (consume) proof -/// to implement without rc & sync, and encapsulate from calling -/// code. 
-/// TODO EMCH here we pass Hasher as parameter for convenience, but we only really need H::Out +/// Note that this trait and other could use H::Out as generic parameter, +/// but currently use Hasher for code readability. pub trait RecordBackend: Send + Sync + Clone + Default { /// Access recorded value, allow using the backend as a cache. fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option>; /// Record the actual value. fn record(&mut self, child_info: ChildInfo, key: H::Out, value: Option); - /// Merge two record, can fail. + /// Merge two records, returns false on failure. fn merge(&mut self, other: Self) -> bool; } @@ -383,7 +341,7 @@ impl RecordBackend for FullRecorder { for (child_info, other) in sp_std::mem::replace(&mut other.0, Default::default()) { match self.0.entry(child_info) { Entry::Occupied(mut entry) => { - for (key, value) in other.0 { + for (key, value) in other.0 { match entry.get_mut().entry(key) { HEntry::Occupied(entry) => { if entry.get() != &value { diff --git a/primitives/trie/src/storage_proof/simple.rs b/primitives/trie/src/storage_proof/simple.rs index e8d2da3d05e8d..64db91418be69 100644 --- a/primitives/trie/src/storage_proof/simple.rs +++ b/primitives/trie/src/storage_proof/simple.rs @@ -41,8 +41,6 @@ impl Flat { } } -// TODO EMCH tets that proof nodes encode to the same as flat (to validate change in grandpa) - impl Common for Flat { fn empty() -> Self { Flat(Default::default()) @@ -186,3 +184,10 @@ impl Into for Flat { Full(result) } } + +#[test] +fn flat_encoding_compatible() { + let nodes = ProofNodes::from([vec![1u8], vec![2u8, 3u8]]); + let flat = Flat::from_nodes(nodes.clone()); + assert_eq!(nodes.encode(), flat.encode()); +} From 078576ca7a324c1e9bd7913cf24703bf93681eba Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 17:35:10 +0200 Subject: [PATCH 171/185] Remove temporary variants of multiple proof --- primitives/trie/src/storage_proof/compact.rs | 2 - primitives/trie/src/storage_proof/multiple.rs | 157 
------------------ 2 files changed, 159 deletions(-) diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 8fdabf1c005f8..d988e65c19e0b 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -111,7 +111,6 @@ impl Common for FullForMerge { /// Note that this implementation assumes all proof are from a same state. impl Mergeable for FullForMerge { fn merge(proofs: I) -> Self where I: IntoIterator { - // TODO EMCH optimize all merge to init to first element let mut child_sets = ChildrenProofMap::<(ProofMapTrieNodes, Vec)>::default(); for children in proofs { for (child_info, (mut proof, root)) in children.0.into_iter() { @@ -358,7 +357,6 @@ impl Into for FullForMerge fn into(self) -> super::simple::Flat { let mut result = ProofNodes::default(); for (_child_info, (nodes, _root)) in self.0 { - // TODO EMCH do not extend on first result.extend(nodes.0.into_iter().map(|(_k, v)| v)); } super::simple::Flat(result) diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index 9ac8985e5e8bb..73c18bb699db2 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -27,12 +27,6 @@ pub enum StorageProofKind { /// Kind for `MultipleStorageProof::TrieSkipHashes`. TrieSkipHashes = 2, - - /// Kind for `MultipleStorageProof::FullForMerge`. - FullForMerge = 126, - - /// Kind for `MultipleStorageProof::QueryPlan`. 
- KnownQueryPlanAndValues = 127, } impl StorageProofKind { @@ -42,8 +36,6 @@ impl StorageProofKind { Some(match encoded { x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, - x if x == StorageProofKind::FullForMerge as u8 => StorageProofKind::FullForMerge, - x if x == StorageProofKind::KnownQueryPlanAndValues as u8 => StorageProofKind::KnownQueryPlanAndValues, _ => return None, }) } @@ -60,24 +52,6 @@ pub enum MultipleStorageProof { /// See `crate::storage_proof::compact::Flat`. TrieSkipHashes(super::compact::Flat>, PhantomData), - - /// See `crate::storage_proof::compact::FullForMerge`. - /// - /// This variant is temporary to allow producing known query proof over - /// substrate state machine, until it can be configured over a specific - /// proving backend. - /// The fundamental flaw here is that this leads to a partial implementation - /// of the proof verification. - FullForMerge(super::compact::FullForMerge), - - /// See `crate::storage_proof::query_plan::KnownQueryPlanAndValues`. - /// - /// This variant is temporary to allow producing known query proof over - /// substrate state machine, until it can be configured over a specific - /// proving backend. - /// The fundamental flaw here is that this leads to a partial implementation - /// of the proof verification. 
- KnownQueryPlanAndValues(super::query_plan::KnownQueryPlanAndValues>), } impl sp_std::fmt::Debug for MultipleStorageProof { @@ -85,8 +59,6 @@ impl sp_std::fmt::Debug for MultipleStorageProof { match self { MultipleStorageProof::Flat(v) => v.fmt(f), MultipleStorageProof::TrieSkipHashes(v, _) => v.fmt(f), - MultipleStorageProof::FullForMerge(v) => v.fmt(f), - MultipleStorageProof::KnownQueryPlanAndValues(v) => v.fmt(f), } } } @@ -114,10 +86,6 @@ impl Decode for MultipleStorageProof { Decode::decode(value)?, PhantomData, ), - StorageProofKind::FullForMerge => MultipleStorageProof::FullForMerge(Decode::decode(value)?), - StorageProofKind::KnownQueryPlanAndValues => MultipleStorageProof::KnownQueryPlanAndValues( - Decode::decode(value)? - ), }) } } @@ -128,8 +96,6 @@ impl Encode for MultipleStorageProof { match self { MultipleStorageProof::Flat(p) => p.encode_to(dest), MultipleStorageProof::TrieSkipHashes(p, _) => p.encode_to(dest), - MultipleStorageProof::FullForMerge(p) => p.encode_to(dest), - MultipleStorageProof::KnownQueryPlanAndValues(p) => p.encode_to(dest), } } } @@ -141,21 +107,13 @@ impl Common for MultipleStorageProof { MultipleStorageProof::Flat(super::simple::Flat::empty()), StorageProofKind::TrieSkipHashes => MultipleStorageProof::TrieSkipHashes(super::compact::Flat::empty(), PhantomData), - StorageProofKind::FullForMerge => - MultipleStorageProof::FullForMerge(super::compact::FullForMerge::empty()), - StorageProofKind::KnownQueryPlanAndValues => MultipleStorageProof::KnownQueryPlanAndValues( - super::query_plan::KnownQueryPlanAndValues::empty() - ), } } - fn is_empty(&self) -> bool { match self { MultipleStorageProof::Flat(data) => data.is_empty(), MultipleStorageProof::TrieSkipHashes(data, _) => data.is_empty(), - MultipleStorageProof::FullForMerge(data) => data.is_empty(), - MultipleStorageProof::KnownQueryPlanAndValues(data) => data.is_empty(), } } } @@ -171,8 +129,6 @@ impl MultipleRecorder { match kind { StorageProofKind::Flat => 
MultipleRecorder::Flat(Default::default(), D::KIND, PhantomData), StorageProofKind::TrieSkipHashes => MultipleRecorder::Full(Default::default(), D::KIND), - StorageProofKind::FullForMerge => MultipleRecorder::Full(Default::default(), D::KIND), - StorageProofKind::KnownQueryPlanAndValues => MultipleRecorder::Full(Default::default(), D::KIND), } } @@ -247,8 +203,6 @@ impl Recordable for MultipleStorageProof D: DefaultKind, { // Actually one could ignore this if he knows its type to be non compact. - // TODO EMCH try a const function over D, this have very little chance to work - // Maybe switch that to Option so we can put it to None here as it is variable const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; type RecordBackend = MultipleRecorder; @@ -268,20 +222,6 @@ impl Recordable for MultipleStorageProof )) } }, - StorageProofKind::FullForMerge => { - if let MultipleRecorder::Full(rec, _) = recorder { - return Ok(MultipleStorageProof::FullForMerge( - super::compact::FullForMerge::extract_proof(rec, input)?, - )) - } - }, - StorageProofKind::KnownQueryPlanAndValues => { - if let MultipleRecorder::Full(rec, _) = recorder { - return Ok(MultipleStorageProof::KnownQueryPlanAndValues( - super::query_plan::KnownQueryPlanAndValues::extract_proof(rec, input)?, - )) - } - }, } Err(missing_pack_input()) } @@ -299,7 +239,6 @@ impl BackendProof for MultipleStorageProof match self { MultipleStorageProof::Flat(p) => p.into_partial_db(), MultipleStorageProof::TrieSkipHashes(p, _) => p.into_partial_db(), - _ => panic!("misused multiproof"), // TODO EMCH this is a tradeoff for producing proof without checking but the corresponding variant should be removed. 
} } @@ -327,41 +266,16 @@ impl TryInto>> for MultipleStorageProof TryInto for MultipleStorageProof { - type Error = super::Error; - - fn try_into(self) -> Result { - match self { - MultipleStorageProof::FullForMerge(p) => Ok(p), - _ => Err(incompatible_type()), - } - } -} - -impl TryInto>> for MultipleStorageProof { - type Error = super::Error; - - fn try_into(self) -> Result>> { - match self { - MultipleStorageProof::KnownQueryPlanAndValues(p) => Ok(p), - _ => Err(incompatible_type()), - } - } -} - impl MultipleStorageProof { /// Get kind type for the storage proof variant. pub fn kind(&self) -> StorageProofKind { match self { MultipleStorageProof::Flat(_) => StorageProofKind::Flat, MultipleStorageProof::TrieSkipHashes(_, _) => StorageProofKind::TrieSkipHashes, - MultipleStorageProof::FullForMerge(_) => StorageProofKind::FullForMerge, - MultipleStorageProof::KnownQueryPlanAndValues(_) => StorageProofKind::KnownQueryPlanAndValues, } } } - impl Into> for super::compact::FullForMerge where H::Out: Codec, @@ -370,77 +284,6 @@ impl Into> for super::comp match D::KIND { StorageProofKind::Flat => MultipleStorageProof::Flat(self.into()), StorageProofKind::TrieSkipHashes => MultipleStorageProof::TrieSkipHashes(self.into(), PhantomData), - StorageProofKind::FullForMerge => MultipleStorageProof::FullForMerge(self), - // we cannot convert, actually this should not be in storage proof kind TODO EMCH - // this was only here to be able to product query plan without using different backend. - // User shall therefore register and try into: but target is that user uses the query_plan - // backend. - StorageProofKind::KnownQueryPlanAndValues => MultipleStorageProof::FullForMerge(self), } } } - - -/* - /// Create in-memory storage of proof check backend. - /// - /// Behave similarily to `into_partial_db`. 
- pub fn into_partial_flat_db(self) -> Result> - where - H: Hasher, - H::Out: Decode, - { - let mut db = MemoryDB::default(); - let mut db_empty = true; - match self { - s@MultipleStorageProof::Flat(..) => { - for item in s.iter_nodes_flatten() { - db.insert(EMPTY_PREFIX, &item[..]); - } - }, - MultipleStorageProof::Full(children) => { - for (_child_info, proof) in children.into_iter() { - for item in proof.into_iter() { - db.insert(EMPTY_PREFIX, &item); - } - } - }, - MultipleStorageProof::TrieSkipHashesForMerge(children) => { - for (_child_info, (proof, _root)) in children.into_iter() { - for (key, value) in proof.0.into_iter() { - let key = Decode::decode(&mut &key[..])?; - db.emplace(key, EMPTY_PREFIX, value); - } - } - }, - MultipleStorageProof::TrieSkipHashesFull(children) => { - for (_child_info, proof) in children.into_iter() { - // Note that this does check all hashes so using a trie backend - // for further check is not really good (could use a direct value backend). - let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - if db_empty { - db_empty = false; - db = child_db; - } else { - db.consolidate(child_db); - } - } - }, - MultipleStorageProof::TrieSkipHashes(children) => { - for proof in children.into_iter() { - let (_root, child_db) = crate::unpack_proof_to_memdb::>(proof.as_slice())?; - if db_empty { - db_empty = false; - db = child_db; - } else { - db.consolidate(child_db); - } - } - }, - MultipleStorageProof::KnownQueryPlanAndValues(_children) => { - return Err(no_partial_db_support()); - }, - } - Ok(db) - } -*/ From 9c3c299a446a1dd19cf7f54a538906ae23369468 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 17:44:27 +0200 Subject: [PATCH 172/185] remove some proofs todos --- primitives/trie/src/storage_proof/compact.rs | 26 +++++++++++--------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index 
d988e65c19e0b..e7b727dfef038 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -283,18 +283,24 @@ impl Into> for Full { } } -// TODO switch to try into (works only for one compact flat) -impl Into> for Flat { - fn into(mut self) -> Full { - assert!(self.0.len() == 1); // works only if only top trie +impl TryInto> for Flat { + type Error = super::Error; + + fn try_into(mut self) -> Result> { + if self.0.len() > 1 { + return Err(super::error( + "Can only convert compact flat proof if it is only top storage" + )); + } let mut result = ChildrenProofMap::default(); - result.insert(ChildInfoProof::top_trie(), self.0.pop().expect("asserted above; qed")); - Full(result, PhantomData) + if let Some(v) = self.0.pop() { + result.insert(ChildInfoProof::top_trie(), v); + } + Ok(Full(result, PhantomData)) } } impl FullForMerge { - // TODO EMCH use try_into! fn to_full(self) -> Result> where L: TrieLayout, @@ -311,7 +317,6 @@ impl FullForMerge { Ok(Full(result, PhantomData)) } - // TODO EMCH use try_into! 
fn to_flat(self) -> Result> where L: TrieLayout, @@ -334,10 +339,9 @@ impl Into> for FullForMerge L: TrieLayout, TrieHash: Codec, { - // TODO consider only using try into (may not be very straightforward with backend) fn into(self) -> Full { self.to_full() - .expect("Full for merge was recorded on a correct state") + .expect("Full for merge was recorded on a valid state; qed") } } @@ -348,7 +352,7 @@ impl Into> for FullForMerge { fn into(self) -> Flat { self.to_flat() - .expect("Full for merge was recorded on a correct state") + .expect("Full for merge was recorded on a valid state; qed") } } From 3000ea67493f720e159e389fd652980b773ed278 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 20:32:12 +0200 Subject: [PATCH 173/185] renaming reg 'registering' to rec 'recording' --- client/api/src/backend.rs | 2 +- client/api/src/call_executor.rs | 2 +- client/db/src/bench.rs | 8 ++++---- client/db/src/lib.rs | 8 ++++---- client/db/src/storage_cache.rs | 16 +++++++-------- client/light/src/backend.rs | 8 ++++---- client/light/src/call_executor.rs | 2 +- client/service/src/client/call_executor.rs | 6 +++--- client/service/test/src/client/light.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 4 ++-- primitives/api/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 20 +++++++++---------- primitives/state-machine/src/lib.rs | 8 ++++---- .../state-machine/src/proving_backend.rs | 10 +++++----- primitives/state-machine/src/trie_backend.rs | 6 +++--- 15 files changed, 52 insertions(+), 52 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 2a1ea9bb31988..8743c283021c5 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -50,7 +50,7 @@ pub type StateBackendFor = >::State; /// Extracts the proof for the given backend. 
pub type ProofFor = as StateBackend>>::StorageProof; -type RegProofForSB = >>::RegProofBackend; +type RegProofForSB = >>::RecProofBackend; type RegProofForB = RegProofForSB, Block>; diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 5e94027f11d1c..0ecbdaa9475d0 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -123,7 +123,7 @@ pub trait CallExecutor { /// Execute a call to a contract on top of given trie state, gathering execution proof. /// /// No changes are made. - fn prove_at_proof_backend_state>>( + fn prove_at_proof_backend_state>>( &self, proof_backend: &P, overlay: &mut OverlayedChanges, diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index a6df8644097b7..04ad8ee7a4dee 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -120,7 +120,7 @@ impl StateBackend> for BenchmarkingState { type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; type StorageProof = as StateBackend>>::StorageProof; - type RegProofBackend = as StateBackend>>::RegProofBackend; + type RecProofBackend = as StateBackend>>::RecProofBackend; type ProofCheckBackend = as StateBackend>>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -281,12 +281,12 @@ impl StateBackend> for BenchmarkingState { self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) } - fn from_reg_state( + fn from_previous_rec_state( self, previous: RecordBackendFor>, previous_input: ProofInput, - ) -> Option { - self.state.borrow_mut().take().and_then(|s| s.from_reg_state(previous, previous_input)) + ) -> Option { + self.state.borrow_mut().take().and_then(|s| s.from_previous_rec_state(previous, previous_input)) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 5eaaaecd6dcbe..fc2a02d7ee75a 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -154,7 +154,7 @@ impl StateBackend> for RefTrackingState { 
type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; type StorageProof = as StateBackend>>::StorageProof; - type RegProofBackend = as StateBackend>>::RegProofBackend; + type RecProofBackend = as StateBackend>>::RecProofBackend; type ProofCheckBackend = as StateBackend>>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -253,13 +253,13 @@ impl StateBackend> for RefTrackingState { self.state().child_keys(child_info, prefix) } - fn from_reg_state( + fn from_previous_rec_state( mut self, previous: RecordBackendFor>, previous_input: sp_state_machine::ProofInput, - ) -> Option { + ) -> Option { let state = std::mem::replace(&mut self.state, Default::default()).expect("Non dropped state"); - state.from_reg_state(previous, previous_input) + state.from_previous_rec_state(previous, previous_input) } fn register_overlay_stats(&mut self, stats: &StateMachineStats) { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 183b35e6e92f4..a51a13ecbc360 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -496,7 +496,7 @@ impl>, B: BlockT> StateBackend> for Cachin type Error = S::Error; type Transaction = S::Transaction; type StorageProof = S::StorageProof; - type RegProofBackend = S::RegProofBackend; + type RecProofBackend = S::RecProofBackend; type ProofCheckBackend = S::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -654,12 +654,12 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.child_keys(child_info, prefix) } - fn from_reg_state( + fn from_previous_rec_state( self, previous: RecordBackendFor>, previous_input: sp_state_machine::ProofInput, - ) -> Option { - self.state.from_reg_state(previous, previous_input) + ) -> Option { + self.state.from_previous_rec_state(previous, previous_input) } fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { @@ -743,7 +743,7 @@ impl>, B: BlockT> 
StateBackend> for Syncin type Error = S::Error; type Transaction = S::Transaction; type StorageProof = S::StorageProof; - type RegProofBackend = S::RegProofBackend; + type RecProofBackend = S::RecProofBackend; type ProofCheckBackend = S::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -850,12 +850,12 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().usage_info() } - fn from_reg_state( + fn from_previous_rec_state( mut self, previous: RecordBackendFor>, previous_input: sp_state_machine::ProofInput, - ) -> Option { - self.sync().and_then(|s| s.from_reg_state(previous, previous_input)) + ) -> Option { + self.sync().and_then(|s| s.from_previous_rec_state(previous, previous_input)) } } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index f413f552eb347..b32e5facfda60 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -381,7 +381,7 @@ impl StateBackend for GenesisOrUnavailableState type Error = ClientError; type Transaction = as StateBackend>::Transaction; type StorageProof = as StateBackend>::StorageProof; - type RegProofBackend = as StateBackend>::RegProofBackend; + type RecProofBackend = as StateBackend>::RecProofBackend; type ProofCheckBackend = as StateBackend>::ProofCheckBackend; fn storage(&self, key: &[u8]) -> ClientResult>> { @@ -511,13 +511,13 @@ impl StateBackend for GenesisOrUnavailableState sp_state_machine::UsageInfo::empty() } - fn from_reg_state( + fn from_previous_rec_state( self, previous: RecordBackendFor, previous_input: sp_state_machine::ProofInput, - ) -> Option { + ) -> Option { match self { - GenesisOrUnavailableState::Genesis(state) => state.from_reg_state(previous, previous_input), + GenesisOrUnavailableState::Genesis(state) => state.from_previous_rec_state(previous, previous_input), GenesisOrUnavailableState::Unavailable => None, } } diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index 
c22874330aca4..5eece98f4616f 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -158,7 +158,7 @@ impl CallExecutor for } } - fn prove_at_proof_backend_state>>( + fn prove_at_proof_backend_state>>( &self, _proof_backend: &P, _overlay: &mut OverlayedChanges, diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 1e7ff8033d559..7501af532b25f 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -171,7 +171,7 @@ where let state = self.backend.state_at(*at)?; - let backend = state.from_reg_state( + let backend = state.from_previous_rec_state( std::mem::replace(recorder, Default::default()), std::mem::replace(input, Default::default()), ).ok_or_else(|| @@ -195,7 +195,7 @@ where // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) }; - use sp_state_machine::backend::RegProofBackend; + use sp_state_machine::backend::RecProofBackend; let (recorder_state, input_state) = backend.extract_recorder(); *recorder = recorder_state; *input = input_state; @@ -243,7 +243,7 @@ where .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } - fn prove_at_proof_backend_state>>( + fn prove_at_proof_backend_state>>( &self, proof_backend: &P, overlay: &mut OverlayedChanges, diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 58373c2130139..679ce4c115cb4 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -243,7 +243,7 @@ impl CallExecutor for DummyCallExecutor { unreachable!() } - fn prove_at_proof_backend_state>>( + fn prove_at_proof_backend_state>>( &self, _proof_backend: &P, _overlay: &mut OverlayedChanges, diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs 
b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 257f7e6e0e2c9..248d3f529da37 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -293,8 +293,8 @@ fn generate_runtime_api_base_structures() -> Result { let #crate_::ProofRecorder{ recorder, input } = &mut *recorder.borrow_mut(); let input = std::mem::replace(input, #crate_::ProofInput::None); < - >>::RegProofBackend - as #crate_::RegProofBackend<#crate_::HashFor> + >>::RecProofBackend + as #crate_::RecProofBackend<#crate_::HashFor> >::extract_proof_reg( &recorder, input, diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 31584b413214e..2133edc95b837 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -40,7 +40,7 @@ extern crate self as sp_api; #[cfg(feature = "std")] pub use sp_state_machine::{ OverlayedChanges, ProofCommon, backend::Backend as StateBackend, ChangesTrieState, InMemoryBackend, - ProofInput, backend::{ProofRawFor, RegProofBackend}, RecordableProof, + ProofInput, backend::{ProofRawFor, RecProofBackend}, RecordableProof, }; #[doc(hidden)] #[cfg(feature = "std")] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 37563015550b4..f7d00a4d0454a 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -47,7 +47,7 @@ pub trait Backend: Sized + std::fmt::Debug { type StorageProof: BackendProof; /// Type of backend for recording proof. - type RegProofBackend: RegProofBackend; + type RecProofBackend: RecProofBackend; /// Type of backend for using a proof. type ProofCheckBackend: ProofCheckBackend; @@ -165,19 +165,19 @@ pub trait Backend: Sized + std::fmt::Debug { all } - /// Try convert into a proof backend. - fn as_proof_backend(self) -> Option { - self.from_reg_state(Default::default(), Default::default()) + /// Try convert into a recording proof backend. 
+ fn as_proof_backend(self) -> Option { + self.from_previous_rec_state(Default::default(), Default::default()) } - /// Try convert into a proof backend. + /// Try convert into a recording proof backend from previous recording state. /// We can optionally use a previous proof backend to avoid having to merge /// proof later. - fn from_reg_state( + fn from_previous_rec_state( self, previous: RecordBackendFor, previous_input: ProofInput, - ) -> Option; + ) -> Option; /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. @@ -248,7 +248,7 @@ pub trait GenesisStateBackend: Backend } /// Backend used to register a proof record. -pub trait RegProofBackend: crate::backend::Backend +pub trait RecProofBackend: crate::backend::Backend where H: Hasher, { @@ -285,7 +285,7 @@ impl<'a, T, H> Backend for &'a T type Error = T::Error; type Transaction = T::Transaction; type StorageProof = T::StorageProof; - type RegProofBackend = T::RegProofBackend; + type RecProofBackend = T::RecProofBackend; type ProofCheckBackend = T::ProofCheckBackend; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -362,7 +362,7 @@ impl<'a, T, H> Backend for &'a T (*self).usage_info() } - fn from_reg_state(self, _previous: RecordBackendFor, _input: ProofInput) -> Option { + fn from_previous_rec_state(self, _previous: RecordBackendFor, _input: ProofInput) -> Option { // cannot move out of reference, consider cloning when needed. 
None } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index ca3f3bdd30731..a7859c9ef6313 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -80,7 +80,7 @@ pub use in_memory_backend::new_in_mem; pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; pub use sp_core::traits::CloneableSpawn; -use backend::{Backend, RegProofBackend, ProofCheckBackend, ProofRawFor}; +use backend::{Backend, RecProofBackend, ProofCheckBackend, ProofRawFor}; type CallResult = Result, E>; @@ -510,7 +510,7 @@ pub fn prove_execution_on_proof_backend( runtime_code: &RuntimeCode, ) -> Result<(Vec, ProofRawFor), Box> where - P: RegProofBackend, + P: RecProofBackend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, @@ -673,7 +673,7 @@ pub fn prove_read_on_proof_backend( keys: I, ) -> Result, Box> where - P: RegProofBackend, + P: RecProofBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, @@ -694,7 +694,7 @@ pub fn prove_child_read_on_proof_backend( keys: I, ) -> Result, Box> where - P: RegProofBackend, + P: RecProofBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index be7db2d22b17d..d4f8d25d099c8 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -30,7 +30,7 @@ pub use sp_trie::{Recorder, ChildrenProofMap, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, DBValue}; -use crate::backend::{Backend, RegProofBackend, ProofRawFor}; +use crate::backend::{Backend, RecProofBackend, ProofRawFor}; use sp_core::storage::{ChildInfo, ChildInfoProof}; use std::marker::PhantomData; @@ -238,7 +238,7 @@ impl, H: Hasher, P: BackendProof> std::fmt::Debug } } -impl 
RegProofBackend for ProvingBackend +impl RecProofBackend for ProvingBackend where S: TrieBackendStorage, H: Hasher, @@ -277,7 +277,7 @@ impl Backend for ProvingBackend type Error = String; type Transaction = S::Overlay; type StorageProof = P; - type RegProofBackend = Self; + type RecProofBackend = Self; type ProofCheckBackend = crate::InMemoryProofCheckBackend; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -366,11 +366,11 @@ impl Backend for ProvingBackend self.trie_backend.usage_info() } - fn from_reg_state( + fn from_previous_rec_state( self, previous_recorder: crate::backend::RecordBackendFor, previous_input: ProofInput, - ) -> Option { + ) -> Option { let root = self.trie_backend.essence().root().clone(); let storage = self.trie_backend.into_storage(); let current_recorder = storage.proof_recorder; diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 76bb603cfd2ca..db8fb1d4ef83e 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -140,7 +140,7 @@ impl Backend for TrieBackend where type Error = String; type Transaction = S::Overlay; type StorageProof = P; - type RegProofBackend = crate::proving_backend::ProvingBackend< + type RecProofBackend = crate::proving_backend::ProvingBackend< S, H, Self::StorageProof, @@ -301,11 +301,11 @@ impl Backend for TrieBackend where (root, is_default, write_overlay) } - fn from_reg_state( + fn from_previous_rec_state( self, recorder: RecordBackendFor, previous_input: ProofInput, - ) -> Option { + ) -> Option { let root = self.essence.root().clone(); let backend = crate::proving_backend::ProvingBackend::from_backend_with_recorder( self.essence.into_storage(), From dba2bb1dcf64e8ca85e1d6f3dc68a6203be1c01b Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 12 Jun 2020 20:33:33 +0200 Subject: [PATCH 174/185] minor doc change --- primitives/state-machine/src/backend.rs | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index f7d00a4d0454a..0a7ac6a3b8111 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -265,7 +265,7 @@ pub trait RecProofBackend: crate::backend::Backend ) -> Result, Box>; } -/// Backend used to produce proof. +/// Backend used to utilize a proof. pub trait ProofCheckBackend: Sized + crate::backend::Backend where H: Hasher, From 2bc0dad621cb9557bc3843b84395c1f0f5bad36f Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 15 Jun 2020 10:06:35 +0200 Subject: [PATCH 175/185] rename trieskiphashes to compact --- primitives/trie/src/storage_proof/compact.rs | 1 - primitives/trie/src/storage_proof/multiple.rs | 40 +++++++++---------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index e7b727dfef038..a036fcebd2764 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -70,7 +70,6 @@ impl sp_std::fmt::Debug for Full { /// Proof cotaining an intermediate representation of state /// which is mergeable and can be converted to compact representation. -/// Compatible with `TrieSkipHashes` and `TrieSkipHashesFull` proofs. /// /// This is needed mainly for technical reasons (merge then compact proofs). /// (though if possible user should rather use a flat record diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index 73c18bb699db2..b544656a05887 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -25,8 +25,8 @@ pub enum StorageProofKind { /// Kind for `MultipleStorageProof::Flat`. Flat = 1, - /// Kind for `MultipleStorageProof::TrieSkipHashes`. - TrieSkipHashes = 2, + /// Kind for `MultipleStorageProof::Compact`. 
+ Compact = 2, } impl StorageProofKind { @@ -35,7 +35,7 @@ impl StorageProofKind { pub fn from_byte(encoded: u8) -> Option { Some(match encoded { x if x == StorageProofKind::Flat as u8 => StorageProofKind::Flat, - x if x == StorageProofKind::TrieSkipHashes as u8 => StorageProofKind::TrieSkipHashes, + x if x == StorageProofKind::Compact as u8 => StorageProofKind::Compact, _ => return None, }) } @@ -51,14 +51,14 @@ pub enum MultipleStorageProof { Flat(super::simple::Flat), /// See `crate::storage_proof::compact::Flat`. - TrieSkipHashes(super::compact::Flat>, PhantomData), + Compact(super::compact::Flat>, PhantomData), } impl sp_std::fmt::Debug for MultipleStorageProof { fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { match self { MultipleStorageProof::Flat(v) => v.fmt(f), - MultipleStorageProof::TrieSkipHashes(v, _) => v.fmt(f), + MultipleStorageProof::Compact(v, _) => v.fmt(f), } } } @@ -82,7 +82,7 @@ impl Decode for MultipleStorageProof { Ok(match StorageProofKind::from_byte(kind) .ok_or_else(|| codec::Error::from("Invalid storage kind"))? 
{ StorageProofKind::Flat => MultipleStorageProof::Flat(Decode::decode(value)?), - StorageProofKind::TrieSkipHashes => MultipleStorageProof::TrieSkipHashes( + StorageProofKind::Compact => MultipleStorageProof::Compact( Decode::decode(value)?, PhantomData, ), @@ -95,7 +95,7 @@ impl Encode for MultipleStorageProof { (self.kind() as u8).encode_to(dest); match self { MultipleStorageProof::Flat(p) => p.encode_to(dest), - MultipleStorageProof::TrieSkipHashes(p, _) => p.encode_to(dest), + MultipleStorageProof::Compact(p, _) => p.encode_to(dest), } } } @@ -105,15 +105,15 @@ impl Common for MultipleStorageProof { match D::KIND { StorageProofKind::Flat => MultipleStorageProof::Flat(super::simple::Flat::empty()), - StorageProofKind::TrieSkipHashes => - MultipleStorageProof::TrieSkipHashes(super::compact::Flat::empty(), PhantomData), + StorageProofKind::Compact => + MultipleStorageProof::Compact(super::compact::Flat::empty(), PhantomData), } } fn is_empty(&self) -> bool { match self { MultipleStorageProof::Flat(data) => data.is_empty(), - MultipleStorageProof::TrieSkipHashes(data, _) => data.is_empty(), + MultipleStorageProof::Compact(data, _) => data.is_empty(), } } } @@ -128,7 +128,7 @@ impl MultipleRecorder { pub fn new_recorder(kind: StorageProofKind) -> Self { match kind { StorageProofKind::Flat => MultipleRecorder::Flat(Default::default(), D::KIND, PhantomData), - StorageProofKind::TrieSkipHashes => MultipleRecorder::Full(Default::default(), D::KIND), + StorageProofKind::Compact => MultipleRecorder::Full(Default::default(), D::KIND), } } @@ -149,14 +149,14 @@ impl Default for MultipleRecorder { impl Clone for MultipleRecorder { fn clone(&self) -> Self { + use MultipleRecorder::{Flat, Full}; match self { - MultipleRecorder::Flat(data, kind, _) => MultipleRecorder::Flat(data.clone(), *kind, PhantomData), - MultipleRecorder::Full(data, kind) => MultipleRecorder::Full(data.clone(), *kind), + Flat(data, kind, _) => Flat(data.clone(), *kind, PhantomData), + Full(data, kind) => 
Full(data.clone(), *kind), } } } - impl RecordBackend for MultipleRecorder { fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option> { match self { @@ -214,9 +214,9 @@ impl Recordable for MultipleStorageProof return Ok(MultipleStorageProof::Flat(super::simple::Flat::extract_proof(rec, input)?)) } }, - StorageProofKind::TrieSkipHashes => { + StorageProofKind::Compact => { if let MultipleRecorder::Full(rec, _) = recorder { - return Ok(MultipleStorageProof::TrieSkipHashes( + return Ok(MultipleStorageProof::Compact( super::compact::Flat::extract_proof(rec, input)?, PhantomData, )) @@ -238,7 +238,7 @@ impl BackendProof for MultipleStorageProof fn into_partial_db(self) -> Result> { match self { MultipleStorageProof::Flat(p) => p.into_partial_db(), - MultipleStorageProof::TrieSkipHashes(p, _) => p.into_partial_db(), + MultipleStorageProof::Compact(p, _) => p.into_partial_db(), } } @@ -260,7 +260,7 @@ impl TryInto>> for MultipleStorageProof Result>> { match self { - MultipleStorageProof::TrieSkipHashes(p, _) => Ok(p), + MultipleStorageProof::Compact(p, _) => Ok(p), _ => Err(incompatible_type()), } } @@ -271,7 +271,7 @@ impl MultipleStorageProof { pub fn kind(&self) -> StorageProofKind { match self { MultipleStorageProof::Flat(_) => StorageProofKind::Flat, - MultipleStorageProof::TrieSkipHashes(_, _) => StorageProofKind::TrieSkipHashes, + MultipleStorageProof::Compact(_, _) => StorageProofKind::Compact, } } } @@ -283,7 +283,7 @@ impl Into> for super::comp fn into(self) -> MultipleStorageProof { match D::KIND { StorageProofKind::Flat => MultipleStorageProof::Flat(self.into()), - StorageProofKind::TrieSkipHashes => MultipleStorageProof::TrieSkipHashes(self.into(), PhantomData), + StorageProofKind::Compact => MultipleStorageProof::Compact(self.into(), PhantomData), } } } From 522775cd2cf6c3ae0ba002f7723ad780e812bbd9 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 15 Jun 2020 11:29:24 +0200 Subject: [PATCH 176/185] Break lines. 
--- client/api/src/backend.rs | 8 ++- client/api/src/call_executor.rs | 9 ++- client/block-builder/src/lib.rs | 12 +++- client/finality-grandpa/src/finality_proof.rs | 10 +-- client/light/src/backend.rs | 3 +- client/network/src/chain.rs | 5 +- client/network/src/light_client_handler.rs | 3 +- client/service/src/client/call_executor.rs | 9 +-- .../api/proc-macro/src/impl_runtime_apis.rs | 6 +- .../proc-macro/src/mock_impl_runtime_apis.rs | 4 +- primitives/api/src/lib.rs | 6 +- primitives/state-machine/src/backend.rs | 8 ++- .../state-machine/src/changes_trie/storage.rs | 7 +- primitives/state-machine/src/lib.rs | 64 +++++++++++++++---- .../state-machine/src/proving_backend.rs | 5 +- .../state-machine/src/trie_backend_essence.rs | 48 ++++++++------ primitives/trie/src/lib.rs | 14 ++-- primitives/trie/src/storage_proof/mod.rs | 11 +++- 18 files changed, 167 insertions(+), 65 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 8743c283021c5..1de19338e54fe 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -48,11 +48,13 @@ use std::marker::PhantomData; pub type StateBackendFor = >::State; /// Extracts the proof for the given backend. -pub type ProofFor = as StateBackend>>::StorageProof; +pub type ProofFor = < + RecProofForB as StateBackend> +>::StorageProof; -type RegProofForSB = >>::RecProofBackend; +type RecProofForSB = >>::RecProofBackend; -type RegProofForB = RegProofForSB, Block>; +type RecProofForB = RecProofForSB, Block>; /// Extracts the transaction for the given state backend. 
pub type TransactionForSB = >>::Transaction; diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 0ecbdaa9475d0..8e2fd97f4f91c 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -32,6 +32,7 @@ use sp_core::{NativeOrEncoded,offchain::storage::OffchainOverlayedChanges}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; +use sp_state_machine::backend::ProofRawFor; /// Executor Provider pub trait ExecutorProvider { @@ -93,7 +94,9 @@ pub trait CallExecutor { initialize_block: InitializeBlock<'a, B>, execution_manager: ExecutionManager, native_call: Option, - proof_recorder: Option<&RefCell>::State, B>>>, + proof_recorder: Option<&RefCell< + ProofRecorder<>::State, B> + >>, extensions: Option, ) -> sp_blockchain::Result> where ExecutionManager: Clone; @@ -111,7 +114,7 @@ pub trait CallExecutor { overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] - ) -> Result<(Vec, sp_state_machine::backend::ProofRawFor>), sp_blockchain::Error> { + ) -> Result<(Vec, ProofRawFor>), sp_blockchain::Error> { let proof_state = state.as_proof_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) @@ -129,7 +132,7 @@ pub trait CallExecutor { overlay: &mut OverlayedChanges, method: &str, call_data: &[u8], - ) -> Result<(Vec, sp_state_machine::backend::ProofRawFor>), sp_blockchain::Error>; + ) -> Result<(Vec, ProofRawFor>), sp_blockchain::Error>; /// Get runtime version if supported. 
fn native_runtime_version(&self) -> Option<&NativeVersion>; diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index af6c8478a8eb1..3b44b987fe425 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -56,9 +56,17 @@ pub struct BuiltBlock>>, } -impl>> BuiltBlock { +impl BuiltBlock + where + Block: BlockT, + StateBackend: backend::StateBackend>, +{ /// Convert into the inner values. - pub fn into_inner(self) -> (Block, StorageChanges, Option>>) { + pub fn into_inner(self) -> ( + Block, + StorageChanges, + Option>>, + ) { (self.block, self.storage_changes, self.proof) } } diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 5603826ca9f25..e417e5f228313 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -70,7 +70,8 @@ pub trait AuthoritySetForFinalityProver: Send + Sync { } /// Trait that combines `StorageProvider` and `ProofProvider` -pub trait StorageAndProofProvider: StorageProvider + ProofProvider + Send + Sync +pub trait StorageAndProofProvider: StorageProvider + + ProofProvider + Send + Sync where Block: BlockT, BE: Backend + Send + Sync, @@ -85,9 +86,10 @@ impl StorageAndProofProvider for P {} /// Implementation of AuthoritySetForFinalityProver. 
-impl AuthoritySetForFinalityProver for Arc> +impl AuthoritySetForFinalityProver for Arc> where BE: Backend + Send + Sync + 'static, + Block: BlockT, { fn authorities(&self, block: &BlockId) -> ClientResult { let storage_key = StorageKey(GRANDPA_AUTHORITIES_KEY.to_vec()); @@ -874,8 +876,8 @@ pub(crate) mod tests { &blockchain, 0, auth3, - &ClosureAuthoritySetForFinalityChecker( - |hash, _header, proof: StorageProof| match proof.clone().into_nodes().into_iter().next().map(|x| x[0]) { + &ClosureAuthoritySetForFinalityChecker(|hash, _header, proof: StorageProof| + match proof.clone().into_nodes().into_iter().next().map(|x| x[0]) { Some(50) => Ok(auth5.clone()), Some(70) => Ok(auth7.clone()), _ => unreachable!("no other proofs should be checked: {}", hash), diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index b32e5facfda60..73983874d3eb6 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -517,7 +517,8 @@ impl StateBackend for GenesisOrUnavailableState previous_input: sp_state_machine::ProofInput, ) -> Option { match self { - GenesisOrUnavailableState::Genesis(state) => state.from_previous_rec_state(previous, previous_input), + GenesisOrUnavailableState::Genesis(state) => state + .from_previous_rec_state(previous, previous_input), GenesisOrUnavailableState::Unavailable => None, } } diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index eb605affd3611..2ec2dd941cdb6 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -23,8 +23,9 @@ use sc_client_api::{BlockBackend, ProofProvider, SimpleProof as StorageProof}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; /// Local client abstraction for the network. 
-pub trait Client: HeaderBackend + ProofProvider + BlockIdTo - + BlockBackend + HeaderMetadata + Send + Sync +pub trait Client: HeaderBackend + ProofProvider + + BlockIdTo + BlockBackend + HeaderMetadata + + Send + Sync {} impl Client for T diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs index 276fa4c88da94..fef54012010f0 100644 --- a/client/network/src/light_client_handler.rs +++ b/client/network/src/light_client_handler.rs @@ -1330,7 +1330,8 @@ mod tests { swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, yamux }; - use sc_client_api::{ProofCommon, RemoteReadChildRequest, FetchChecker, SimpleProof as StorageProof}; + use sc_client_api::{ProofCommon, RemoteReadChildRequest, FetchChecker, + SimpleProof as StorageProof}; use sp_blockchain::{Error as ClientError}; use sp_core::storage::ChildInfo; use std::{ diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 7501af532b25f..4ec3755526e48 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -140,7 +140,9 @@ where initialize_block: InitializeBlock<'a, Block>, execution_manager: ExecutionManager, native_call: Option, - recorder: Option<&RefCell>::State, Block>>>, + recorder: Option<&RefCell< + ProofRecorder<>::State, Block> + >>, extensions: Option, ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { match initialize_block { @@ -174,9 +176,8 @@ where let backend = state.from_previous_rec_state( std::mem::replace(recorder, Default::default()), std::mem::replace(input, Default::default()), - ).ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box - )?; + ).ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box)?; let result = { let mut state_machine = StateMachine::new( diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs 
b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 248d3f529da37..17542711dd44a 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -285,7 +285,9 @@ fn generate_runtime_api_base_structures() -> Result { self.recorder = Some(std::cell::RefCell::new(Default::default())); } - fn extract_proof(&mut self) -> Option<#crate_::ProofRawFor>> { + fn extract_proof( + &mut self, + ) -> Option<#crate_::ProofRawFor>> { use #crate_::RecordableProof; self.recorder .take() @@ -295,7 +297,7 @@ fn generate_runtime_api_base_structures() -> Result { < >>::RecProofBackend as #crate_::RecProofBackend<#crate_::HashFor> - >::extract_proof_reg( + >::extract_proof_rec( &recorder, input, ).ok() diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 758e5defe810b..00a35a9bd704a 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -99,7 +99,9 @@ fn implement_common_api_traits( unimplemented!("`record_proof` not implemented for runtime api mocks") } - fn extract_proof(&mut self) -> Option<#crate_::ProofRawFor>> { + fn extract_proof( + &mut self, + ) -> Option<#crate_::ProofRawFor>> { unimplemented!("`extract_proof` not implemented for runtime api mocks") } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 2133edc95b837..5a40fb2fe9437 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -531,7 +531,11 @@ pub struct ProofRecorder>, Block: BlockT> { } #[cfg(feature = "std")] -impl>, Block: BlockT> Default for ProofRecorder { +impl Default for ProofRecorder + where + Backend: StateBackend>, + Block: BlockT, +{ fn default() -> Self { ProofRecorder { recorder: Default::default(), diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 0a7ac6a3b8111..783aeb29eecc0 100644 --- 
a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -259,7 +259,7 @@ pub trait RecProofBackend: crate::backend::Backend fn extract_recorder(self) -> (RecordBackendFor, ProofInput); /// Extract from the state and input. - fn extract_proof_reg( + fn extract_proof_rec( recorder_state: &RecordBackendFor, input: ProofInput, ) -> Result, Box>; @@ -362,7 +362,11 @@ impl<'a, T, H> Backend for &'a T (*self).usage_info() } - fn from_previous_rec_state(self, _previous: RecordBackendFor, _input: ProofInput) -> Option { + fn from_previous_rec_state( + self, + _previous: RecordBackendFor, + _input: ProofInput, + ) -> Option { // cannot move out of reference, consider cloning when needed. None } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index cc1ecd8fc8912..b858996736d89 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -207,7 +207,12 @@ impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> { type Overlay = MemoryDB; - fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + _child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index a7859c9ef6313..9cc3df17ae304 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1324,7 +1324,10 @@ mod tests { std::iter::empty::<(_, _, std::iter::Empty<_>)>(), true, ); - let remote_proof = >::extract_proof(&recorder, input).unwrap(); + let remote_proof = >::extract_proof( + &recorder, + input, + ).unwrap(); let input_check = ProofInput::query_plan_with_values( remote_root.encode(), @@ -1348,15 +1351,26 @@ mod tests { let input = ProofInput::query_plan( remote_root.encode(), 
vec![b"value2".to_vec()].into_iter(), - vec![(child_info.clone(), remote_root_child.encode(), vec![b"value3".to_vec()].into_iter())].into_iter(), + vec![( + child_info.clone(), + remote_root_child.encode(), + vec![b"value3".to_vec()].into_iter(), + )].into_iter(), include_roots, ); - let remote_proof = >::extract_proof(&recorder, input).unwrap(); + let remote_proof = >::extract_proof( + &recorder, + input, + ).unwrap(); let input_check = ProofInput::query_plan_with_values( remote_root.encode(), vec![(b"value2".to_vec(), Some(vec![24u8]))].into_iter(), - vec![(child_info.clone(), remote_root_child.encode(), vec![(b"value3".to_vec(), Some(vec![142u8]))].into_iter())].into_iter(), + vec![( + child_info.clone(), + remote_root_child.encode(), + vec![(b"value3".to_vec(), Some(vec![142u8]))].into_iter(), + )].into_iter(), include_roots, ); @@ -1365,7 +1379,11 @@ mod tests { let input_check = ProofInput::query_plan_with_values( remote_root.encode(), vec![(b"value2".to_vec(), Some(vec![24u8]))].into_iter(), - vec![(child_info.clone(), remote_root_child.encode(), vec![(b"value3".to_vec(), Some(vec![142u8]))].into_iter())].into_iter(), + vec![( + child_info.clone(), + remote_root_child.encode(), + vec![(b"value3".to_vec(), Some(vec![142u8]))].into_iter(), + )].into_iter(), !include_roots, // not including child root in parent breaks extract ); @@ -1396,12 +1414,20 @@ mod tests { let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally - let local_result1 = read_proof_check::, BlakeTwo256, _>( + let local_result1 = read_proof_check::< + InMemoryProofCheckBackend, + BlakeTwo256, + _, + >( remote_root, remote_proof.clone().into(), &[b"value2"], ).unwrap(); - let local_result2 = read_proof_check::, BlakeTwo256, _>( + let local_result2 = read_proof_check::< + InMemoryProofCheckBackend, + BlakeTwo256, + _, + >( remote_root, remote_proof.clone().into(), &[&[0xff]], @@ -1421,13 +1447,21 @@ mod tests { child_info, &[b"value3"], ).unwrap(); - let 
local_result1 = read_child_proof_check::, BlakeTwo256, _>( + let local_result1 = read_child_proof_check::< + InMemoryProofCheckBackend, + BlakeTwo256, + _, + >( remote_root, remote_proof.clone().into(), child_info, &[b"value3"], ).unwrap(); - let local_result2 = read_child_proof_check::, BlakeTwo256, _>( + let local_result2 = read_child_proof_check::< + InMemoryProofCheckBackend, + BlakeTwo256, + _, + >( remote_root, remote_proof.clone().into(), child_info, @@ -1461,12 +1495,20 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally - let local_result1 = read_proof_check::, BlakeTwo256, _>( + let local_result1 = read_proof_check::< + InMemoryFullProofCheckBackend, + BlakeTwo256, + _, + >( remote_root, remote_proof.clone().into(), &[b"value2"], ).unwrap(); - let local_result2 = read_proof_check::, BlakeTwo256, _>( + let local_result2 = read_proof_check::< + InMemoryFullProofCheckBackend, + BlakeTwo256, + _, + >( remote_root, remote_proof.clone().into(), &[&[0xff]], diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index d4f8d25d099c8..66790399a60c0 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -259,7 +259,10 @@ impl RecProofBackend for ProvingBackend (recorder, input) } - fn extract_proof_reg(recorder_state: &RecordBackendFor, input: ProofInput) -> Result, Box> { + fn extract_proof_rec( + recorder_state: &RecordBackendFor, + input: ProofInput, + ) -> Result, Box> { <>::ProofRaw>::extract_proof( recorder_state, input, diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 05233e4d9ee0a..03c385d9a3de5 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -32,10 +32,12 @@ 
use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::{Decode, Encode}; use parking_lot::RwLock; +type Result = std::result::Result; + /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } /// Patricia trie-based pairs storage essence. @@ -68,7 +70,10 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Get trie backend for child trie. - pub fn child_backend<'a>(&'a self, child_info: &'a ChildInfo) -> ChildTrieBackendEssence<'a, S, H> { + pub fn child_backend<'a>( + &'a self, + child_info: &'a ChildInfo, + ) -> ChildTrieBackendEssence<'a, S, H> { ChildTrieBackendEssence{ essence: self, child_info: Some(child_info), @@ -121,12 +126,15 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. - pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { + pub fn next_storage_key(&self, key: &[u8]) -> Result> { self.next_storage_key_from_root(&self.root, None, key) } /// Access the root of the child storage in its parent trie - pub(crate) fn child_root_encoded(&self, child_info: &ChildInfo) -> Result, String> { + pub(crate) fn child_root_encoded( + &self, + child_info: &ChildInfo, + ) -> Result> { if let Some(cache) = self.register_roots.as_ref() { if let Some(result) = cache.read().get(child_info) { return Ok(result.clone()); @@ -143,7 +151,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: &ChildInfo) -> Result, String> { + fn child_root(&self, child_info: &ChildInfo) -> Result> { if let Some(cache) = self.register_roots.as_ref() { if let Some(root) = cache.read().get(child_info) { let root = root.as_ref() @@ -169,7 +177,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: 
&self, child_info: &ChildInfo, key: &[u8], - ) -> Result, String> { + ) -> Result> { let hash = match self.child_root(child_info)? { Some(child_root) => child_root, None => return Ok(None), @@ -184,7 +192,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, child_info: Option<&ChildInfo>, key: &[u8], - ) -> Result, String> { + ) -> Result> { let dyn_eph: &dyn hash_db::HashDBRef<_, _>; let keyspace_eph; let top_backend; @@ -228,7 +236,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Get the value of storage at given key. - pub fn storage(&self, key: &[u8]) -> Result, String> { + pub fn storage(&self, key: &[u8]) -> Result> { let map_e = |e| format!("Trie lookup error: {}", e); read_trie_value::, _>(&self.top_backend(), &self.root, key).map_err(map_e) @@ -239,14 +247,18 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &self, child_info: &ChildInfo, key: &[u8], - ) -> Result, String> { + ) -> Result> { let root = self.child_root_encoded(child_info)? .unwrap_or(empty_child_trie_root::>().encode()); let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(child_info.keyspace(), &self.child_backend(child_info), &root, key) - .map_err(map_e) + read_child_trie_value::, _>( + child_info.keyspace(), + &self.child_backend(child_info), + &root, + key, + ).map_err(map_e) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. @@ -304,7 +316,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: mut f: F, child_info: Option<&ChildInfo>, ) { - let mut iter = move |db| -> Result<(), Box>> { + let mut iter = move |db| -> std::result::Result<(), Box>> { let trie = TrieDB::::new(db, root)?; for x in TrieDBIterator::new_prefixed(&trie, prefix)? { @@ -423,13 +435,13 @@ pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. 
- fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result>; } impl<'a, H: Hasher, S: TrieBackendStorage> TrieBackendStorage for &'a S { type Overlay = S::Overlay; - fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result> { >::get(self, child_info, key, prefix) } } @@ -438,7 +450,7 @@ impl<'a, H: Hasher, S: TrieBackendStorage> TrieBackendStorage for &'a S { impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result> { Storage::::get(self.deref(), key, prefix) } } @@ -447,7 +459,7 @@ impl TrieBackendStorage for Arc> { impl TrieBackendStorage for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -455,7 +467,7 @@ impl TrieBackendStorage for PrefixedMemoryDB { impl TrieBackendStorage for MemoryDB { type Overlay = MemoryDB; - fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -468,7 +480,7 @@ impl TrieBackendStorage for ChildrenProofMap> { child_info: &ChildInfo, key: &H::Out, prefix: Prefix, - ) -> Result, String> { + ) -> Result> { let child_info_proof = child_info.proof_info(); Ok(self.deref().get(&child_info_proof).and_then(|s| hash_db::HashDB::get(s, key, prefix) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 83a64a6c6aeab..27a95a5c4be7d 
100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -34,12 +34,14 @@ pub use error::Error; pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -pub use storage_proof::{Common as ProofCommon, ChildrenProofMap, simple::ProofNodes, compact::FullForMerge, - compact::Flat as CompactProof, simple::Full as SimpleFullProof, compact::Full as CompactFullProof, - query_plan::KnownQueryPlanAndValues as QueryPlanProof, Verifiable as VerifiableProof, - Input as ProofInput, InputKind as ProofInputKind, RecordMapTrieNodes, Recordable as RecordableProof, FullBackendProof, - BackendProof, Mergeable as MergeableProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, - multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, simple::Flat as SimpleProof}; +pub use storage_proof::{Common as ProofCommon, ChildrenProofMap, simple::ProofNodes, + compact::FullForMerge, compact::Flat as CompactProof, simple::Full as SimpleFullProof, + compact::Full as CompactFullProof, query_plan::KnownQueryPlanAndValues as QueryPlanProof, + Verifiable as VerifiableProof, Input as ProofInput, InputKind as ProofInputKind, + RecordMapTrieNodes, Recordable as RecordableProof, FullBackendProof, BackendProof, + Mergeable as MergeableProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, + multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, + simple::Flat as SimpleProof}; /// Various re-exports from the `trie-db` crate. 
pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 825ec89d9ab69..346e7606ec5ea 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -158,14 +158,21 @@ impl Input { pub fn query_plan_with_values( top_encoded_root: Vec, top: impl Iterator, Option>)>, - children: impl Iterator, impl Iterator, Option>)>)>, + children: impl Iterator, + impl Iterator, Option>)>, + )>, include_child_root: bool, ) -> Input { let mut result = ChildrenProofMap::default(); let mut additional_roots = Vec::new(); for (child_info, encoded_root, key_values) in children { if include_child_root { - additional_roots.push((child_info.prefixed_storage_key().into_inner(), Some(encoded_root.clone()))); + additional_roots.push(( + child_info.prefixed_storage_key().into_inner(), + Some(encoded_root.clone()), + )); } result.insert(child_info.proof_info(), (encoded_root, key_values.collect())); } From 75d03f7d279100c0c51243b5ae8c9c3a2a449e26 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 15 Jun 2020 12:21:46 +0200 Subject: [PATCH 177/185] test ui line mismatch --- primitives/api/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 5a40fb2fe9437..87d56f5c16677 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -71,7 +71,9 @@ pub use sp_std::{slice, mem}; use sp_std::result; #[doc(hidden)] pub use codec::{Encode, Decode}; +#[doc(hidden)] use sp_core::OpaqueMetadata; +#[doc(hidden)] #[cfg(feature = "std")] use std::{panic::UnwindSafe, cell::RefCell}; @@ -308,6 +310,7 @@ pub type StorageTransactionCache = >>::Transaction, HashFor, NumberFor >; +/// A type containing storage changes. 
#[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges< @@ -373,7 +376,7 @@ pub trait ApiExt: ApiErrorExt { pred: P, ) -> Result where Self: Sized; - /// Start recording all accessed trie nodes for generating proofs. + /// Start record a proof. fn record_proof(&mut self); /// Extract the recorded proof. From de341c130b09f93f7e16531008f40393981e9926 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 15 Jun 2020 15:16:22 +0200 Subject: [PATCH 178/185] restore doc test --- client/basic-authorship/src/lib.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 02b4fb366320b..63020c0e68af7 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -22,13 +22,10 @@ //! //! ``` //! # use sc_basic_authorship::ProposerFactory; -//! # use sp_consensus::{Environment, Proposer, RecordProof, StorageProofKind}; +//! # use sp_consensus::{Environment, Proposer, RecordProof}; //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; -//! # use substrate_test_runtime_client::{ -//! # runtime::{Extrinsic, Transfer}, AccountKeyring, -//! # DefaultTestClientBuilderExt, TestClientBuilderExt, -//! # }; +//! # use substrate_test_runtime_client::{self, runtime::{Extrinsic, Transfer}, AccountKeyring}; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; //! # let client = Arc::new(substrate_test_runtime_client::new()); //! # let txpool = Arc::new(BasicPool::new(Default::default(), Arc::new(FullChainApi::new(client.clone())), None).0); @@ -49,7 +46,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes(StorageProofKind::Flat), +//! RecordProof::Yes, //! ); //! //! // We wait until the proposition is performed. 
From f9474f62c7e3efd79c77519cffc9a907597d6ae9 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jun 2020 11:43:02 +0200 Subject: [PATCH 179/185] Expose proof from in memory backend type. --- client/api/src/cht.rs | 9 +- client/api/src/in_mem.rs | 4 +- client/db/src/storage_cache.rs | 92 ++++++++++--------- client/finality-grandpa/src/tests.rs | 10 +- client/light/src/backend.rs | 3 +- client/light/src/call_executor.rs | 3 +- client/light/src/fetcher.rs | 3 +- client/light/src/lib.rs | 2 + client/service/test/src/client/mod.rs | 6 +- .../proc-macro/src/mock_impl_runtime_apis.rs | 5 +- primitives/api/src/lib.rs | 2 +- .../state-machine/src/changes_trie/build.rs | 6 +- primitives/state-machine/src/ext.rs | 2 +- primitives/state-machine/src/lib.rs | 5 +- .../state-machine/src/overlayed_changes.rs | 2 +- primitives/state-machine/src/testing.rs | 4 +- 16 files changed, 87 insertions(+), 71 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 193ae43148b44..4f2834f9bad15 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -32,9 +32,8 @@ use sp_trie; use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; use sp_state_machine::{ - backend::Backend as StateBackend, SimpleProof as StorageProof, + backend::Backend as StateBackend, SimpleProof, InMemoryBackend, prove_read_on_proof_backend, read_proof_check, read_proof_check_on_proving_backend, - SimpleProof, InMemoryBackend, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -106,7 +105,7 @@ pub fn build_proof( cht_num: Header::Number, blocks: BlocksI, hashes: HashesI -) -> ClientResult +) -> ClientResult where Header: HeaderT, Hasher: hash_db::Hasher, @@ -118,7 +117,7 @@ pub fn build_proof( .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); let 
proof_backend = storage.as_proof_backend() .expect("InMemoryState::as_proof_backend always returns Some; qed"); prove_read_on_proof_backend( @@ -132,7 +131,7 @@ pub fn check_proof( local_root: Header::Hash, local_number: Header::Number, remote_hash: Header::Hash, - remote_proof: StorageProof, + remote_proof: SimpleProof, ) -> ClientResult<()> where Header: HeaderT, diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 56825420931d4..c7cf2602e7b10 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -29,7 +29,7 @@ use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor}; use sp_runtime::{Justification, Storage}; use sp_state_machine::{ - ChangesTrieTransaction, InMemoryBackend, backend::Backend as StateBackend, StorageCollection, + ChangesTrieTransaction, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection, }; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; @@ -44,6 +44,8 @@ use crate::{ leaves::LeafSet, }; +type InMemoryBackend = sp_state_machine::InMemoryBackend; + struct PendingBlock { block: StoredBlock, state: NewBlockState, diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index a51a13ecbc360..08213be8310c7 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -890,10 +890,13 @@ impl SyncingCachingState { mod tests { use super::*; use sp_runtime::{ - traits::BlakeTwo256, testing::{H256, Block as RawBlock, ExtrinsicWrapper}, }; - use sp_state_machine::InMemoryBackend; + + type InMemoryBackend = sp_state_machine::InMemoryBackend< + sp_runtime::traits::BlakeTwo256, + sp_state_machine::SimpleProof, + >; type Block = RawBlock>; @@ -915,7 +918,7 @@ mod tests { // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] // state [ 5 5 4 3 2 2 ] let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent), ); @@ -930,14 +933,14 @@ mod 
tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h0), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h0), ); @@ -952,7 +955,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1b), ); @@ -967,7 +970,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1a), ); @@ -982,35 +985,35 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2a), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h3a), ); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1a), ); assert!(s.storage(&key).unwrap().is_none()); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2b), ); assert!(s.storage(&key).unwrap().is_none()); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1b), ); @@ -1019,7 +1022,7 @@ mod tests { // reorg to 3b // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2b), ); @@ -1033,7 +1036,7 @@ mod tests { true, ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h3a), ); @@ -1054,7 +1057,7 @@ mod tests { let shared = new_shared_cache::(256*1024, (0,1)); let mut s = CachingState::new( - 
InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent), ); @@ -1069,14 +1072,14 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); @@ -1091,7 +1094,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2b), ); @@ -1106,7 +1109,7 @@ mod tests { ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2a), ); @@ -1126,21 +1129,21 @@ mod tests { let shared = new_shared_cache::(256*1024, (0,1)); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2a), ); @@ -1155,14 +1158,14 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h2b), ); @@ -1177,7 +1180,7 @@ mod tests { ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h3a), ); @@ -1191,7 +1194,7 @@ mod tests { let h0 = H256::random(); let mut s = CachingState::new( - 
InMemoryBackend::::default(), shared.clone(), Some(root_parent.clone()), + InMemoryBackend::default(), shared.clone(), Some(root_parent.clone()), ); let key = H256::random()[..].to_vec(); @@ -1229,7 +1232,7 @@ mod tests { let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent), ); @@ -1273,7 +1276,7 @@ mod tests { let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(root_parent.clone()), ); @@ -1288,7 +1291,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h0), ); @@ -1303,7 +1306,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); @@ -1326,7 +1329,7 @@ mod tests { s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), shared.clone(), Some(h1), ); @@ -1341,11 +1344,12 @@ mod qc { use quickcheck::{quickcheck, TestResult, Arbitrary}; use super::*; - use sp_runtime::{ - traits::BlakeTwo256, - testing::{H256, Block as RawBlock, ExtrinsicWrapper}, - }; - use sp_state_machine::InMemoryBackend; + use sp_runtime::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; + + type InMemoryBackend = sp_state_machine::InMemoryBackend< + sp_runtime::traits::BlakeTwo256, + sp_state_machine::SimpleProof, + >; type Block = RawBlock>; @@ -1472,22 +1476,22 @@ mod qc { } } - fn head_state(&self, hash: H256) -> CachingState, Block> { + fn head_state(&self, hash: H256) -> CachingState { CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), self.shared.clone(), Some(hash), ) } - fn canon_head_state(&self) -> CachingState, Block> { + fn canon_head_state(&self) -> CachingState { 
self.head_state(self.canon.last().expect("Expected to be one commit").hash) } fn mutate_static( &mut self, action: Action, - ) -> CachingState, Block> { + ) -> CachingState { self.mutate(action).expect("Expected to provide only valid actions to the mutate_static") } @@ -1506,7 +1510,7 @@ mod qc { fn mutate( &mut self, action: Action, - ) -> Result, Block>, ()> { + ) -> Result, ()> { let state = match action { Action::Fork { depth, hash, changes } => { let pos = self.canon.len() as isize - depth as isize; @@ -1543,7 +1547,7 @@ mod qc { }; let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), self.shared.clone(), Some(parent), ); @@ -1582,7 +1586,7 @@ mod qc { } let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), self.shared.clone(), Some(parent_hash), ); @@ -1629,7 +1633,7 @@ mod qc { self.canon.push(node); let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::default(), self.shared.clone(), Some(fork_at), ); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 78ace05ac6e27..9aa06b737dc7a 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -52,9 +52,9 @@ use finality_proof::{ use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; use sc_consensus::LongestChain; -use sp_state_machine::SimpleProof as StorageProof; +use sp_state_machine::SimpleProof; -type ProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; +type ProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; type PeerData = Mutex< @@ -247,9 +247,9 @@ impl AuthoritySetForFinalityProver for TestApi { Ok(self.genesis_authorities.clone()) } - fn prove_authorities(&self, block: &BlockId) -> Result { + fn prove_authorities(&self, block: &BlockId) -> Result { let authorities = self.authorities(block)?; - let backend = >>::from(vec![ + let backend = , 
SimpleProof>>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); let proof = prove_read(backend, vec![b"authorities"]) @@ -263,7 +263,7 @@ impl AuthoritySetForFinalityChecker for TestApi { &self, _hash: ::Hash, header: ::Header, - proof: StorageProof, + proof: SimpleProof, ) -> Result { let results = read_proof_check::>, HashFor, _>( *header.state_root(), proof, vec![b"authorities"] diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 73983874d3eb6..c830833b152f7 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -29,7 +29,7 @@ use sp_core::ChangesTrieConfiguration; use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ - backend::{Backend as StateBackend, RecordBackendFor}, InMemoryBackend, ChangesTrieTransaction, + backend::{Backend as StateBackend, RecordBackendFor}, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; use sp_runtime::{generic::BlockId, Justification, Storage}; @@ -49,6 +49,7 @@ use sc_client_api::{ }; use super::blockchain::Blockchain; use hash_db::Hasher; +use super::InMemoryBackend; const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index 5eece98f4616f..082226f8b7980 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -30,8 +30,9 @@ use sp_runtime::{ use sp_externalities::Extensions; use sp_state_machine::{ self, OverlayedChanges, ExecutionStrategy, execution_proof_check_on_proof_backend, - ExecutionManager, CloneableSpawn, InMemoryBackend, + ExecutionManager, CloneableSpawn, }; +use super::InMemoryBackend; use sp_state_machine::backend::{Backend as StateBackend, ProofRawFor}; use hash_db::Hasher; use sp_state_machine::{SimpleProof as StorageProof, MergeableProof}; diff --git 
a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 436be0aa8840b..7f85bf76455f4 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -33,8 +33,9 @@ use sp_runtime::traits::{ use sp_state_machine::{ ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, CloneableSpawn, BackendProof, InMemoryBackend, + read_child_proof_check, CloneableSpawn, BackendProof, }; +use super::InMemoryBackend; pub use sp_state_machine::{SimpleProof as StorageProof, ProofCommon}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs index deea642bd39d0..367663adcdd37 100644 --- a/client/light/src/lib.rs +++ b/client/light/src/lib.rs @@ -30,6 +30,8 @@ pub mod fetcher; pub use {backend::*, blockchain::*, call_executor::*, fetcher::*}; +type InMemoryBackend = sp_state_machine::InMemoryBackend; + /// Create an instance of fetch data checker. 
pub fn new_fetch_checker>( blockchain: Arc>, diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 2124f0ced4122..dae7cef535adf 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -29,7 +29,7 @@ use substrate_test_runtime_client::{ BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, }; use sc_client_api::{ - StorageProvider, BlockBackend, in_mem, BlockchainEvents, + StorageProvider, BlockBackend, in_mem, BlockchainEvents, SimpleProof, }; use sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}; use sc_block_builder::BlockBuilderProvider; @@ -142,7 +142,7 @@ pub fn prepare_client_with_key_changes() -> ( } fn construct_block( - backend: &InMemoryBackend, + backend: &InMemoryBackend, number: BlockNumber, parent_hash: Hash, state_root: Hash, @@ -217,7 +217,7 @@ fn construct_block( (vec![].and(&Block { header, extrinsics: transactions }), hash) } -fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { +fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { construct_block( backend, 1, diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 00a35a9bd704a..4c8e6abb6eb7f 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -71,7 +71,10 @@ fn implement_common_api_traits( } impl #crate_::ApiExt<#block_type> for #self_ty { - type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; + type StateBackend = #crate_::InMemoryBackend< + #crate_::HashFor<#block_type>, + #crate_::SimpleProof, + >; fn map_api_result std::result::Result, R, E>( &self, diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 87d56f5c16677..e94c570aca65f 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -40,7 +40,7 @@ 
extern crate self as sp_api; #[cfg(feature = "std")] pub use sp_state_machine::{ OverlayedChanges, ProofCommon, backend::Backend as StateBackend, ChangesTrieState, InMemoryBackend, - ProofInput, backend::{ProofRawFor, RecProofBackend}, RecordableProof, + ProofInput, backend::{ProofRawFor, RecProofBackend}, RecordableProof, SimpleProof, }; #[doc(hidden)] #[cfg(feature = "std")] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index fde7e7b134c87..14c55d724049f 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -331,20 +331,20 @@ fn prepare_digest_input<'a, H, Number>( #[cfg(test)] mod test { use sp_core::Blake2Hasher; - use crate::InMemoryBackend; + use crate::{InMemoryBackend, SimpleProof}; use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; use super::*; fn prepare_for_build(zero: u64) -> ( - InMemoryBackend, + InMemoryBackend, InMemoryStorage, OverlayedChanges, Configuration, ) { let child_info_1 = ChildInfo::new_default(b"storage_key1"); let child_info_2 = ChildInfo::new_default(b"storage_key2"); - let backend: InMemoryBackend<_> = vec![ + let backend: InMemoryBackend<_, _> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), (vec![102], vec![255]), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 7e805250e726a..c3ed94f5c4a44 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -673,7 +673,7 @@ mod tests { }, InMemoryBackend, }; - type TestBackend = InMemoryBackend; + type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; fn prepare_overlay_with_changes() -> OverlayedChanges { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 
9cc3df17ae304..f80fb42fdd3e1 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -94,9 +94,10 @@ pub type ChangesTrieTransaction = ( ); /// Trie backend with in-memory storage. -pub type InMemoryBackend = TrieBackend, H, SimpleProof>; +pub type InMemoryBackend = TrieBackend, H, P>; /// Trie backend with in-memory storage and choice of proof. +/// TODO EMCH replace by InMemoryBackend when not specific to check pub type InMemoryProofCheckBackend = TrieBackend, H, P>; /// Trie backend with in-memory storage and choice of proof running over @@ -1058,7 +1059,7 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from(initial); let backend = state.as_proof_backend().unwrap(); let mut overlay = OverlayedChanges::default(); diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 69f7a54233304..628be1088e0d7 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -834,7 +834,7 @@ mod tests { (b"dogglesworth".to_vec(), b"catXXX".to_vec()), (b"doug".to_vec(), b"notadog".to_vec()), ].into_iter().collect(); - let backend = InMemoryBackend::::from(initial); + let backend = InMemoryBackend::::from(initial); let mut overlay = OverlayedChanges { committed: vec![ (b"dog".to_vec(), Some(b"puppy".to_vec()).into()), diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 71124a68bb5cf..a6d7ba527c631 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -21,7 +21,7 @@ use std::any::{Any, TypeId}; use codec::Decode; use hash_db::Hasher; use crate::{ - backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, + backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, 
StorageKey, StorageValue, changes_trie::{ Configuration as ChangesTrieConfiguration, @@ -40,6 +40,8 @@ use sp_core::{ use codec::Encode; use sp_externalities::{Extensions, Extension}; +type InMemoryBackend = crate::InMemoryBackend; + /// Simple HashMap-based Externalities impl. pub struct TestExternalities where From 10ec40857298161543f1513fceeda797958e146e Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jun 2020 14:39:29 +0200 Subject: [PATCH 180/185] line breaks. --- client/light/src/call_executor.rs | 4 ++- client/rpc/src/state/mod.rs | 3 +- client/rpc/src/state/state_full.rs | 3 +- client/service/src/chain_ops.rs | 3 +- client/service/test/src/client/light.rs | 21 ++++++++----- client/service/test/src/client/mod.rs | 10 ++++--- .../state-machine/src/changes_trie/mod.rs | 13 ++++++-- primitives/state-machine/src/lib.rs | 30 +++++-------------- .../state-machine/src/proving_backend.rs | 12 ++++++-- 9 files changed, 58 insertions(+), 41 deletions(-) diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index 082226f8b7980..813543b19da64 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -118,7 +118,9 @@ impl CallExecutor for initialize_block: InitializeBlock<'a, Block>, _manager: ExecutionManager, native_call: Option, - _recorder: Option<&RefCell>::State, Block>>>, + _recorder: Option<&RefCell< + ProofRecorder<>::State, Block> + >>, extensions: Option, ) -> ClientResult> where ExecutionManager: Clone { // there's no actual way/need to specify native/wasm execution strategy on light node diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 374b846c3f322..0e98f05546613 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -175,7 +175,8 @@ pub fn new_full( where Block: BlockT + 'static, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + Client: ExecutorProvider + StorageProvider + + ProofProvider + 
HeaderBackend + HeaderMetadata + BlockchainEvents + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 65e1a842f3d36..e240f5c183b94 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -218,7 +218,8 @@ impl FullState impl StateBackend for FullState where Block: BlockT + 'static, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + Client: ExecutorProvider + StorageProvider + + ProofProvider + HeaderBackend + HeaderMetadata + BlockchainEvents + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs index f68a9e41c0efd..ec87a5ce62ebb 100644 --- a/client/service/src/chain_ops.rs +++ b/client/service/src/chain_ops.rs @@ -35,7 +35,8 @@ use sp_consensus::{ import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, }; use sc_executor::{NativeExecutor, NativeExecutionDispatch}; -use sp_core::storage::{StorageKey, ChildType, ChildInfo, Storage, StorageChild, StorageMap, PrefixedStorageKey}; +use sp_core::storage::{StorageKey, ChildType, ChildInfo, Storage, StorageChild, StorageMap, + PrefixedStorageKey}; use sc_client_api::{StorageProvider, BlockBackend, UsageProvider}; use std::{io::{Read, Write, Seek}, pin::Pin, collections::HashMap}; diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 679ce4c115cb4..a529b57971618 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -233,7 +233,9 @@ impl CallExecutor for DummyCallExecutor { _initialize_block: InitializeBlock<'a, Block>, _execution_manager: ExecutionManager, _native_call: Option, - _proof_recorder: Option<&RefCell>::State, Block>>>, + _proof_recorder: Option<&RefCell< + ProofRecorder<>::State, Block> + >>, _extensions: Option, ) -> 
ClientResult> where ExecutionManager: Clone { unreachable!() @@ -577,12 +579,17 @@ fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { #[test] fn storage_read_proof_is_generated_and_checked() { let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::

{ - block: remote_block_header.hash(), - header: remote_block_header, - keys: vec![well_known_keys::HEAP_PAGES.to_vec()], - retry_count: None, - }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_read_proof(&RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, remote_read_proof).unwrap() + .remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], + heap_pages as u8, + ); } #[test] diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index dae7cef535adf..158e7d1b85ce1 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -18,7 +18,7 @@ use parity_scale_codec::{Encode, Decode, Joiner}; use sc_executor::native_executor_instance; -use sp_state_machine::{StateMachine, OverlayedChanges, ExecutionStrategy, InMemoryBackend}; +use sp_state_machine::{StateMachine, OverlayedChanges, ExecutionStrategy}; use substrate_test_runtime_client::{ prelude::*, runtime::{ @@ -29,7 +29,7 @@ use substrate_test_runtime_client::{ BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, }; use sc_client_api::{ - StorageProvider, BlockBackend, in_mem, BlockchainEvents, SimpleProof, + StorageProvider, BlockBackend, in_mem, BlockchainEvents, }; use sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}; use sc_block_builder::BlockBuilderProvider; @@ -56,6 +56,8 @@ use hex_literal::hex; mod light; mod db; +type InMemoryBackend = sp_state_machine::InMemoryBackend; + native_executor_instance!( Executor, substrate_test_runtime_client::runtime::api::dispatch, @@ -142,7 +144,7 @@ pub fn prepare_client_with_key_changes() -> ( } fn construct_block( - backend: &InMemoryBackend, + backend: &InMemoryBackend, number: BlockNumber, parent_hash: Hash, state_root: Hash, @@ -217,7 +219,7 @@ fn construct_block( (vec![].and(&Block { header, extrinsics: transactions }), hash) } -fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { +fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { construct_block( backend, 1, diff --git 
a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 75145d9da6c54..028386364f2e2 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -167,10 +167,19 @@ pub trait Storage: RootsStorage { /// Changes trie storage -> trie backend essence adapter. pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { +impl<'a, H, N> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> + where + H: Hasher, + N: BlockNumber, +{ type Overlay = sp_trie::MemoryDB; - fn get(&self, _child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + _child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index f80fb42fdd3e1..96ce6ff095d51 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -835,6 +835,7 @@ mod tests { use sp_runtime::traits::BlakeTwo256; use sp_trie::{Layout, SimpleProof, SimpleFullProof, BackendProof, FullBackendProof}; + type TestCheckBackend

= InMemoryProofCheckBackend; type CompactProof = sp_trie::CompactProof>; type CompactFullProof = sp_trie::CompactFullProof>; type QueryPlanProof = sp_trie::QueryPlanProof>; @@ -1035,7 +1036,7 @@ mod tests { ).unwrap(); // check proof locally - let local_result = execution_proof_check::, BlakeTwo256, u64, _>( + let local_result = execution_proof_check::, BlakeTwo256, u64, _>( remote_root, remote_proof.into(), &mut Default::default(), @@ -1314,7 +1315,8 @@ mod tests { let remote_backend = trie_backend::tests::test_trie_proof::(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_root_child = remote_backend.child_storage_root(child_info, std::iter::empty()).0; - let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]).unwrap(); + let (recorder, root_input) = prove_read_for_query_plan_check(remote_backend, &[b"value2"]) + .unwrap(); let mut root_map = ChildrenProofMap::default(); root_map.insert(ChildInfo::top_trie().proof_info(), remote_root.encode()); assert!(ProofInput::ChildTrieRoots(root_map) == root_input); @@ -1415,20 +1417,12 @@ mod tests { let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally - let local_result1 = read_proof_check::< - InMemoryProofCheckBackend, - BlakeTwo256, - _, - >( + let local_result1 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[b"value2"], ).unwrap(); - let local_result2 = read_proof_check::< - InMemoryProofCheckBackend, - BlakeTwo256, - _, - >( + let local_result2 = read_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), &[&[0xff]], @@ -1448,21 +1442,13 @@ mod tests { child_info, &[b"value3"], ).unwrap(); - let local_result1 = read_child_proof_check::< - InMemoryProofCheckBackend, - BlakeTwo256, - _, - >( + let local_result1 = read_child_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), child_info, &[b"value3"], ).unwrap(); - let local_result2 = 
read_child_proof_check::< - InMemoryProofCheckBackend, - BlakeTwo256, - _, - >( + let local_result2 = read_child_proof_check::, BlakeTwo256, _>( remote_root, remote_proof.clone().into(), child_info, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 66790399a60c0..ef04fc628d397 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -220,7 +220,12 @@ impl, H: Hasher, R: RecordBackend> TrieBackendStorag { type Overlay = S::Overlay; - fn get(&self, child_info: &ChildInfo, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + child_info: &ChildInfo, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { if let Some(v) = self.proof_recorder.read().get(child_info, key) { return Ok(v.clone()); } @@ -531,7 +536,10 @@ mod tests { let proof = proving.extract_proof().unwrap(); - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof.into()).unwrap(); + let proof_check = create_proof_check_backend::( + in_memory_root.into(), + proof.into(), + ).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); } From 7c789c9290cc6bfa8b7a7f6241da703d126909f5 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 16 Jun 2020 17:25:06 +0200 Subject: [PATCH 181/185] few style fix --- client/finality-grandpa/src/tests.rs | 20 ++++++++++---------- client/light/src/call_executor.rs | 2 ++ client/network/src/protocol/message.rs | 2 +- client/rpc/src/state/state_full.rs | 3 ++- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 9aa06b737dc7a..aa74636d0dd25 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -57,15 +57,15 @@ use sp_state_machine::SimpleProof; type ProofCheckBackend = sp_state_machine::InMemoryProofCheckBackend; type PeerData = -Mutex< -Option< -LinkHalf< 
-Block, -PeersFullClient, -LongestChain -> -> ->; + Mutex< + Option< + LinkHalf< + Block, + PeersFullClient, + LongestChain + > + > + >; type GrandpaPeer = Peer; struct GrandpaTestNet { @@ -254,7 +254,7 @@ impl AuthoritySetForFinalityProver for TestApi { ]); let proof = prove_read(backend, vec![b"authorities"]) .expect("failure proving read from in-memory storage backend"); - Ok(proof) + Ok(proof) } } diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index 813543b19da64..5c6bd0c44cb91 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -147,6 +147,8 @@ impl CallExecutor for initialize_block, ExecutionManager::NativeWhenPossible, native_call, + // we are not passing the recorder at it would invole some additional + // type constraint when the client do not support proving None, extensions, ).map_err(|e| ClientError::Execution(Box::new(e.to_string()))), diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 703fb5b4a56c4..07cfe00b8869a 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -29,7 +29,7 @@ pub use self::generic::{ FromBlock, RemoteReadChildRequest, Roles, }; -/// Forme storage proof type, to be replace by +/// Former storage proof type, to be replace by /// `use sc_client_api::StorageProof`; type StorageProof = Vec>; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index e240f5c183b94..d0311a8632854 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -364,7 +364,8 @@ impl StateBackend for FullState Date: Wed, 17 Jun 2020 10:01:01 +0200 Subject: [PATCH 182/185] Fix trait issue from merge. 
--- client/api/src/lib.rs | 3 ++- client/service/src/builder.rs | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 15170c4d84dd1..22d39648b60ef 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -37,7 +37,8 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{ProofCommon, SimpleProof, ExecutionStrategy, CloneableSpawn, ProofNodes}; +pub use sp_state_machine::{ProofCommon, SimpleProof, ExecutionStrategy, CloneableSpawn, + ProofNodes, BackendProof}; /// Usage Information Provider interface /// diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 3298d0b6b8e7c..93951dafeab99 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1384,6 +1384,8 @@ fn gen_handler( as ProvideRuntimeApi>::Api: sp_session::SessionKeys + sp_api::Metadata, + // This constraint should be lifted when client get generic over StateBackend and Proof + TBackend::State: StateBackend, StorageProof = SimpleProof>, { use sc_rpc::{chain, state, author, system, offchain}; @@ -1480,6 +1482,8 @@ fn build_network( TExPool: MaintainedTransactionPool::Hash> + 'static, TBackend: sc_client_api::backend::Backend + 'static, TImpQu: ImportQueue + 'static, + // This constraint should be lifted when client get generic over StateBackend and Proof + TBackend::State: StateBackend, StorageProof = SimpleProof>, { let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { imports_external_transactions: !matches!(config.role, Role::Light), From 53988c4faebe0bf8b30d3b384151d4d2c0b6ddfe Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 17 Jun 2020 10:17:33 +0200 Subject: [PATCH 183/185] New lines break (from merge) --- client/finality-grandpa/src/finality_proof.rs | 6 ++++-- client/rpc/src/state/mod.rs | 3 ++- client/rpc/src/state/state_full.rs | 3 ++- client/service/test/src/client/light.rs | 11 ++++++----- 4 files changed, 14 
insertions(+), 9 deletions(-) diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index e417e5f228313..c31bb95d7bde7 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -53,7 +53,8 @@ use sp_runtime::{ }; use sp_core::storage::StorageKey; use sc_telemetry::{telemetry, CONSENSUS_INFO}; -use sp_finality_grandpa::{AuthorityId, AuthorityList, VersionedAuthorityList, GRANDPA_AUTHORITIES_KEY}; +use sp_finality_grandpa::{AuthorityId, AuthorityList, VersionedAuthorityList, + GRANDPA_AUTHORITIES_KEY}; use crate::justification::GrandpaJustification; use crate::VoterSet; @@ -116,7 +117,8 @@ pub trait AuthoritySetForFinalityChecker: Send + Sync { } /// FetchChecker-based implementation of AuthoritySetForFinalityChecker. -impl AuthoritySetForFinalityChecker for Arc> { +impl AuthoritySetForFinalityChecker for Arc> + where Block: BlockT { fn check_authorities_proof( &self, hash: Block::Hash, diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 0e98f05546613..b309e1a1da701 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -40,7 +40,8 @@ use self::error::{Error, FutureResult}; pub use sc_rpc_api::state::*; pub use sc_rpc_api::child_state::*; -use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend, ProofProvider, SimpleProof}; +use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend, + ProofProvider, SimpleProof}; use sp_blockchain::{HeaderMetadata, HeaderBackend}; const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index d0311a8632854..f8e17d22c3545 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -26,7 +26,8 @@ use rpc::{Result as RpcResult, futures::{stream, Future, Sink, Stream, future::r use sc_rpc_api::state::ReadProof; use 
sc_client_api::backend::Backend; -use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, HeaderBackend}; +use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, + CachedHeaderMetadata, HeaderBackend}; use sc_client_api::BlockchainEvents; use sp_core::{ Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index a529b57971618..9b141113a562d 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -616,11 +616,12 @@ fn storage_child_read_proof_is_generated_and_checked() { #[test] fn header_proof_is_generated_and_checked() { let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::

{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); + assert_eq!((&local_checker as &dyn FetchChecker) + .check_header_proof(&RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); } #[test] From 996efa46537872547e4f9b88c769b0243ea26957 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 17 Jun 2020 11:57:08 +0200 Subject: [PATCH 184/185] doc update --- primitives/state-machine/src/backend.rs | 33 +++-- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/state-machine/src/lib.rs | 10 +- .../state-machine/src/proving_backend.rs | 21 +-- primitives/state-machine/src/trie_backend.rs | 8 +- primitives/trie/src/lib.rs | 2 +- primitives/trie/src/storage_proof/compact.rs | 6 +- primitives/trie/src/storage_proof/mod.rs | 124 ++++++------------ primitives/trie/src/storage_proof/multiple.rs | 4 +- 9 files changed, 79 insertions(+), 131 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 783aeb29eecc0..ea829539598ba 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -23,13 +23,13 @@ use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}}; use crate::{UsageInfo, StorageKey, StorageValue, StorageCollection}; use sp_trie::{ProofInput, BackendProof}; -/// Access the state of the proof backend of a backend. +/// Access the state of the recording proof backend of a backend. pub type RecordBackendFor = sp_trie::RecordBackendFor<>::StorageProof, H>; /// Access the raw proof of a backend. pub type ProofRawFor = <>::StorageProof as BackendProof>::ProofRaw; -/// Access the state of the proof backend of a backend. +/// Access the proof of a backend. pub type ProofFor = >::StorageProof; /// A state backend is used to read state data and can have changes committed @@ -43,13 +43,13 @@ pub trait Backend: Sized + std::fmt::Debug { /// Storage changes to be applied if committing type Transaction: Consolidate + Default + Send; - /// The actual proof produced. 
+ /// Proof to use with this backend. type StorageProof: BackendProof; - /// Type of backend for recording proof. + /// Associated backend for recording proof. type RecProofBackend: RecProofBackend; - /// Type of backend for using a proof. + /// Associated backend for using a proof. type ProofCheckBackend: ProofCheckBackend; /// Get keyed storage or None if there is nothing associated. @@ -171,8 +171,7 @@ pub trait Backend: Sized + std::fmt::Debug { } /// Try convert into a recording proof backend from previous recording state. - /// We can optionally use a previous proof backend to avoid having to merge - /// proof later. + /// Using a previous proof backend avoids a costier merge of proof later. fn from_previous_rec_state( self, previous: RecordBackendFor, @@ -247,25 +246,33 @@ pub trait GenesisStateBackend: Backend fn new(storage: sp_core::storage::Storage) -> Self; } -/// Backend used to register a proof record. +/// Backend used to record a proof. pub trait RecProofBackend: crate::backend::Backend where H: Hasher, { /// Extract proof after running operation to prove. + /// The proof extracted is raw and can be merge before + /// being converted into final proof format. fn extract_proof(&self) -> Result, Box>; - /// Get current recording state. + /// Extract current recording state. fn extract_recorder(self) -> (RecordBackendFor, ProofInput); - /// Extract from the state and input. + /// Extract proof from a recording state. fn extract_proof_rec( recorder_state: &RecordBackendFor, input: ProofInput, - ) -> Result, Box>; + ) -> Result, Box> { + use sp_trie::RecordableProof; + <>::ProofRaw>::extract_proof( + recorder_state, + input, + ).map_err(|e| Box::new(e) as Box) + } } -/// Backend used to utilize a proof. +/// Backend used to run a proof. 
pub trait ProofCheckBackend: Sized + crate::backend::Backend where H: Hasher, @@ -367,7 +374,7 @@ impl<'a, T, H> Backend for &'a T _previous: RecordBackendFor, _input: ProofInput, ) -> Option { - // cannot move out of reference, consider cloning when needed. + // Cannot move out of reference, consider cloning if needed. None } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index ff31eae9822fd..7f48b8196caa7 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -66,7 +66,7 @@ where new_in_mem_proof::() } -/// Create a new empty instance of in-memory backend, specifying proof type. +/// Create a new empty instance of in-memory backend, for a parameterized proof type. pub fn new_in_mem_proof() -> TrieBackend, H, P> where H::Out: Codec + Ord, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 96ce6ff095d51..f578b42799dc2 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -96,12 +96,10 @@ pub type ChangesTrieTransaction = ( /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H, P>; -/// Trie backend with in-memory storage and choice of proof. -/// TODO EMCH replace by InMemoryBackend when not specific to check +/// Proof check trie backend with in-memory storage. pub type InMemoryProofCheckBackend = TrieBackend, H, P>; -/// Trie backend with in-memory storage and choice of proof running over -/// separate child backends. +/// Proof check trie backend with in-memory storage using separate child backends. pub type InMemoryFullProofCheckBackend = TrieBackend>, H, P>; /// Strategy for executing a call into the runtime. @@ -649,7 +647,6 @@ where Ok(proof_backend.extract_recorder()) } - /// Generate child storage read proof. 
pub fn prove_child_read( backend: B, @@ -745,8 +742,6 @@ where Ok(proof_backend.extract_recorder()) } - - /// Check storage read proof, generated by `prove_read` call. pub fn read_proof_check( root: H::Out, @@ -1508,7 +1503,6 @@ mod tests { assert_eq!(local_result2, false); } - #[test] fn child_storage_uuid() { diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ef04fc628d397..2c75d00e6bdd3 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -184,8 +184,10 @@ impl ProvingBackend H::Out: Codec, P: BackendProof, { - /// Create new proving backend with the given recorder. - pub fn from_backend_with_recorder( + /// Create new proving backend from a given recorder. + /// This does not manage root registration and can + /// leave new recorder in a inconsistent state. + pub(crate) fn from_backend_with_recorder( backend: S, root: H::Out, proof_recorder: RecordBackendFor, @@ -263,16 +265,6 @@ impl RecProofBackend for ProvingBackend let recorder = self.trie_backend.into_storage().proof_recorder.into_inner(); (recorder, input) } - - fn extract_proof_rec( - recorder_state: &RecordBackendFor, - input: ProofInput, - ) -> Result, Box> { - <>::ProofRaw>::extract_proof( - recorder_state, - input, - ).map_err(|e| Box::new(e) as Box) - } } impl Backend for ProvingBackend @@ -401,7 +393,7 @@ impl Backend for ProvingBackend } } -/// Create flat proof check backend. +/// Create proof check backend. pub fn create_proof_check_backend( root: H::Out, proof: P, @@ -420,7 +412,8 @@ where } } -/// Create proof check backend. +/// Create proof check backend with different backend for each +/// child trie. 
pub fn create_full_proof_check_backend( root: H::Out, proof: P, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index db8fb1d4ef83e..f00b357517590 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -48,7 +48,7 @@ impl, H: Hasher, P> TrieBackend where H::Out: } /// Create a trie backend that also record visited trie roots. - /// to pack proofs and does small caching of child trie root)). + /// Visited trie roots allow packing proofs and does cache child trie roots. pub fn new_with_roots(storage: S, root: H::Out) -> Self { let register_roots = Some(RwLock::new(Default::default())); TrieBackend { @@ -76,9 +76,11 @@ impl, H: Hasher, P> TrieBackend where H::Out: ProofInput::None } } + /// Set previously registered roots. - /// Return false if conflict. - pub fn push_registered_roots(&self, previous: ChildrenProofMap>) -> bool { + /// Return false if there is some conflicting information (roots should not change + /// for a given `StateMachine` instante). 
+ pub(crate) fn push_registered_roots(&self, previous: ChildrenProofMap>) -> bool { if let Some(register_roots) = self.essence.register_roots() { let mut roots = register_roots.write(); for (child_info_proof, encoded_root) in previous { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 27a95a5c4be7d..2ee953220015e 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -38,7 +38,7 @@ pub use storage_proof::{Common as ProofCommon, ChildrenProofMap, simple::ProofNo compact::FullForMerge, compact::Flat as CompactProof, simple::Full as SimpleFullProof, compact::Full as CompactFullProof, query_plan::KnownQueryPlanAndValues as QueryPlanProof, Verifiable as VerifiableProof, Input as ProofInput, InputKind as ProofInputKind, - RecordMapTrieNodes, Recordable as RecordableProof, FullBackendProof, BackendProof, + Recordable as RecordableProof, FullBackendProof, BackendProof, Mergeable as MergeableProof, RecordBackend, multiple::FlatDefault as ProofFlatDefault, multiple::StorageProofKind, multiple::MultipleStorageProof as TrieNodesStorageProof, simple::Flat as SimpleProof}; diff --git a/primitives/trie/src/storage_proof/compact.rs b/primitives/trie/src/storage_proof/compact.rs index a036fcebd2764..3261130c72bcd 100644 --- a/primitives/trie/src/storage_proof/compact.rs +++ b/primitives/trie/src/storage_proof/compact.rs @@ -58,7 +58,7 @@ impl Clone for Flat { /// Compacted proof with child trie . /// -/// This currently mainly provided for test purpose and extensibility. +/// This currently mainly provided for test purpose and extensibility. #[derive(PartialEq, Eq, Clone, Encode, Decode)] pub struct Full(ChildrenProofMap, PhantomData); @@ -72,8 +72,6 @@ impl sp_std::fmt::Debug for Full { /// which is mergeable and can be converted to compact representation. /// /// This is needed mainly for technical reasons (merge then compact proofs). 
-/// (though if possible user should rather use a flat record -/// backend in the different context and avoid merge). #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct FullForMerge(ChildrenProofMap<(ProofMapTrieNodes, Vec)>); @@ -399,7 +397,7 @@ impl TryInto for Full { /// Container recording trie nodes and their encoded hash. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -pub struct ProofMapTrieNodes(pub BTreeMap, DBValue>); +struct ProofMapTrieNodes(pub BTreeMap, DBValue>); impl sp_std::default::Default for ProofMapTrieNodes { fn default() -> Self { diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 346e7606ec5ea..59c1a8954e06f 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -36,10 +36,10 @@ type CodecResult = sp_std::result::Result; #[cfg(feature = "std")] #[derive(PartialEq, Eq, Clone, Debug)] pub enum Error { - /// Error produce by storage proof logic. + /// Error produced by storage proof logic. /// It is formatted in std to simplify type. Proof(&'static str), - /// Error produce by trie manipulation. + /// Error produced by trie manipulation. Trie(String), } @@ -49,9 +49,9 @@ impl std::error::Error for Error { } #[cfg(not(feature = "std"))] #[derive(PartialEq, Eq, Clone, Debug)] pub enum Error { - /// Error produce by storage proof logic. + /// Error produced by storage proof logic. Proof, - /// Error produce by trie manipulation. + /// Error produced by trie manipulation. Trie, } @@ -68,7 +68,8 @@ impl sp_std::fmt::Display for Error { #[cfg(feature = "std")] impl sp_std::convert::From> for Error { fn from(e: sp_std::boxed::Box) -> Self { - // Only trie error is build from box so we use a tiny shortcut here. + // Only trie error is build from box so we do a tiny simplification here + // by generalizing. 
Error::Trie(format!("{}", e)) } } @@ -114,7 +115,7 @@ const fn incompatible_type() -> Error { #[derive(Clone, Eq, PartialEq)] -/// Additional information needed for packing or unpacking storage proof. +/// Additional information needed to manage a storage proof. /// These do not need to be part of the proof but are required /// when processing the proof. pub enum Input { @@ -237,6 +238,10 @@ pub trait Common: sp_std::fmt::Debug + Sized { } /// Trait for proofs that can be merged. +/// +/// Merging can be a non negligeable additional cost. +/// So when possible, user should rather share recording context +/// than merge multiple recorded proofs. pub trait Mergeable: Common { /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the input @@ -244,7 +249,7 @@ pub trait Mergeable: Common { fn merge(proofs: I) -> Self where I: IntoIterator; } -/// Trait for proofs that can be recorded against a trie backend. +/// Trait for proofs that can be recorded against a `RecordBackend`. pub trait Recordable: Common { /// Variant of enum input to use. const INPUT_KIND: InputKind; @@ -252,43 +257,47 @@ pub trait Recordable: Common { /// The data structure for recording proof entries. type RecordBackend: RecordBackend; - /// Extracts the gathered unordered encoded trie nodes. - /// Depending on `kind`, encoded trie nodes can change - /// (usually to compact the proof). + /// Extracts the gathered proof. + /// The input provided must match the kind specified by `Recordable::INPUT_KIND`. fn extract_proof(recorder: &Self::RecordBackend, input: Input) -> Result; } /// Proof that could be use as a backend to execute action -/// other a `MemoryDB`. +/// on a backend. pub trait BackendProof: Codec + Common { - /// Intermediate proof format before getting finalize + /// Intermediate proof format that is recorded + /// and mergeable. 
type ProofRaw: Recordable + Mergeable + Into; - /// Extract a flat trie db from the proof. - /// Fail on invalid proof content. + /// Extract a trie db from the proof. + /// This mainly allows running proof against + /// a trie backend (memorydb containing unordered + /// gathered encoded node in this case). + /// Can fail on invalid proof content. fn into_partial_db(self) -> Result>; } /// Proof that could be use as a backend to execute action -/// other one `MemoryDB` per child proofs. +/// on a backend, with a different backend per child proofs. pub trait FullBackendProof: BackendProof { - /// Extract a trie db with children info from the proof. - /// Fail on invalid proof content. + /// Extract a trie dbs with children info from the proof. + /// Can fail on invalid proof content. fn into_partial_full_db(self) -> Result>>; } -/// Trait for proofs that can use to create a partial trie backend. +/// Trait for proofs that simply provides validity information. pub trait Verifiable: Codec + Common { - /// Run proof validation when the proof allows immediate - /// verification. + /// Run proof validation, return verification result. + /// Error is returned for invalid input, or bad proof format. fn verify(self, input: &Input) -> Result; } -/// Trie encoded node recorder. -/// Note that this trait and other could use H::Out as generic parameter, -/// but currently use Hasher for code readability. +/// Trie encoded node recorder trait. +/// +/// This trait does not strictly need H as generic parameter and could use H::Out, +/// but currently use Hasher makes code more readable. pub trait RecordBackend: Send + Sync + Clone + Default { /// Access recorded value, allow using the backend as a cache. fn get(&self, child_info: &ChildInfo, key: &H::Out) -> Option>; @@ -298,15 +307,14 @@ pub trait RecordBackend: Send + Sync + Clone + Default { fn merge(&mut self, other: Self) -> bool; } -/// Records are separated by child trie, this is needed for -/// proof compaction. 
+/// Trie node recorder with child trie isolation, keeping child trie origin +/// is needed for proof compaction. pub struct FullRecorder(ChildrenMap>); -/// Single storage for all recoded nodes (as in +/// Trie node recorder with a single storage for all recoded nodes (as in /// state db column). -/// That this variant exists only for performance -/// (on less map access than in `Full`), but is not strictly -/// necessary. +/// This variant exists only for performance, but is not strictly necessary. +/// (`FullRecorder` cost an additional map access) pub struct FlatRecorder(RecordMapTrieNodes); impl Default for FlatRecorder { @@ -396,20 +404,6 @@ impl RecordBackend for FlatRecorder { } } -/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to -/// be traversed in any particular order. -pub struct StorageProofNodeIterator { - inner: > as IntoIterator>::IntoIter, -} - -impl Iterator for StorageProofNodeIterator { - type Item = Vec; - - fn next(&mut self) -> Option { - self.inner.next() - } -} - /// Type for storing a map of child trie proof related information. /// A few utilities methods are defined. #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] @@ -444,8 +438,8 @@ impl IntoIterator for ChildrenProofMap { } } -/// Container recording trie nodes. -pub struct RecordMapTrieNodes(HashMap>); +/// Container recording trie nodes. TODO EMCH make it a type alias. +struct RecordMapTrieNodes(HashMap>); impl sp_std::default::Default for RecordMapTrieNodes { fn default() -> Self { @@ -459,7 +453,6 @@ impl Clone for RecordMapTrieNodes { } } - impl sp_std::ops::Deref for RecordMapTrieNodes { type Target = HashMap>; @@ -483,42 +476,3 @@ impl HashDBRef for RecordMapTrieNodes { self.0.get(key).map(Option::is_some).unwrap_or(false) } } - -/// Container recording trie nodes and their encoded hash. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub struct ProofMapTrieNodes(pub HashMap, DBValue>); - -impl sp_std::default::Default for ProofMapTrieNodes { - fn default() -> Self { - ProofMapTrieNodes(Default::default()) - } -} - -impl sp_std::ops::Deref for ProofMapTrieNodes { - type Target = HashMap, DBValue>; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl sp_std::ops::DerefMut for ProofMapTrieNodes { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl HashDBRef for ProofMapTrieNodes - where - H::Out: Encode, -{ - fn get(&self, key: &H::Out, _prefix: hash_db::Prefix) -> Option { - let key = key.encode(); - self.0.get(&key).cloned() - } - - fn contains(&self, key: &H::Out, _prefix: hash_db::Prefix) -> bool { - let key = key.encode(); - self.0.contains_key(&key) - } -} diff --git a/primitives/trie/src/storage_proof/multiple.rs b/primitives/trie/src/storage_proof/multiple.rs index b544656a05887..7795e844318f5 100644 --- a/primitives/trie/src/storage_proof/multiple.rs +++ b/primitives/trie/src/storage_proof/multiple.rs @@ -6,7 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// ! Trie storage proofs allowing using different proofs. +// ! Enumeration to use different storage proofs from a single type. use super::*; use sp_std::convert::TryInto; @@ -202,7 +202,7 @@ impl Recordable for MultipleStorageProof H::Out: Codec, D: DefaultKind, { - // Actually one could ignore this if he knows its type to be non compact. + // This could be ignored in case it is knowned that the type is not compact. 
const INPUT_KIND: InputKind = InputKind::ChildTrieRoots; type RecordBackend = MultipleRecorder; From 45c5fcf57bb76c83cc3e3734f58bedff5da7f28e Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 17 Jun 2020 12:01:56 +0200 Subject: [PATCH 185/185] Remove todo --- primitives/trie/src/storage_proof/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/primitives/trie/src/storage_proof/mod.rs b/primitives/trie/src/storage_proof/mod.rs index 59c1a8954e06f..348026f04264a 100644 --- a/primitives/trie/src/storage_proof/mod.rs +++ b/primitives/trie/src/storage_proof/mod.rs @@ -438,7 +438,8 @@ impl IntoIterator for ChildrenProofMap { } } -/// Container recording trie nodes. TODO EMCH make it a type alias. +/// Container recording trie nodes. Only here to factor `HashDBRef` methods +/// between `FullRecorder` and `FlatRecorder`. struct RecordMapTrieNodes(HashMap>); impl sp_std::default::Default for RecordMapTrieNodes {