From 2d77ca1e512b660e8d6e43ea1ded2c94f25ba893 Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Mon, 9 Jun 2025 14:18:18 -0400 Subject: [PATCH 1/6] BlockImport impl --- internal/client/api/backend.go | 12 + .../client/consensus/common/block_import.go | 50 + .../client/consensus/grandpa/aux_schema.go | 89 ++ internal/client/consensus/grandpa/grandpa.go | 250 ++- internal/client/consensus/grandpa/import.go | 1383 +++++++++++++++++ .../client/consensus/grandpa/notification.go | 10 + .../client/utils/notification/notification.go | 15 + internal/primitives/api/api.go | 16 + .../primitives/consensus/grandpa/grandpa.go | 150 +- internal/primitives/crypto/hashing/hashing.go | 32 + internal/primitives/runtime/runtime.go | 15 + internal/primitives/version/version.go | 18 +- 12 files changed, 2035 insertions(+), 5 deletions(-) create mode 100644 internal/client/consensus/grandpa/import.go diff --git a/internal/client/api/backend.go b/internal/client/api/backend.go index 35b110fb1f..72aa11e7df 100644 --- a/internal/client/api/backend.go +++ b/internal/client/api/backend.go @@ -254,6 +254,18 @@ type AuxStore interface { GetAux(key []byte) ([]byte, error) } +// / Provides access to storage primitives +// pub trait StorageProvider> { +type StorageProvider[H runtime.Hash, N runtime.Number, Hasher runtime.Hasher[H]] interface { + /// Given a block's `Hash` and a key, return the value under the key in that block. + // fn storage( + // &self, + // hash: Block::Hash, + // key: &StorageKey, + // ) -> sp_blockchain::Result>; + Storage(hash H, key storage.StorageKey) (*storage.StorageData, error) +} + // Backend is the client backend. // // Manages the data layer. 
diff --git a/internal/client/consensus/common/block_import.go b/internal/client/consensus/common/block_import.go index a198bf36f2..2cf399c67d 100644 --- a/internal/client/consensus/common/block_import.go +++ b/internal/client/consensus/common/block_import.go @@ -183,3 +183,53 @@ type BlockImportParams[H runtime.Hash, N runtime.Number, E runtime.Extrinsic, He // Cached full header hash (with post-digests applied). PostHash *H } + +// / Get the full header hash (with post-digests applied). +// +// pub fn post_hash(&self) -> Block::Hash { +// if let Some(hash) = self.post_hash { +// hash +// } else { +// self.post_header().hash() +// } +// } +func (b *BlockImportParams[H, N, E, Header]) GetPostHash() H { + if b.PostHash != nil { + return *b.PostHash + } + return b.GetPostHeader().Hash() +} + +/// Get the post header. +// pub fn post_header(&self) -> Block::Header { +// if self.post_digests.is_empty() { +// self.header.clone() +// } else { +// let mut hdr = self.header.clone(); +// for digest_item in &self.post_digests { +// hdr.digest_mut().push(digest_item.clone()); +// } + +// hdr +// } +// } +func (b *BlockImportParams[H, N, E, Header]) GetPostHeader() Header { + if len(b.PostDigests) == 0 { + return b.Header.Clone().(Header) + } + hdr := b.Header.Clone().(Header) + for _, digestItem := range b.PostDigests { + hdr.DigestMut().Push(digestItem) + } + return hdr +} + +// / Check if this block contains state import action +// +// pub fn with_state(&self) -> bool { +// matches!(self.state_action, StateAction::ApplyChanges(StorageChanges::Import(_))) +// } +func (b *BlockImportParams[H, N, E, Header]) WithState() bool { + _, ok := b.StateAction.(StateActionApplyChanges) + return ok +} diff --git a/internal/client/consensus/grandpa/aux_schema.go b/internal/client/consensus/grandpa/aux_schema.go index 005e072a80..3d6ad757c4 100644 --- a/internal/client/consensus/grandpa/aux_schema.go +++ b/internal/client/consensus/grandpa/aux_schema.go @@ -7,6 +7,8 @@ import ( "fmt" 
"github.com/ChainSafe/gossamer/internal/client/api" + shareddata "github.com/ChainSafe/gossamer/internal/client/consensus/common/shared-data" + primitives "github.com/ChainSafe/gossamer/internal/primitives/consensus/grandpa" "github.com/ChainSafe/gossamer/internal/primitives/runtime" grandpa "github.com/ChainSafe/gossamer/pkg/finality-grandpa" "github.com/ChainSafe/gossamer/pkg/scale" @@ -21,12 +23,99 @@ var ( type writeAux func(insertions []api.KeyValue) error +type getGenesisAuthorities func() (primitives.AuthorityList, error) + // Persistent data kept between runs. type persistentData[H runtime.Hash, N runtime.Number] struct { authoritySet *SharedAuthoritySet[H, N] setState *SharedVoterSetState[H, N] } +func loadDecoded[T any](store api.AuxStore, key []byte) (*T, error) { + encodedValue, err := store.GetAux(key) + if err != nil { + return nil, err + } + if encodedValue == nil { + return nil, nil + } + + var dst T + err = scale.Unmarshal(encodedValue, &dst) + if err != nil { + return nil, err + } + return &dst, nil +} + +func loadPersistent[H runtime.Hash, N runtime.Number]( + store api.AuxStore, + genesisHash H, + genesisNumber N, + genesisAuths getGenesisAuthorities, +) (*persistentData[H, N], error) { + genesis := grandpa.HashNumber[H, N]{Hash: genesisHash, Number: genesisNumber} + makeGenesisRound := grandpa.NewRoundState[H, N] + + authSet, err := loadDecoded[AuthoritySet[H, N]](store, authoritySetKey) + if authSet != nil { + var setState voterSetState[H, N] + state, err := loadDecoded[voterSetStateVDT[H, N]](store, setStateKey) + if err != nil { + return nil, err + } + + if state != nil && state.inner != nil { + setState = state.inner + } else { + state := makeGenesisRound(genesis) + if state.PrevoteGHOST == nil { + panic("state is for completed round; completed rounds must have a prevote ghost; qed.") + } + base := state.PrevoteGHOST + setState = newVoterSetStateLive(primitives.SetID(authSet.SetID), *authSet, *base) + } + + return &persistentData[H, N]{ + 
authoritySet: &SharedAuthoritySet[H, N]{inner: *shareddata.NewSharedData(*authSet)}, + setState: &SharedVoterSetState[H, N]{inner: setState}, + }, nil + } + + logger.Info("👴 Loading GRANDPA authority set from genesis on what appears to be first startup") + genesisAuthorities, err := genesisAuths() + if err != nil { + return nil, err + } + genesisSet, err := NewGenesisAuthoritySet[H, N](genesisAuthorities) + if err != nil { + panic("genesis authorities is non-empty; all weights are non-zero; qed.") + } + + state := makeGenesisRound(genesis) + base := state.PrevoteGHOST + if base == nil { + panic("state is for completed round; completed rounds must have a prevote ghost; qed.") + } + + genesisState := newVoterSetStateLive(0, *genesisSet, *base) + genesisStateVDT := newVoterSetStateVDT[H, N]() + genesisStateVDT.inner = genesisState + insert := []api.KeyValue{ + {Key: authoritySetKey, Value: scale.MustMarshal(*genesisSet)}, + {Key: setStateKey, Value: scale.MustMarshal(genesisStateVDT)}, + } + err = store.InsertAux(insert, nil) + if err != nil { + return nil, err + } + + return &persistentData[H, N]{ + authoritySet: &SharedAuthoritySet[H, N]{inner: *shareddata.NewSharedData(*genesisSet)}, + setState: &SharedVoterSetState[H, N]{inner: genesisState}, + }, nil +} + // updateAuthoritySet Update the authority set on disk after a change. // // If there has just been a handoff, pass a newSet parameter that describes the handoff. 
set in all cases should diff --git a/internal/client/consensus/grandpa/grandpa.go b/internal/client/consensus/grandpa/grandpa.go index 8bb92d41ed..7f4af18bee 100644 --- a/internal/client/consensus/grandpa/grandpa.go +++ b/internal/client/consensus/grandpa/grandpa.go @@ -110,7 +110,7 @@ type ClientForGrandpa[ papi.ProvideRuntimeAPI[primitives.GrandpaAPI[H, N]] // api.ExecutorProvider client_common.BlockImport[H, N, E, Header] - // api.StorageProvider[H, N, Hasher] + api.StorageProvider[H, N, Hasher] } // Something that one can ask to do a block sync request. @@ -174,6 +174,254 @@ type GenesisAuthoritySetProvider interface { Get() (primitives.AuthorityList, error) } +// / Make block importer and link half necessary to tie the background voter +// / to it. +// / +// / The `justification_import_period` sets the minimum period on which +// / justifications will be imported. When importing a block, if it includes a +// / justification it will only be processed if it fits within this period, +// / otherwise it will be ignored (and won't be validated). This is to avoid +// / slowing down sync by a peer serving us unnecessary justifications which +// / aren't trivial to validate. 
+// pub fn block_import( +// +// client: Arc, +// justification_import_period: u32, +// genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, +// select_chain: SC, +// telemetry: Option, +// +// ) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> +// where +// +// SC: SelectChain, +// BE: Backend + 'static, +// Client: ClientForGrandpa + 'static, +// +// { +// block_import_with_authority_set_hard_forks( +// client, +// justification_import_period, +// genesis_authorities_provider, +// select_chain, +// Default::default(), +// telemetry, +// ) +// } +func BlockImport[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +]( + client ClientForGrandpa[H, N, Hasher, Header, E], + justificationImportPeriod uint32, + genesisAuthoritySetProvider GenesisAuthoritySetProvider, + selectChain common.SelectChain[H, N, Header], + // TODO: telemetry +) (*GrandpaBlockImport[H, N, Hasher, Header, E], LinkHalf[H, N, Hasher, Header, E], error) { + return blockImportWithAuthoritySetHardForks( + client, + justificationImportPeriod, + genesisAuthoritySetProvider, + selectChain, + nil, + ) +} + +// / A descriptor for an authority set hard fork. These are authority set changes +// / that are not signalled by the runtime and instead are defined off-chain +// / (hence the hard fork). +// pub struct AuthoritySetHardFork { +type AuthoritySetHardFork[H, N any] struct { + // /// The new authority set id. + // pub set_id: SetId, + SetID SetID + // /// The block hash and number at which the hard fork should be applied. + // pub block: (Block::Hash, NumberFor), + Block HashNumber[H, N] + // /// The authorities in the new set. + // pub authorities: AuthorityList, + Authorities primitives.AuthorityList + // /// The latest block number that was finalized before this authority set + // /// hard fork. When defined, the authority set change will be forced, i.e. 
+ // /// the node won't wait for the block above to be finalized before enacting + // /// the change, and the given finalized number will be used as a base for + // /// voting. + // pub last_finalized: Option>, + LastFinalized *N +} + +// / Make block importer and link half necessary to tie the background voter to +// / it. A vector of authority set hard forks can be passed, any authority set +// / change signaled at the given block (either already signalled or in a further +// / block when importing it) will be replaced by a standard change with the +// / given static authorities. +// pub fn block_import_with_authority_set_hard_forks( +// +// client: Arc, +// justification_import_period: u32, +// genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, +// select_chain: SC, +// authority_set_hard_forks: Vec>, +// telemetry: Option, +// +// ) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> +// where +// +// SC: SelectChain, +// BE: Backend + 'static, +// Client: ClientForGrandpa + 'static, +// +// { +func blockImportWithAuthoritySetHardForks[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +]( + client ClientForGrandpa[H, N, Hasher, Header, E], + justificationImportPeriod uint32, + genesisAuthoritySetProvider GenesisAuthoritySetProvider, + selectChain common.SelectChain[H, N, Header], + authoritySetHardForks []AuthoritySetHardFork[H, N], + // TODO: telemetry +) (*GrandpaBlockImport[H, N, Hasher, Header, E], LinkHalf[H, N, Hasher, Header, E], error) { + // let chain_info = client.info(); + // let genesis_hash = chain_info.genesis_hash; + chainInfo := client.Info() + genesisHash := chainInfo.GenesisHash + + // let persistent_data = + // aux_schema::load_persistent(&*client, genesis_hash, >::zero(), { + // let telemetry = telemetry.clone(); + // move || { + // let authorities = genesis_authorities_provider.get()?; + // telemetry!( + // telemetry; + // CONSENSUS_DEBUG; + // 
"afg.loading_authorities"; + // "authorities_len" => ?authorities.len() + // ); + // Ok(authorities) + // } + // })?; + persistentData, err := loadPersistent[H, N](client, genesisHash, 0, func() (primitives.AuthorityList, error) { + authorities, err := genesisAuthoritySetProvider.Get() + if err != nil { + return nil, err + } + return authorities, nil + }) + if err != nil { + return nil, LinkHalf[H, N, Hasher, Header, E]{}, err + } + + _ = persistentData + + // let (voter_commands_tx, voter_commands_rx) = + // tracing_unbounded("mpsc_grandpa_voter_command", 100_000); + voterCommands := make(chan voterCommand, 100000) + + // let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); + justificationSender, justificationStream := NewGrandpaJustificationSender[H, N, Header]() + + // create pending change objects with 0 delay for each authority set hard fork. + // let authority_set_hard_forks = authority_set_hard_forks + // .into_iter() + // .map(|fork| { + // let delay_kind = if let Some(last_finalized) = fork.last_finalized { + // authorities::DelayKind::Best { median_last_finalized: last_finalized } + // } else { + // authorities::DelayKind::Finalized + // }; + + // ( + // fork.set_id, + // authorities::PendingChange { + // next_authorities: fork.authorities, + // delay: Zero::zero(), + // canon_hash: fork.block.0, + // canon_height: fork.block.1, + // delay_kind, + // }, + // ) + // }) + // .collect(); + hardForks := make([]struct { + SetID + PendingChange[H, N] + }, len(authoritySetHardForks)) + for i, fork := range authoritySetHardForks { + var kind delayKind + if fork.LastFinalized != nil { + kind = delayKindBest[N]{MedianLastFinalized: *fork.LastFinalized} + } else { + kind = delayKindFinalized{} + } + + hardForks[i] = struct { + SetID + PendingChange[H, N] + }{ + SetID: SetID(fork.SetID), + PendingChange: PendingChange[H, N]{ + NextAuthorities: fork.Authorities, + Delay: 0, + CanonHash: fork.Block.Hash, + CanonHeight: 
fork.Block.Number, + DelayKind: kind, + }, + } + } + + // Ok(( + // + // GrandpaBlockImport::new( + // client.clone(), + // justification_import_period, + // select_chain.clone(), + // persistent_data.authority_set.clone(), + // voter_commands_tx, + // authority_set_hard_forks, + // justification_sender.clone(), + // telemetry.clone(), + // ), + // LinkHalf { + // client, + // select_chain, + // persistent_data, + // voter_commands_rx, + // justification_sender, + // justification_stream, + // telemetry, + // }, + // + // )) + + blockImport := newGrandpaBlockImport( + client, + justificationImportPeriod, + selectChain, + persistentData.authoritySet, + voterCommands, + hardForks, + justificationSender, + ) + + linkHalf := LinkHalf[H, N, Hasher, Header, E]{ + client: client, + selectChain: selectChain, + persistentData: *persistentData, + voterCommandsRx: voterCommands, + justificationSender: justificationSender, + justificationStream: justificationStream, + } + return blockImport, linkHalf, nil +} + func globalCommunication[ H runtime.Hash, N runtime.Number, diff --git a/internal/client/consensus/grandpa/import.go b/internal/client/consensus/grandpa/import.go new file mode 100644 index 0000000000..faf3edb1ed --- /dev/null +++ b/internal/client/consensus/grandpa/import.go @@ -0,0 +1,1383 @@ +package grandpa + +import ( + "fmt" + "sync" + + "github.com/ChainSafe/gossamer/internal/client/api" + "github.com/ChainSafe/gossamer/internal/client/api/utils" + client_common "github.com/ChainSafe/gossamer/internal/client/consensus/common" + shareddata "github.com/ChainSafe/gossamer/internal/client/consensus/common/shared-data" + "github.com/ChainSafe/gossamer/internal/primitives/blockchain" + "github.com/ChainSafe/gossamer/internal/primitives/consensus/common" + "github.com/ChainSafe/gossamer/internal/primitives/consensus/grandpa" + "github.com/ChainSafe/gossamer/internal/primitives/crypto/hashing" + "github.com/ChainSafe/gossamer/internal/primitives/runtime" + 
"github.com/ChainSafe/gossamer/internal/primitives/storage" + forktree "github.com/ChainSafe/gossamer/internal/utils/fork-tree" + "github.com/ChainSafe/gossamer/pkg/scale" +) + +// enum AppliedChanges { +type importAppliedChanges interface { + needsJustification() bool +} + +// Standard(bool), // true if the change is ready to be applied (i.e. it's a root) +type importAppliedChangesStandard bool + +// Forced(NewAuthoritySet), +type importAppliedChangesForced[H runtime.Hash, N runtime.Number] newAuthoritySet[H, N] + +// None, +type importAppliedChangesNone struct{} + +func (importAppliedChangesStandard) needsJustification() bool { + return true +} +func (importAppliedChangesForced[H, N]) needsJustification() bool { + return false +} +func (importAppliedChangesNone) needsJustification() bool { + return false +} + +// } + +type justInCase[H runtime.Hash, N runtime.Number] struct { + old AuthoritySet[H, N] + shareddata.SharedDataLocked[AuthoritySet[H, N]] +} + +// struct PendingSetChanges { +type pendingSetChanges[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], +] struct { + // just_in_case: Option<( + // + // AuthoritySet>, + // SharedDataLockedUpgradable>>, + // + // )>, + justInCase *justInCase[H, N] + // applied_changes: AppliedChanges>, + appliedChanges importAppliedChanges + // do_pause: bool, + doPause bool +} + +// revert the pending set change explicitly. 
+// fn revert(self) {} +func (pendingSetChanges[H, N, Hasher, Header]) revert() {} + +// fn defuse(mut self) -> (AppliedChanges>, bool) { +// self.just_in_case = None; +// let applied_changes = std::mem::replace(&mut self.applied_changes, AppliedChanges::None); +// (applied_changes, self.do_pause) +// } +func (psc *pendingSetChanges[H, N, Hasher, Header]) defuse() (importAppliedChanges, bool) { + psc.justInCase = nil + appliedChanges := psc.appliedChanges + psc.appliedChanges = importAppliedChangesNone{} + return appliedChanges, psc.doPause +} + +// impl Drop for PendingSetChanges { +// fn drop(&mut self) { +// if let Some((old_set, mut authorities)) = self.just_in_case.take() { +// *authorities.upgrade() = old_set; +// } +// } +// } +func (psc *pendingSetChanges[H, N, Hasher, Header]) drop() { + // if let Some((oldSet, mut authorities)) = self.justInCase.take() { + // *authorities.upgrade() = oldSet; + // } + if psc.justInCase != nil { + jic := psc.justInCase + psc.justInCase = nil + oldSet := jic.old + locked := jic.SharedDataLocked + *locked.MutRef() = oldSet + defer locked.Unlock() + } +} + +// / A block-import handler for GRANDPA. +// / +// / This scans each imported block for signals of changing authority set. +// / If the block being imported enacts an authority set change then: +// / - If the current authority set is still live: we import the block +// / - Otherwise, the block must include a valid justification. 
+// / +// / When using GRANDPA, the block import worker should be using this block import +// / object.im +// pub struct GrandpaBlockImport { +type GrandpaBlockImport[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +] struct { + // inner: Arc, + inner ClientForGrandpa[H, N, Hasher, Header, E] + // justification_import_period: u32, + justificationImportPeriod uint32 + // select_chain: SC, + selectChain common.SelectChain[H, N, Header] + // authority_set: SharedAuthoritySet>, + authoritySet *SharedAuthoritySet[H, N] + // send_voter_commands: TracingUnboundedSender>>, + sendVoterCommands chan voterCommand + // authority_set_hard_forks: + // + // Mutex>>>, + authoritySetHardForks map[H]PendingChange[H, N] + authoritySetHardForksMtx sync.Mutex + // + // justification_sender: GrandpaJustificationSender, + justificationSender GrandpaJustificationSender[H, N, Header] + // telemetry: Option, + // TODO: telemetry + // _phantom: PhantomData, +} + +// impl GrandpaBlockImport { +// pub(crate) fn new( +// inner: Arc, +// justification_import_period: u32, +// select_chain: SC, +// authority_set: SharedAuthoritySet>, +// send_voter_commands: TracingUnboundedSender>>, +// authority_set_hard_forks: Vec<(SetId, PendingChange>)>, +// justification_sender: GrandpaJustificationSender, +// telemetry: Option, +// ) -> GrandpaBlockImport { +func newGrandpaBlockImport[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +]( + inner ClientForGrandpa[H, N, Hasher, Header, E], + justificationImportPeriod uint32, + selectChain common.SelectChain[H, N, Header], + sharedAuthoritySet *SharedAuthoritySet[H, N], + sendVoterCommands chan voterCommand, + authoritySetHardForks []struct { + SetID + PendingChange[H, N] + }, + justificationSender GrandpaJustificationSender[H, N, Header], + // TODO: telemetry +) *GrandpaBlockImport[H, N, Hasher, Header, E] { + // 
check for and apply any forced authority set hard fork that applies + // to the *current* authority set. + // if let Some((_, change)) = authority_set_hard_forks + // .iter() + // .find(|(set_id, _)| *set_id == authority_set.set_id()) + // { + // authority_set.inner().current_authorities = change.next_authorities.clone(); + // } + for _, hardFork := range authoritySetHardForks { + setID := hardFork.SetID + if setID == SetID(sharedAuthoritySet.SetID()) { + authoritySet, unlock := sharedAuthoritySet.inner.DataMut() + // authoritySet.mtx.Lock() + authoritySet.CurrentAuthorities = hardFork.PendingChange.NextAuthorities + // authoritySet.mtx.Unlock() + unlock() + } + } + + // index authority set hard forks by block hash so that they can be used + // by any node syncing the chain and importing a block hard fork + // authority set changes. + // let authority_set_hard_forks = authority_set_hard_forks + // .into_iter() + // .map(|(_, change)| (change.canon_hash, change)) + // .collect::>(); + var authoritySetHardForksMap = make(map[H]PendingChange[H, N]) + for _, hardFork := range authoritySetHardForks { + authoritySetHardForksMap[hardFork.PendingChange.CanonHash] = hardFork.PendingChange + } + + // check for and apply any forced authority set hard fork that apply to + // any *pending* standard changes, checking by the block hash at which + // they were announced. 
+ // { + // let mut authority_set = authority_set.inner(); + + // authority_set.pending_standard_changes = + // authority_set.pending_standard_changes.clone().map(&mut |hash, _, original| { + // authority_set_hard_forks.get(hash).cloned().unwrap_or(original) + // }); + // } + // authoritySet.mtx.Lock() + // authSet := &authoritySet.inner + authoritySet, unlock := sharedAuthoritySet.inner.DataMut() + + authoritySet.PendingStandardChanges = forktree.Map(authoritySet.PendingStandardChanges, func(hash H, _ N, original PendingChange[H, N]) PendingChange[H, N] { + if change, ok := authoritySetHardForksMap[hash]; ok { + return change + } + return original + }) + // authoritySet.mtx.Unlock() + unlock() + + // GrandpaBlockImport { + // inner, + // justification_import_period, + // select_chain, + // authority_set, + // send_voter_commands, + // authority_set_hard_forks: Mutex::new(authority_set_hard_forks), + // justification_sender, + // telemetry, + // _phantom: PhantomData, + // } + return &GrandpaBlockImport[H, N, Hasher, Header, E]{ + inner: inner, + justificationImportPeriod: justificationImportPeriod, + selectChain: selectChain, + authoritySet: sharedAuthoritySet, + sendVoterCommands: sendVoterCommands, + authoritySetHardForks: authoritySetHardForksMap, + justificationSender: justificationSender, + } +} + +// / Checks the given header for a consensus digest signalling a **standard** scheduled change and +// / extracts it. 
+// pub fn find_scheduled_change( +// +// header: &B::Header, +// +// ) -> Option>> { +func FindScheduledChange[ + H runtime.Hash, + N runtime.Number, + Header runtime.Header[N, H], +]( + header Header, +) *grandpa.ScheduledChange[N] { + // let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + id := runtime.OpaqueDigestItemIDConsensus(grandpa.GrandpaEngineID) + + // let filter_log = |log: ConsensusLog>| match log { + // ConsensusLog::ScheduledChange(change) => Some(change), + // _ => None, + // }; + filterLog := func(log grandpa.ConsensusLog) *grandpa.ScheduledChange[N] { + scheduledChange, ok := log.(grandpa.ConensusLogScheduledChange[N]) + if !ok { + return nil + } + orig := grandpa.ScheduledChange[N](scheduledChange) + return &orig + } + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + // header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) + for _, log := range header.Digest().Logs { + logVDT := runtime.DigestItemTryTo[grandpa.ConsensusLogVDT[N]](log, id) + if logVDT == nil { + continue + } + val, err := logVDT.Value() + if err != nil { + continue + } + log := val.(grandpa.ConsensusLog) + sc := filterLog(log) + if sc != nil { + return sc + } + } + return nil +} + +type ForcedChange[H runtime.Hash, N runtime.Number] struct { + MedianLastFinalized N + grandpa.ScheduledChange[N] +} + +// / Checks the given header for a consensus digest signalling a **forced** scheduled change and +// / extracts it. 
+// pub fn find_forced_change( +// +// header: &B::Header, +// +// ) -> Option<(NumberFor, ScheduledChange>)> { +func FindForcedChange[ + H runtime.Hash, + N runtime.Number, + Header runtime.Header[N, H], +]( + header Header, +) *ForcedChange[H, N] { + // let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + id := runtime.OpaqueDigestItemIDConsensus(grandpa.GrandpaEngineID) + + // let filter_log = |log: ConsensusLog>| match log { + // ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), + // _ => None, + // }; + filterLog := func(log grandpa.ConsensusLog) *ForcedChange[H, N] { + forcedChange, ok := log.(grandpa.ConsensusLogForcedChange[N]) + if !ok { + return nil + } + return &ForcedChange[H, N]{ + MedianLastFinalized: forcedChange.Delay, + ScheduledChange: forcedChange.ScheduledChange, + } + } + + // // find the first consensus digest with the right ID which converts to + // // the right kind of consensus log. + // header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) + for _, log := range header.Digest().Logs { + logVDT := runtime.DigestItemTryTo[grandpa.ConsensusLogVDT[N]](log, id) + if logVDT == nil { + continue + } + val, err := logVDT.Value() + if err != nil { + continue + } + log := val.(grandpa.ConsensusLog) + fc := filterLog(log) + if fc != nil { + return fc + } + } + return nil +} + +// impl GrandpaBlockImport +// where +// +// NumberFor: finality_grandpa::BlockNumberOps, +// BE: Backend, +// Client: ClientForGrandpa, +// Client::Api: GrandpaApi, +// for<'a> &'a Client: BlockImport, +// +// { +// // check for a new authority set change. 
+// fn check_new_change( +// &self, +// header: &Block::Header, +// hash: Block::Hash, +// ) -> Option>> { +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) checkNewChange( + header Header, + hash H, +) *PendingChange[H, N] { + // check for forced authority set hard forks + // if let Some(change) = self.authority_set_hard_forks.lock().get(&hash) { + // return Some(change.clone()) + // } + gbi.authoritySetHardForksMtx.Lock() + change, ok := gbi.authoritySetHardForks[hash] + gbi.authoritySetHardForksMtx.Unlock() + if ok { + return &change + } + + // check for forced change. + // if let Some((median_last_finalized, change)) = find_forced_change::(header) { + // return Some(PendingChange { + // next_authorities: change.next_authorities, + // delay: change.delay, + // canon_height: *header.number(), + // canon_hash: hash, + // delay_kind: DelayKind::Best { median_last_finalized }, + // }) + // } + fc := FindForcedChange[H, N](header) + if fc != nil { + return &PendingChange[H, N]{ + NextAuthorities: fc.ScheduledChange.NextAuthorities, + Delay: fc.ScheduledChange.Delay, + CanonHeight: header.Number(), + CanonHash: hash, + DelayKind: delayKindBest[N]{MedianLastFinalized: fc.MedianLastFinalized}, + } + } + + // check normal scheduled change. 
+ // let change = find_scheduled_change::(header)?; + // Some(PendingChange { + // next_authorities: change.next_authorities, + // delay: change.delay, + // canon_height: *header.number(), + // canon_hash: hash, + // delay_kind: DelayKind::Finalized, + // }) + scheduled := FindScheduledChange[H, N](header) + return &PendingChange[H, N]{ + NextAuthorities: scheduled.NextAuthorities, + Delay: scheduled.Delay, + CanonHeight: header.Number(), + CanonHash: hash, + DelayKind: delayKindFinalized{}, + } +} + +// struct InnerGuard<'a, H, N> { +// old: Option>, +// guard: Option>>, +// } +type innerGuard[H runtime.Hash, N runtime.Number] struct { + old *AuthoritySet[H, N] + guard *shareddata.SharedDataLocked[AuthoritySet[H, N]] +} + +// impl<'a, H, N> InnerGuard<'a, H, N> { +// fn as_mut(&mut self) -> &mut AuthoritySet { +// self.guard.as_mut().expect("only taken on deconstruction; qed") +// } +func (ig *innerGuard[H, N]) asMut() *AuthoritySet[H, N] { + if ig.guard == nil { + panic("guard is nil; only taken on deconstruction; qed") + } + return ig.guard.MutRef() +} + +// fn set_old(&mut self, old: AuthoritySet) { +// if self.old.is_none() { +// // ignore "newer" old changes. +// self.old = Some(old); +// } +// } +func (ig *innerGuard[H, N]) setOld(old AuthoritySet[H, N]) { + if ig.old == nil { + // ignore "newer" old changes. 
+ ig.old = &old + } +} + +// fn consume( +// mut self, +// ) -> Option<(AuthoritySet, SharedDataLocked<'a, AuthoritySet>)> { +// self.old +// .take() +// .map(|old| (old, self.guard.take().expect("only taken on deconstruction; qed"))) +// } +// } +type consumed[H runtime.Hash, N runtime.Number] struct { + old AuthoritySet[H, N] + shareddata.SharedDataLocked[AuthoritySet[H, N]] +} + +func (ig *innerGuard[H, N]) consume() *consumed[H, N] { + old := ig.old + ig.old = nil + if old == nil { + return nil + } + if ig.guard == nil { + panic("guard is nil; only taken on deconstruction; qed") + } + return &consumed[H, N]{ + old: *old, + SharedDataLocked: *ig.guard, + } +} + +// impl<'a, H, N> Drop for InnerGuard<'a, H, N> { +// fn drop(&mut self) { +// if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { +// *guard = old; +// } +// } +// } +func (ig *innerGuard[H, N]) drop() { + // if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { + // *guard = old; + // } + guard := ig.guard + ig.guard = nil + old := ig.old + ig.old = nil + if guard != nil && old != nil { + *ig.guard.MutRef() = *ig.old + } +} + +// fn make_authorities_changes( +// +// &self, +// block: &mut BlockImportParams, +// hash: Block::Hash, +// initial_sync: bool, +// +// ) -> Result, ConsensusError> { +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( + block *client_common.BlockImportParams[H, N, E, Header], + hash H, + initialSync bool, +) (*pendingSetChanges[H, N, Hasher, Header], error) { + // when we update the authorities, we need to hold the lock + // until the block is written to prevent a race if we need to restore + // the old authority set on error or panic. 
+ + // let number = *(block.header.number()); + // let maybe_change = self.check_new_change(&block.header, hash); + number := block.Header.Number() + maybeChange := gbi.checkNewChange(block.Header, hash) + + // returns a function for checking whether a block is a descendent of another + // consistent with querying client directly after importing the block. + // let parent_hash = *block.header.parent_hash(); + // let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); + parentHash := block.Header.ParentHash() + isDescendentOf := utils.IsDescendantOf(gbi.inner, &utils.HashParent[H]{Hash: hash, Parent: parentHash}) + // let mut guard = InnerGuard { guard: Some(self.authority_set.inner_locked()), old: None }; + locked := gbi.authoritySet.inner.Locked() + defer locked.Unlock() + guard := innerGuard[H, N]{ + old: nil, + guard: &locked, + } + + // whether to pause the old authority set -- happens after import + // of a forced change block. + // let mut do_pause = false; + var doPause bool + + // add any pending changes. + // if let Some(change) = maybe_change { + // let old = guard.as_mut().clone(); + // guard.set_old(old); + + // if let DelayKind::Best { .. 
} = change.delay_kind { + // do_pause = true; + // } + + // guard + // .as_mut() + // .add_pending_change(change, &is_descendent_of) + // .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + // } + if maybeChange != nil { + change := *maybeChange + old := guard.asMut().Clone() + guard.setOld(old) + + if _, ok := change.DelayKind.(delayKindBest[N]); ok { + doPause = true + } + + err := guard.asMut().addPendingChange(change, isDescendentOf) + if err != nil { + return nil, err + } + } + + // let applied_changes = { + // let forced_change_set = guard + // .as_mut() + // .apply_forced_changes( + // hash, + // number, + // &is_descendent_of, + // initial_sync, + // self.telemetry.clone(), + // ) + // .map_err(|e| ConsensusError::ClientImport(e.to_string())) + // .map_err(ConsensusError::from)?; + var appliedChanges importAppliedChanges + forcedChangeSet, err := guard.asMut().applyForcedChanges( + hash, + number, + isDescendentOf, + ) + if err != nil { + return nil, err + } + + // if let Some((median_last_finalized_number, new_set)) = forced_change_set { + if forcedChangeSet != nil { + medianLastFinalizedNumber := forcedChangeSet.median + newSet := forcedChangeSet.set + // let new_authorities = { + // let (set_id, new_authorities) = new_set.current(); + setID, newAuthorities := newSet.current() + + // we will use the median last finalized number as a hint + // for the canon block the new authority set should start + // with. we use the minimum between the median and the local + // best finalized block. + // let best_finalized_number = self.inner.info().finalized_number; + // let canon_number = best_finalized_number.min(median_last_finalized_number); + // let canon_hash = self.inner.hash(canon_number) + // .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + // .expect( + // "the given block number is less or equal than the current best finalized number; \ + // current best finalized number must exist in chain; qed." 
+ // ); + bestFinalizedNumber := gbi.inner.Info().FinalizedNumber + canonNumber := bestFinalizedNumber + if medianLastFinalizedNumber < bestFinalizedNumber { + canonNumber = medianLastFinalizedNumber + } + canonHash, err := gbi.inner.Hash(canonNumber) + if err != nil { + return nil, err + } + if canonHash == nil { + panic("the given block number is less or equal than the current best finalized number; current best finalized number must exist in chain; qed.") + } + + // NewAuthoritySet { + // canon_number, + // canon_hash, + // set_id, + // authorities: new_authorities.to_vec(), + // } + // }; + // let old = ::std::mem::replace(guard.as_mut(), new_set); + // guard.set_old(old); + newAuthoritySet := newAuthoritySet[H, N]{ + CanonNumber: canonNumber, + CanonHash: *canonHash, + SetID: grandpa.SetID(setID), + Authorities: newAuthorities, + } + old := guard.asMut().Clone() + guard.setOld(old) + *guard.asMut() = newSet + + // AppliedChanges::Forced(new_authorities) + appliedChanges = importAppliedChangesForced[H, N](newAuthoritySet) + } else { + // let did_standard = guard + // .as_mut() + // .enacts_standard_change(hash, number, &is_descendent_of) + // .map_err(|e| ConsensusError::ClientImport(e.to_string())) + // .map_err(ConsensusError::from)?; + + // if let Some(root) = did_standard { + // AppliedChanges::Standard(root) + // } else { + // AppliedChanges::None + // } + // } + didStandard, err := guard.asMut().EnactsStandardChange(hash, number, isDescendentOf) + if err != nil { + return nil, err + } + + if didStandard != nil { + appliedChanges = importAppliedChangesStandard(*didStandard) + } else { + appliedChanges = importAppliedChangesNone{} + } + } + + // consume the guard safely and write necessary changes. 
+ // let just_in_case = guard.consume(); + justInCaseConsumed := guard.consume() + // if let Some((_, ref authorities)) = just_in_case { + if justInCaseConsumed != nil { + authorities := &justInCaseConsumed.SharedDataLocked + // let authorities_change = match applied_changes { + // AppliedChanges::Forced(ref new) => Some(new), + // AppliedChanges::Standard(_) => None, // the change isn't actually applied yet. + // AppliedChanges::None => None, + // }; + var authoritiesChange *newAuthoritySet[H, N] + switch appliedChanges := appliedChanges.(type) { + case importAppliedChangesForced[H, N]: + newSet := newAuthoritySet[H, N](appliedChanges) + authoritiesChange = &newSet + case importAppliedChangesStandard: + // the change isn't actually applied yet. + case importAppliedChangesNone: + // no change + default: + panic("unreachable") + } + // crate::aux_schema::update_authority_set::( + // authorities, + // authorities_change, + // |insert| { + // block + // .auxiliary + // .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + // }, + // ); + updateAuthoritySet(authorities.Data(), authoritiesChange, func(insertions []api.KeyValue) error { + converted := make([]api.AuxDataOperation, len(insertions)) + for i, kv := range insertions { + converted[i] = api.AuxDataOperation{ + Key: kv.Key, + Data: kv.Value, + } + } + block.Auxiliary = append(block.Auxiliary, converted...) + return nil + }) + } + + // let just_in_case = just_in_case.map(|(o, i)| (o, i.release_mutex())); + var jic *justInCase[H, N] + if justInCaseConsumed != nil { + jic = &justInCase[H, N]{ + old: justInCaseConsumed.old, + SharedDataLocked: justInCaseConsumed.SharedDataLocked, + } + } + + // Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) + return &pendingSetChanges[H, N, Hasher, Header]{ + justInCase: jic, + appliedChanges: appliedChanges, + doPause: doPause, + }, nil +} + +// /// Read current set id form a given state. 
+// fn current_set_id(&self, hash: Block::Hash) -> Result { +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) currentSetID(hash H) (grandpa.SetID, error) { + // let runtime_version = self.inner.runtime_api().version(hash).map_err(|e| { + // ConsensusError::ClientImport(format!( + // "Unable to retrieve current runtime version. {}", + // e + // )) + // })?; + runtimeVersion, err := gbi.inner.RuntimeAPI().Version(hash) + if err != nil { + return 0, err + } + + var GrandpaID = [8]byte{237, 153, 197, 172, 178, 94, 237, 245} + apiVersion := runtimeVersion.APIVersion(GrandpaID) + + // if runtime_version + // .api_version(&>::ID) + // .map_or(false, |v| v < 3) + // { + if apiVersion != nil && *apiVersion < 3 { + // The new API is not supported in this runtime. Try reading directly from storage. + // This code may be removed once warp sync to an old runtime is no longer needed. + // for prefix in ["GrandpaFinality", "Grandpa"] { + for _, prefix := range []string{"GrandpaFinality", "Grandpa"} { + // let k = [ + // sp_crypto_hashing::twox_128(prefix.as_bytes()), + // sp_crypto_hashing::twox_128(b"CurrentSetId"), + // ] + // .concat(); + k0 := hashing.Twox128([]byte(prefix)) + k1 := hashing.Twox128([]byte("CurrentSetId")) + k := k0[:] + k = append(k, k1[:]...) 
+ // if let Ok(Some(id)) = + // self.inner.storage(hash, &sc_client_api::StorageKey(k.to_vec())) + // { + // if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { + // return Ok(id) + // } + // } + id, _ := gbi.inner.Storage(hash, storage.StorageKey(k)) + if id != nil { + var setID grandpa.SetID + err := scale.Unmarshal(*id, &setID) + if err == nil { + return setID, nil + } + } + } + // Err(ConsensusError::ClientImport("Unable to retrieve current set id.".into())) + return 0, fmt.Errorf("unable to retrieve current set id") + } else { + // self.inner + // .runtime_api() + // .current_set_id(hash) + // .map_err(|e| ConsensusError::ClientImport(e.to_string())) + setID, err := gbi.inner.RuntimeAPI().CurrentSetID(hash) + if err != nil { + return 0, err + } + return setID, nil + } +} + +// /// Import whole new state and reset authority set. +// async fn import_state( +// +// &self, +// mut block: BlockImportParams, +// +// ) -> Result { +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importState( + block *client_common.BlockImportParams[H, N, E, Header], +) (client_common.ImportResult, error) { + // let hash = block.post_hash(); + // let number = *block.header.number(); + hash := block.GetPostHash() + number := block.Header.Number() + + // Force imported state finality. + // block.finalized = true; + // let import_result = (&*self.inner).import_block(block).await; + block.Finalized = true + importResult, err := gbi.inner.ImportBlock(block) + // match import_result { + if err == nil { + // Ok(ImportResult::Imported(aux)) => { + switch importResult := importResult.(type) { + case client_common.ImportResultImported: + aux := client_common.ImportedAux(importResult) + // We've just imported a new state. We trust the sync module has verified + // finality proofs and that the state is correct and final. + // So we can read the authority list and set id from the state. 
+ // self.authority_set_hard_forks.lock().clear(); + gbi.authoritySetHardForksMtx.Lock() + gbi.authoritySetHardForks = make(map[H]PendingChange[H, N]) + gbi.authoritySetHardForksMtx.Unlock() + // let authorities = self + // .inner + // .runtime_api() + // .grandpa_authorities(hash) + // .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + authorities, err := gbi.inner.RuntimeAPI().GrandpaAuthorities(hash) + if err != nil { + return nil, err + } + // let set_id = self.current_set_id(hash)?; + setID, err := gbi.currentSetID(hash) + if err != nil { + return nil, err + } + // let authority_set = AuthoritySet::new( + // authorities.clone(), + // set_id, + // fork_tree::ForkTree::new(), + // Vec::new(), + // AuthoritySetChanges::empty(), + // ) + // .ok_or_else(|| ConsensusError::ClientImport("Invalid authority list".into()))?; + authoritySet, err := NewAuthoritySet[H, N]( + authorities, + uint64(setID), + forktree.NewForkTree[H, N, PendingChange[H, N]](), + []PendingChange[H, N]{}, + AuthoritySetChanges[N]{}, + ) + if err != nil { + return nil, err + } + + // *self.authority_set.inner_locked() = authority_set.clone(); + locked := gbi.authoritySet.inner.Locked() + *locked.MutRef() = authoritySet.Clone() + defer locked.Unlock() + + // crate::aux_schema::update_authority_set::( + // &authority_set, + // None, + // |insert| self.inner.insert_aux(insert, []), + // ) + // .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + err = updateAuthoritySet( + locked.Data(), + nil, + func(insertions []api.KeyValue) error { + return gbi.inner.InsertAux(insertions, nil) + }, + ) + if err != nil { + return nil, err + } + // let new_set = + // NewAuthoritySet { canon_number: number, canon_hash: hash, set_id, authorities }; + // let _ = self + // .send_voter_commands + // .unbounded_send(VoterCommand::ChangeAuthorities(new_set)); + // Ok(ImportResult::Imported(aux)) + newSet := newAuthoritySet[H, N]{ + CanonNumber: number, + CanonHash: hash, + SetID: grandpa.SetID(setID), 
+ Authorities: authorities, + } + gbi.sendVoterCommands <- voterCommandChangeAuthorities[H, N](newSet) + return client_common.ImportResultImported(aux), nil + // }, + case client_common.ImportResultAlreadyInChain, + client_common.ImportResultKnownBad, + client_common.ImportResultMissingState, + client_common.ImportResultUnknownParent: + // Ok(r) => Ok(r), + return importResult, nil + default: + panic("unreachable") + } + + } else { + return nil, err + } + // Err(e) => Err(ConsensusError::ClientImport(e.to_string())), + // } + // } +} + +// impl BlockImport for GrandpaBlockImport +// where +// NumberFor: finality_grandpa::BlockNumberOps, +// BE: Backend, +// Client: ClientForGrandpa, +// Client::Api: GrandpaApi, +// for<'a> &'a Client: BlockImport, +// SC: Send + Sync, +// { +// type Error = ConsensusError; + +// async fn import_block( +// +// &self, +// mut block: BlockImportParams, +// +// ) -> Result { +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importBlock( + block *client_common.BlockImportParams[H, N, E, Header], +) (client_common.ImportResult, error) { + // let hash = block.post_hash(); + // let number = *block.header.number(); + hash := block.GetPostHash() + number := block.Header.Number() + + // early exit if block already in chain, otherwise the check for + // authority changes will error when trying to re-import a change block + // match self.inner.status(hash) { + // Ok(BlockStatus::InChain) => { + // // Strip justifications when re-importing an existing block. + // let _justifications = block.justifications.take(); + // return (&*self.inner).import_block(block).await + // }, + // Ok(BlockStatus::Unknown) => {}, + // Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + // } + status, err := gbi.inner.Status(hash) + if err != nil { + return nil, err + } + if status == blockchain.BlockStatusInChain { + // Strip justifications when re-importing an existing block. 
+ block.Justifications = nil + return gbi.inner.ImportBlock(block) + } + + // if block.with_state() { + // return self.import_state(block).await + // } + if block.WithState() { + return gbi.importState(block) + } + + // if number <= self.inner.info().finalized_number { + if number <= gbi.inner.Info().FinalizedNumber { + // Importing an old block. Just save justifications and authority set changes + // if self.check_new_change(&block.header, hash).is_some() { + // if block.justifications.is_none() { + // return Err(ConsensusError::ClientImport( + // "Justification required when importing \ + // an old block with authority set change." + // .into(), + // )) + // } + // let mut authority_set = self.authority_set.inner_locked(); + // authority_set.authority_set_changes.insert(number); + // crate::aux_schema::update_authority_set::( + // &authority_set, + // None, + // |insert| { + // block + // .auxiliary + // .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + // }, + // ); + // } + if gbi.checkNewChange(block.Header, hash) != nil { + if block.Justifications == nil { + return nil, fmt.Errorf("justification required when importing an old block with authority set change") + } + locked := gbi.authoritySet.inner.Locked() + authoritySet := locked.MutRef() + authoritySet.AuthoritySetChanges.insert(number) + err := updateAuthoritySet( + *authoritySet, + nil, + func(insertions []api.KeyValue) error { + converted := make([]api.AuxDataOperation, len(insertions)) + for i, kv := range insertions { + converted[i] = api.AuxDataOperation{ + Key: kv.Key, + Data: kv.Value, + } + } + block.Auxiliary = append(block.Auxiliary, converted...) + return nil + }, + ) + if err != nil { + return nil, err + } + } + // return (&*self.inner).import_block(block).await + return gbi.inner.ImportBlock(block) + } + + // on initial sync we will restrict logging under info to avoid spam. 
+ // let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; + initialSync := block.Origin == common.NetworkInitialSyncBlockOrigin + + // let pending_changes = self.make_authorities_changes(&mut block, hash, initial_sync)?; + pendingChanges, err := gbi.makeAuthoritiesChanges(block, hash, initialSync) + if err != nil { + return nil, err + } + defer pendingChanges.drop() + + // we don't want to finalize on `inner.import_block` + // let mut justifications = block.justifications.take(); + // let import_result = (&*self.inner).import_block(block).await; + justifications := block.Justifications + block.Justifications = nil + importResult, err := gbi.inner.ImportBlock(block) + + // let mut imported_aux = { + // match import_result { + // Ok(ImportResult::Imported(aux)) => aux, + // Ok(r) => { + // debug!( + // target: LOG_TARGET, + // "Restoring old authority set after block import result: {:?}", r, + // ); + // pending_changes.revert(); + // return Ok(r) + // }, + // Err(e) => { + // debug!( + // target: LOG_TARGET, + // "Restoring old authority set after block import error: {}", e, + // ); + // pending_changes.revert(); + // return Err(ConsensusError::ClientImport(e.to_string())) + // }, + // } + // }; + if err != nil { + logger.Debugf("Restoring old authority set after block import error: %s", err) + pendingChanges.revert() + return nil, err + } + var importedAux client_common.ImportedAux + switch importResult := importResult.(type) { + case client_common.ImportResultImported: + importedAux = client_common.ImportedAux(importResult) + default: + logger.Debugf("Restoring old authority set after block import result: %v", importResult) + pendingChanges.revert() + return importResult, nil + } + + // let (applied_changes, do_pause) = pending_changes.defuse(); + appliedChanges, doPause := pendingChanges.defuse() + + // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. 
+ // if do_pause { + // let _ = self.send_voter_commands.unbounded_send(VoterCommand::Pause( + // "Forced change scheduled after inactivity".to_string(), + // )); + // } + if doPause { + gbi.sendVoterCommands <- voterCommandPause("Forced change scheduled after inactivity") + } + + // let needs_justification = applied_changes.needs_justification(); + needsJustification := appliedChanges.needsJustification() + + // match applied_changes { + switch appliedChanges := appliedChanges.(type) { + // AppliedChanges::Forced(new) => { + case importAppliedChangesForced[H, N]: + // NOTE: when we do a force change we are "discrediting" the old set so we + // ignore any justifications from them. this block may contain a justification + // which should be checked and imported below against the new authority + // triggered by this forced change. the new grandpa voter will start at the + // last median finalized block (which is before the block that enacts the + // change), full nodes syncing the chain will not be able to successfully + // import justifications for those blocks since their local authority set view + // is still of the set before the forced change was enacted, still after #1867 + // they should import the block and discard the justification, and they will + // then request a justification from sync if it's necessary (which they should + // then be able to successfully validate). 
+ // let _ = + // self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); + gbi.sendVoterCommands <- voterCommandChangeAuthorities[H, N](newAuthoritySet[H, N](appliedChanges)) + // we must clear all pending justifications requests, presumably they won't be + // finalized hence why this forced changes was triggered + // imported_aux.clear_justification_requests = true; + importedAux.ClearJustificationRequests = true + + // AppliedChanges::Standard(false) => { + case importAppliedChangesStandard: + // this is a standard change, we don't apply it yet, but we will send a + // we can't apply this change yet since there are other dependent changes that we + // need to apply first, drop any justification that might have been provided with + // the block to make sure we request them from `sync` which will ensure they'll be + // applied in-order. + // justifications.take(); + justifications = nil + default: + } + // } + + // let grandpa_justification = + // justifications.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)); + var grandpaJustification *runtime.EncodedJustification + if justifications != nil { + grandpaJustification = justifications.IntoJustification(grandpa.GrandpaEngineID) + } + + // match grandpa_justification { + // Some(justification) => { + if grandpaJustification != nil { + // if environment::should_process_justification( + // &*self.inner, + // self.justification_import_period, + // number, + // needs_justification, + // ) { + if shouldProcessJustification( + gbi.inner, + gbi.justificationImportPeriod, + number, + needsJustification, + ) { + // let import_res = self.import_justification( + // hash, + // number, + // (GRANDPA_ENGINE_ID, justification), + // needs_justification, + // initial_sync, + // ); + err := gbi.importJustification( + hash, + number, + runtime.Justification{ + ConsensusEngineID: grandpa.GrandpaEngineID, + EncodedJustification: *grandpaJustification, + }, + needsJustification, + initialSync, + ) + + 
// import_res.unwrap_or_else(|err| { + if err != nil { + // if needs_justification { + // debug!( + // target: LOG_TARGET, + // "Requesting justification from peers due to imported block #{} that enacts authority set change with invalid justification: {}", + // number, + // err + // ); + // imported_aux.bad_justification = true; + // imported_aux.needs_justification = true; + // } + if needsJustification { + logger.Debugf("Requesting justification from peers due to imported block #%d that enacts authority set change with invalid justification: %s", number, err) + importedAux.BadJustification = true + importedAux.NeedsJustification = true + } + } + // }); + } else { + // debug!( + // target: LOG_TARGET, + // "Ignoring unnecessary justification for block #{}", + // number, + // ); + logger.Debugf("Ignoring unnecessary justification for block #%d", number) + } + } else { + // None => + // if needs_justification { + // debug!( + // target: LOG_TARGET, + // "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", + // number, + // ); + + // imported_aux.needs_justification = true; + // }, + if needsJustification { + logger.Debugf("Imported unjustified block #%d that enacts authority set change, waiting for finality for enactment.", number) + importedAux.NeedsJustification = true + } + } + + // Ok(ImportResult::Imported(imported_aux)) + return client_common.ImportResultImported(importedAux), nil +} + +// async fn check_block( +// +// &self, +// block: BlockCheckParams, +// +// ) -> Result { +// self.inner.check_block(block).await +// } +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) checkBlock( + block client_common.BlockCheckParams[H, N], +) (client_common.ImportResult, error) { + return gbi.inner.CheckBlock(block) +} + +// } + +// impl GrandpaBlockImport +// where +// +// BE: Backend, +// Client: ClientForGrandpa, +// NumberFor: finality_grandpa::BlockNumberOps, +// +// { +// /// Import a block justification and 
finalize the block. +// /// +// /// If `enacts_change` is set to true, then finalizing this block *must* +// /// enact an authority set change, the function will panic otherwise. +// fn import_justification( +// &self, +// hash: Block::Hash, +// number: NumberFor, +// justification: Justification, +// enacts_change: bool, +// initial_sync: bool, +// ) -> Result<(), ConsensusError> { +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importJustification( + hash H, + number N, + justification runtime.Justification, + enactsChange bool, + initialSync bool, +) error { + // if justification.0 != GRANDPA_ENGINE_ID { + if justification.ConsensusEngineID != grandpa.GrandpaEngineID { + // TODO: the import queue needs to be refactored to be able dispatch to the correct + // `JustificationImport` instance based on `ConsensusEngineId`, or we need to build a + // justification import pipeline similar to what we do for `BlockImport`. In the + // meantime we'll just drop the justification, since this is only used for BEEFY which + // is still WIP. 
+ return nil + } + + // let justification = GrandpaJustification::decode_and_verify_finalizes( + // &justification.1, + // (hash, number), + // self.authority_set.set_id(), + // &self.authority_set.current_authorities(), + // ); + just, err := DecodeGrandpaJustificationVerifyFinalizes[H, N, Hasher, Header]( + justification.EncodedJustification, + HashNumber[H, N]{Hash: hash, Number: number}, + gbi.authoritySet.SetID(), + gbi.authoritySet.CurrentAuthorities(), + ) + + // let justification = match justification { + // Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + // Ok(justification) => justification, + // }; + if err != nil { + return err + } + + // let result = environment::finalize_block( + // self.inner.clone(), + // &self.authority_set, + // None, + // hash, + // number, + // justification.into(), + // initial_sync, + // Some(&self.justification_sender), + // self.telemetry.clone(), + // ); + err = finalizeBlock( + gbi.inner, + gbi.authoritySet, + nil, + hash, + number, + justificationOrCommitJustification[H, N, Header]{just}, + initialSync, + &gbi.justificationSender, + ) + // match result { + if err != nil { + // Err(CommandOrError::VoterCommand(command)) => { + // grandpa_log!( + // initial_sync, + // "👴 Imported justification for block #{} that triggers \ + // command {}, signaling voter.", + // number, + // command, + // ); + + // // send the command to the voter + // let _ = self.send_voter_commands.unbounded_send(command); + // }, + _, ok := err.(voterCommand) + if ok { + l := logger.Infof + if initialSync { + l = logger.Debugf + } + l("👴 Imported justification for block #%d that triggers command %s, signaling voter.", number, err) + + // send the command to the voter + gbi.sendVoterCommands <- err.(voterCommand) + } else { + // Err(CommandOrError::Error(e)) => + // return Err(match e { + // Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), + // Error::Network(error) => ConsensusError::ClientImport(error), + 
// Error::Blockchain(error) => ConsensusError::ClientImport(error), + // Error::Client(error) => ConsensusError::ClientImport(error.to_string()), + // Error::Safety(error) => ConsensusError::ClientImport(error), + // Error::Signing(error) => ConsensusError::ClientImport(error), + // Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), + // Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()), + // }), + return err + } + } else { + // Ok(_) => { + // assert!( + // !enacts_change, + // "returns Ok when no authority set change should be enacted; qed;" + // ); + // }, + if enactsChange { + panic("returns Ok when no authority set change should be enacted; qed;") + } + } + + // Ok(()) + // } + return nil +} diff --git a/internal/client/consensus/grandpa/notification.go b/internal/client/consensus/grandpa/notification.go index 0a9eab8d85..abae864fca 100644 --- a/internal/client/consensus/grandpa/notification.go +++ b/internal/client/consensus/grandpa/notification.go @@ -15,6 +15,16 @@ type GrandpaJustificationSender[Hash runtime.Hash, N runtime.Number, Header runt notification.NotificationSender[GrandpaJustification[Hash, N, Header]] } +func NewGrandpaJustificationSender[ + Hash runtime.Hash, + N runtime.Number, + Header runtime.Header[N, Hash], +]() (GrandpaJustificationSender[Hash, N, Header], GrandpaJustificationStream[Hash, N, Header]) { + sender, stream := notification.NewNotificationStream[GrandpaJustification[Hash, N, Header]]() + return GrandpaJustificationSender[Hash, N, Header]{NotificationSender: sender}, + GrandpaJustificationStream[Hash, N, Header]{NotificationStream: stream} +} + // The receiving half of the Grandpa justification channel. // // Used to receive notifications about justifications generated at the end of a Grandpa round. 
diff --git a/internal/client/utils/notification/notification.go b/internal/client/utils/notification/notification.go index bf04520c2b..f5a377deb6 100644 --- a/internal/client/utils/notification/notification.go +++ b/internal/client/utils/notification/notification.go @@ -13,6 +13,21 @@ type NotificationStream[Payload any] struct { hub *pubsub.Hub[struct{}, func() (Payload, error), Payload, *registry[Payload]] //nolint: unused } +// impl NotificationStream { +// /// Creates a new pair of receiver and sender of `Payload` notifications. +// pub fn channel() -> (NotificationSender, Self) { +// let hub = Hub::new(TK::TRACING_KEY); +// let sender = NotificationSender { hub: hub.clone() }; +// let receiver = NotificationStream { hub, _pd: Default::default() }; +// (sender, receiver) +// } +func NewNotificationStream[Payload any]() (NotificationSender[Payload], NotificationStream[Payload]) { + hub := pubsub.NewHub("", ®istry[Payload]{}) + sender := NotificationSender[Payload]{hub: hub} + receiver := NotificationStream[Payload]{hub: hub} + return sender, receiver +} + // NotificationSender is the sending half of the notifications channel(s). type NotificationSender[Payload any] struct { hub *pubsub.Hub[struct{}, func() (Payload, error), Payload, *registry[Payload]] diff --git a/internal/primitives/api/api.go b/internal/primitives/api/api.go index ca24902431..f498ebd70e 100644 --- a/internal/primitives/api/api.go +++ b/internal/primitives/api/api.go @@ -10,6 +10,7 @@ import ( "github.com/ChainSafe/gossamer/internal/primitives/state-machine/overlayedchanges" ptrie "github.com/ChainSafe/gossamer/internal/primitives/trie" "github.com/ChainSafe/gossamer/internal/primitives/trie/recorder" + "github.com/ChainSafe/gossamer/internal/primitives/version" ) // Something that provides a runtime api. 
@@ -71,3 +72,18 @@ type ApiExt[ // Execute the given block ExecuteBlock(runtimeApiAtParam H, block runtime.Block[H, N, E, Header]) error } + +// pub trait Core { +type Core[H runtime.Hash] interface { + /// Returns the version of the runtime. + // fn version() -> RuntimeVersion; + Version(hash H) (version.RuntimeVersion, error) + // /// Execute the given block. + // fn execute_block(block: Block); + // /// Initialize a block with the given header. + // #[changed_in(5)] + // #[renamed("initialise_block", 2)] + // fn initialize_block(header: &::Header); + // /// Initialize a block with the given header and return the runtime executive mode. + // fn initialize_block(header: &::Header) -> ExtrinsicInclusionMode; +} diff --git a/internal/primitives/consensus/grandpa/grandpa.go b/internal/primitives/consensus/grandpa/grandpa.go index b6c5426ca9..7618a21e77 100644 --- a/internal/primitives/consensus/grandpa/grandpa.go +++ b/internal/primitives/consensus/grandpa/grandpa.go @@ -4,8 +4,11 @@ package grandpa import ( + "fmt" + "github.com/ChainSafe/gossamer/internal/client/keystore" "github.com/ChainSafe/gossamer/internal/log" + "github.com/ChainSafe/gossamer/internal/primitives/api" "github.com/ChainSafe/gossamer/internal/primitives/consensus/grandpa/app" "github.com/ChainSafe/gossamer/internal/primitives/core/crypto" "github.com/ChainSafe/gossamer/internal/primitives/runtime" @@ -96,7 +99,145 @@ type GrandpaJustification[H runtime.Hash, N runtime.Number, Header runtime.Heade VoteAncestries []Header } -// EquivocationProof is proof of voter misbehavior on a given set id. Misbehavior/equivocation in GRANDPA happens when +// / An consensus log item for GRANDPA. +// #[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] +// #[cfg_attr(feature = "serde", derive(Serialize))] +// pub enum ConsensusLog { +type ConsensusLog interface { + isConsensusLog() +} + +// / Schedule an authority set change. 
+// / +// / The earliest digest of this type in a single block will be respected, +// / provided that there is no `ForcedChange` digest. If there is, then the +// / `ForcedChange` will take precedence. +// / +// / No change should be scheduled if one is already and the delay has not +// / passed completely. +// / +// / This should be a pure function: i.e. as long as the runtime can interpret +// / the digest type it should return the same result regardless of the current +// / state. +// #[codec(index = 1)] +// ScheduledChange(ScheduledChange), +type ConensusLogScheduledChange[N runtime.Number] ScheduledChange[N] + +// / Force an authority set change. +// / +// / Forced changes are applied after a delay of _imported_ blocks, +// / while pending changes are applied after a delay of _finalized_ blocks. +// / +// / The earliest digest of this type in a single block will be respected, +// / with others ignored. +// / +// / No change should be scheduled if one is already and the delay has not +// / passed completely. +// / +// / This should be a pure function: i.e. as long as the runtime can interpret +// / the digest type it should return the same result regardless of the current +// / state. +// +// #[codec(index = 2)] +// ForcedChange(N, ScheduledChange), +type ConsensusLogForcedChange[N runtime.Number] struct { + Median N + ScheduledChange[N] +} + +// / Note that the authority with given index is disabled until the next change. +// +// #[codec(index = 3)] +// OnDisabled(AuthorityIndex), +type ConsensusLogOnDisabled AuthorityIndex + +// / A signal to pause the current authority set after the given delay. +// / After finalizing the block at _delay_ the authorities should stop voting. +// #[codec(index = 4)] +// Pause(N), +type ConsensusLogPause[N runtime.Number] struct { + Delay N +} + +// / A signal to resume the current authority set after the given delay. +// / After authoring the block at _delay_ the authorities should resume voting. 
+// +// #[codec(index = 5)] +// Resume(N), +type ConsensusLogResume[N runtime.Number] struct { + Delay N +} + +func (ConensusLogScheduledChange[N]) isConsensusLog() {} +func (ConsensusLogForcedChange[N]) isConsensusLog() {} +func (ConsensusLogOnDisabled) isConsensusLog() {} +func (ConsensusLogPause[N]) isConsensusLog() {} +func (ConsensusLogResume[N]) isConsensusLog() {} + +type ConsensusLogVDT[N runtime.Number] struct { + inner ConsensusLog +} + +func (mvdt *ConsensusLogVDT[N]) SetValue(value any) (err error) { + switch value := value.(type) { + case ConensusLogScheduledChange[N]: + mvdt.inner = value + return + case ConsensusLogForcedChange[N]: + mvdt.inner = value + return + case ConsensusLogOnDisabled: + mvdt.inner = value + return + case ConsensusLogPause[N]: + mvdt.inner = value + return + case ConsensusLogResume[N]: + mvdt.inner = value + return + default: + return fmt.Errorf("unsupported type") + } +} + +func (mvdt ConsensusLogVDT[N]) IndexValue() (index uint, value any, err error) { + switch mvdt.inner.(type) { + case ConensusLogScheduledChange[N]: + return 1, mvdt.inner, nil + case ConsensusLogForcedChange[N]: + return 2, mvdt.inner, nil + case ConsensusLogOnDisabled: + return 3, mvdt.inner, nil + case ConsensusLogPause[N]: + return 4, mvdt.inner, nil + case ConsensusLogResume[N]: + return 5, mvdt.inner, nil + } + return 0, nil, scale.ErrUnsupportedVaryingDataTypeValue +} + +func (mvdt ConsensusLogVDT[N]) Value() (value any, err error) { + _, value, err = mvdt.IndexValue() + return +} + +func (mvdt ConsensusLogVDT[N]) ValueAt(index uint) (value any, err error) { + switch index { + case 1: + return ConensusLogScheduledChange[N]{}, nil + case 2: + return ConsensusLogForcedChange[N]{}, nil + case 3: + return ConsensusLogOnDisabled(0), nil + case 4: + return ConsensusLogPause[N]{}, nil + case 5: + return ConsensusLogResume[N]{}, nil + } + return nil, scale.ErrUnknownVaryingDataTypeValue +} + +// EquiovcationProof is proof of voter misbehavior on a given set 
id. Misbehavior/equivocation in GRANDPA happens when // a voter votes on the same round (either at prevote or precommit stage) for different blocks. Proving is achieved by // collecting the signed messages of conflicting votes. type EquivocationProof[H runtime.Hash, N runtime.Number] struct { @@ -211,12 +352,13 @@ type OpaqueKeyOwnershipProof = runtime.OpaqueValue // // The consensus protocol will coordinate the handoff externally. type GrandpaAPI[H runtime.Hash, N runtime.Number] interface { + api.Core[H] // Get the current GRANDPA authorities and weights. This should not change except for when changes are scheduled // and the corresponding delay has passed. // // When called at block B, it will return the set of authorities that should be used to finalize descendants of // this block (B+1, B+2, ...). The block B itself is finalized by the authorities from block B-1. - GrandpaAuthorities() AuthorityList + GrandpaAuthorities(hash H) (AuthorityList, error) // Submits an unsigned extrinsic to report an equivocation. The caller must provide the equivocation proof and a // key ownership proof (should be obtained using GenerateKeyOwnershipProof). The extrinsic will be unsigned @@ -242,4 +384,8 @@ type GrandpaAPI[H runtime.Hash, N runtime.Number] interface { setID SetID, authorityID AuthorityID, ) *OpaqueKeyOwnershipProof + + /// Get current GRANDPA authority set id. + // fn current_set_id() -> SetId; + CurrentSetID(hash H) (SetID, error) } diff --git a/internal/primitives/crypto/hashing/hashing.go b/internal/primitives/crypto/hashing/hashing.go index 086edef4ab..47cee391f7 100644 --- a/internal/primitives/crypto/hashing/hashing.go +++ b/internal/primitives/crypto/hashing/hashing.go @@ -4,6 +4,9 @@ package hashing import ( + "encoding/binary" + + "github.com/OneOfOne/xxhash" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/sha3" ) @@ -37,3 +40,32 @@ func Keccak256(data []byte) [32]byte { copy(buf[:], hash) return buf } + +// / Do a XX 128-bit hash and return result. 
+func Twox128(data []byte) [16]byte { + // compute xxHash64 twice with seeds 0 and 1 applied on given byte array + h0 := xxhash.NewS64(0) // create xxHash with 0 seed + _, err := h0.Write(data) + if err != nil { + panic(err) + } + res0 := h0.Sum64() + hash0 := make([]byte, 8) + binary.LittleEndian.PutUint64(hash0, res0) + + h1 := xxhash.NewS64(1) // create xxHash with 1 seed + _, err = h1.Write(data) + if err != nil { + panic(err) + } + res1 := h1.Sum64() + hash1 := make([]byte, 8) + binary.LittleEndian.PutUint64(hash1, res1) + + return [16]byte{ + hash0[0], hash0[1], hash0[2], hash0[3], + hash0[4], hash0[5], hash0[6], hash0[7], + hash1[0], hash1[1], hash1[2], hash1[3], + hash1[4], hash1[5], hash1[6], hash1[7], + } +} diff --git a/internal/primitives/runtime/runtime.go b/internal/primitives/runtime/runtime.go index aef1a0eb5f..29e6763d22 100644 --- a/internal/primitives/runtime/runtime.go +++ b/internal/primitives/runtime/runtime.go @@ -47,6 +47,21 @@ func (j Justifications) Get(engineID ConsensusEngineID) *EncodedJustification { return nil } +// / Return a copy of the encoded justification for the given consensus +// / engine, if it exists. +// +// pub fn into_justification(self, engine_id: ConsensusEngineId) -> Option { +// self.into_iter().find(|j| j.0 == engine_id).map(|j| j.1) +// } +func (j Justifications) IntoJustification(enginedID ConsensusEngineID) *EncodedJustification { + for _, justification := range j { + if justification.ConsensusEngineID == enginedID { + return &justification.EncodedJustification + } + } + return nil +} + // Complex storage builder stuff. type BuildStorage interface { // Build the storage out of this builder. diff --git a/internal/primitives/version/version.go b/internal/primitives/version/version.go index 88b20bc40d..2ee5684473 100644 --- a/internal/primitives/version/version.go +++ b/internal/primitives/version/version.go @@ -16,7 +16,7 @@ type ApiID [8]uint8 // A pair of ApiID and a uint32 for version. 
type ApiIDVersion struct { - ApiId ApiID + ApiID Version uint32 } @@ -67,7 +67,7 @@ type RuntimeVersion struct { ImplVersion uint32 // List of supported API "features" along with their versions. - Apis ApisVec + APIs []ApiIDVersion // All existing calls (dispatchables) are fully compatible when this number doesn't change. If // this number changes, then SpecVersion must change, also. @@ -89,3 +89,17 @@ func (v *RuntimeVersion) StateVersion() storage.StateVersion { return storage.StateVersion(stateVersion) } + +// / Returns the api version found for api with `id`. +// +// pub fn api_version(&self, id: &ApiId) -> Option { +// self.apis.iter().find_map(|a| (a.0 == *id).then(|| a.1)) +// } +func (v *RuntimeVersion) APIVersion(id ApiID) *uint32 { + for _, api := range v.APIs { + if api.ApiID == id { + return &api.Version + } + } + return nil +} From 93c9cd690220b384a92e25c7d31762b90dd0017e Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Mon, 9 Jun 2025 14:53:33 -0400 Subject: [PATCH 2/6] fix lint --- internal/client/api/backend.go | 10 +- .../client/consensus/common/block_import.go | 30 +- .../client/consensus/grandpa/aux_schema.go | 3 + internal/client/consensus/grandpa/grandpa.go | 157 +---- internal/client/consensus/grandpa/import.go | 664 ++---------------- .../client/utils/notification/notification.go | 10 +- internal/primitives/api/api.go | 12 +- .../primitives/consensus/grandpa/grandpa.go | 81 +-- internal/primitives/crypto/hashing/hashing.go | 2 +- internal/primitives/runtime/runtime.go | 8 +- internal/primitives/version/version.go | 6 +- 11 files changed, 122 insertions(+), 861 deletions(-) diff --git a/internal/client/api/backend.go b/internal/client/api/backend.go index 72aa11e7df..309ea7f9f1 100644 --- a/internal/client/api/backend.go +++ b/internal/client/api/backend.go @@ -254,15 +254,9 @@ type AuxStore interface { GetAux(key []byte) ([]byte, error) } -// / Provides access to storage primitives -// pub trait StorageProvider> { +// StorageProvider provides 
access to storage primitives type StorageProvider[H runtime.Hash, N runtime.Number, Hasher runtime.Hasher[H]] interface { - /// Given a block's `Hash` and a key, return the value under the key in that block. - // fn storage( - // &self, - // hash: Block::Hash, - // key: &StorageKey, - // ) -> sp_blockchain::Result>; + // Given a block hash and a key, return the value under the key in that block. Storage(hash H, key storage.StorageKey) (*storage.StorageData, error) } diff --git a/internal/client/consensus/common/block_import.go b/internal/client/consensus/common/block_import.go index 2cf399c67d..c9f687a6f5 100644 --- a/internal/client/consensus/common/block_import.go +++ b/internal/client/consensus/common/block_import.go @@ -184,15 +184,7 @@ type BlockImportParams[H runtime.Hash, N runtime.Number, E runtime.Extrinsic, He PostHash *H } -// / Get the full header hash (with post-digests applied). -// -// pub fn post_hash(&self) -> Block::Hash { -// if let Some(hash) = self.post_hash { -// hash -// } else { -// self.post_header().hash() -// } -// } +// GetPostHash retrieves the full header hash (with post-digests applied). func (b *BlockImportParams[H, N, E, Header]) GetPostHash() H { if b.PostHash != nil { return *b.PostHash @@ -200,19 +192,7 @@ func (b *BlockImportParams[H, N, E, Header]) GetPostHash() H { return b.GetPostHeader().Hash() } -/// Get the post header. -// pub fn post_header(&self) -> Block::Header { -// if self.post_digests.is_empty() { -// self.header.clone() -// } else { -// let mut hdr = self.header.clone(); -// for digest_item in &self.post_digests { -// hdr.digest_mut().push(digest_item.clone()); -// } - -// hdr -// } -// } +// GetPostHeader retrieves the post header. 
func (b *BlockImportParams[H, N, E, Header]) GetPostHeader() Header { if len(b.PostDigests) == 0 { return b.Header.Clone().(Header) @@ -224,11 +204,7 @@ func (b *BlockImportParams[H, N, E, Header]) GetPostHeader() Header { return hdr } -// / Check if this block contains state import action -// -// pub fn with_state(&self) -> bool { -// matches!(self.state_action, StateAction::ApplyChanges(StorageChanges::Import(_))) -// } +// WithState checks if this block contains state import action func (b *BlockImportParams[H, N, E, Header]) WithState() bool { _, ok := b.StateAction.(StateActionApplyChanges) return ok diff --git a/internal/client/consensus/grandpa/aux_schema.go b/internal/client/consensus/grandpa/aux_schema.go index 3d6ad757c4..c803fcae84 100644 --- a/internal/client/consensus/grandpa/aux_schema.go +++ b/internal/client/consensus/grandpa/aux_schema.go @@ -58,6 +58,9 @@ func loadPersistent[H runtime.Hash, N runtime.Number]( makeGenesisRound := grandpa.NewRoundState[H, N] authSet, err := loadDecoded[AuthoritySet[H, N]](store, authoritySetKey) + if err != nil { + return nil, err + } if authSet != nil { var setState voterSetState[H, N] state, err := loadDecoded[voterSetStateVDT[H, N]](store, setStateKey) diff --git a/internal/client/consensus/grandpa/grandpa.go b/internal/client/consensus/grandpa/grandpa.go index 7f4af18bee..bd0246d16d 100644 --- a/internal/client/consensus/grandpa/grandpa.go +++ b/internal/client/consensus/grandpa/grandpa.go @@ -165,7 +165,7 @@ type LinkHalf[ persistentData persistentData[H, N] voterCommandsRx chan voterCommand justificationSender GrandpaJustificationSender[H, N, Header] - justificationStream GrandpaJustificationStream[H, N, Header] //nolint: unused + justificationStream GrandpaJustificationStream[H, N, Header] } // Provider for the Grandpa authority set configured on the genesis block. 
@@ -174,40 +174,12 @@ type GenesisAuthoritySetProvider interface { Get() (primitives.AuthorityList, error) } -// / Make block importer and link half necessary to tie the background voter -// / to it. -// / -// / The `justification_import_period` sets the minimum period on which -// / justifications will be imported. When importing a block, if it includes a -// / justification it will only be processed if it fits within this period, -// / otherwise it will be ignored (and won't be validated). This is to avoid -// / slowing down sync by a peer serving us unnecessary justifications which -// / aren't trivial to validate. -// pub fn block_import( +// Make block importer and link half necessary to tie the background voter to it. // -// client: Arc, -// justification_import_period: u32, -// genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, -// select_chain: SC, -// telemetry: Option, -// -// ) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> -// where -// -// SC: SelectChain, -// BE: Backend + 'static, -// Client: ClientForGrandpa + 'static, -// -// { -// block_import_with_authority_set_hard_forks( -// client, -// justification_import_period, -// genesis_authorities_provider, -// select_chain, -// Default::default(), -// telemetry, -// ) -// } +// The justificationImportPeriod sets the minimum period on which justifications will be imported. When importing +// a block, if it includes a justification it will only be processed if it fits within this period, otherwise it will +// be ignored (and won't be validated). This is to avoid slowing down sync by a peer serving us unnecessary +// justifications which aren't trivial to validate. func BlockImport[ H runtime.Hash, N runtime.Number, @@ -230,51 +202,24 @@ func BlockImport[ ) } -// / A descriptor for an authority set hard fork. These are authority set changes -// / that are not signalled by the runtime and instead are defined off-chain -// / (hence the hard fork). 
-// pub struct AuthoritySetHardFork { +// A descriptor for an authority set hard fork. These are authority set changes that are not signalled by the runtime +// and instead are defined off-chain (hence the hard fork). type AuthoritySetHardFork[H, N any] struct { - // /// The new authority set id. - // pub set_id: SetId, + // The new authority set id. SetID SetID - // /// The block hash and number at which the hard fork should be applied. - // pub block: (Block::Hash, NumberFor), + // The block hash and number at which the hard fork should be applied. Block HashNumber[H, N] - // /// The authorities in the new set. - // pub authorities: AuthorityList, + // The authorities in the new set. Authorities primitives.AuthorityList - // /// The latest block number that was finalized before this authority set - // /// hard fork. When defined, the authority set change will be forced, i.e. - // /// the node won't wait for the block above to be finalized before enacting - // /// the change, and the given finalized number will be used as a base for - // /// voting. - // pub last_finalized: Option>, + // The latest block number that was finalized before this authority set hard fork. When defined, the authority set + // change will be forced, i.e. the node won't wait for the block above to be finalized before enacting the change, + // and the given finalized number will be used as a base for voting. LastFinalized *N } -// / Make block importer and link half necessary to tie the background voter to -// / it. A vector of authority set hard forks can be passed, any authority set -// / change signaled at the given block (either already signalled or in a further -// / block when importing it) will be replaced by a standard change with the -// / given static authorities. 
-// pub fn block_import_with_authority_set_hard_forks( -// -// client: Arc, -// justification_import_period: u32, -// genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, -// select_chain: SC, -// authority_set_hard_forks: Vec>, -// telemetry: Option, -// -// ) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> -// where -// -// SC: SelectChain, -// BE: Backend + 'static, -// Client: ClientForGrandpa + 'static, -// -// { +// Make block importer and link half necessary to tie the background voter to it. A vector of authority set hard forks +// can be passed, any authority set change signalled at the given block (either already signalled or in a further block +// when importing it) will be replaced by a standard change with the given static authorities. func blockImportWithAuthoritySetHardForks[ H runtime.Hash, N runtime.Number, @@ -289,25 +234,9 @@ func blockImportWithAuthoritySetHardForks[ authoritySetHardForks []AuthoritySetHardFork[H, N], // TODO: telemetry ) (*GrandpaBlockImport[H, N, Hasher, Header, E], LinkHalf[H, N, Hasher, Header, E], error) { - // let chain_info = client.info(); - // let genesis_hash = chain_info.genesis_hash; chainInfo := client.Info() genesisHash := chainInfo.GenesisHash - // let persistent_data = - // aux_schema::load_persistent(&*client, genesis_hash, >::zero(), { - // let telemetry = telemetry.clone(); - // move || { - // let authorities = genesis_authorities_provider.get()?; - // telemetry!( - // telemetry; - // CONSENSUS_DEBUG; - // "afg.loading_authorities"; - // "authorities_len" => ?authorities.len() - // ); - // Ok(authorities) - // } - // })?; persistentData, err := loadPersistent[H, N](client, genesisHash, 0, func() (primitives.AuthorityList, error) { authorities, err := genesisAuthoritySetProvider.Get() if err != nil { @@ -319,37 +248,11 @@ func blockImportWithAuthoritySetHardForks[ return nil, LinkHalf[H, N, Hasher, Header, E]{}, err } - _ = persistentData - - // let (voter_commands_tx, voter_commands_rx) = - 
// tracing_unbounded("mpsc_grandpa_voter_command", 100_000); voterCommands := make(chan voterCommand, 100000) - // let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); justificationSender, justificationStream := NewGrandpaJustificationSender[H, N, Header]() // create pending change objects with 0 delay for each authority set hard fork. - // let authority_set_hard_forks = authority_set_hard_forks - // .into_iter() - // .map(|fork| { - // let delay_kind = if let Some(last_finalized) = fork.last_finalized { - // authorities::DelayKind::Best { median_last_finalized: last_finalized } - // } else { - // authorities::DelayKind::Finalized - // }; - - // ( - // fork.set_id, - // authorities::PendingChange { - // next_authorities: fork.authorities, - // delay: Zero::zero(), - // canon_hash: fork.block.0, - // canon_height: fork.block.1, - // delay_kind, - // }, - // ) - // }) - // .collect(); hardForks := make([]struct { SetID PendingChange[H, N] @@ -366,7 +269,7 @@ func blockImportWithAuthoritySetHardForks[ SetID PendingChange[H, N] }{ - SetID: SetID(fork.SetID), + SetID: fork.SetID, PendingChange: PendingChange[H, N]{ NextAuthorities: fork.Authorities, Delay: 0, @@ -377,30 +280,6 @@ func blockImportWithAuthoritySetHardForks[ } } - // Ok(( - // - // GrandpaBlockImport::new( - // client.clone(), - // justification_import_period, - // select_chain.clone(), - // persistent_data.authority_set.clone(), - // voter_commands_tx, - // authority_set_hard_forks, - // justification_sender.clone(), - // telemetry.clone(), - // ), - // LinkHalf { - // client, - // select_chain, - // persistent_data, - // voter_commands_rx, - // justification_sender, - // justification_stream, - // telemetry, - // }, - // - // )) - blockImport := newGrandpaBlockImport( client, justificationImportPeriod, diff --git a/internal/client/consensus/grandpa/import.go b/internal/client/consensus/grandpa/import.go index faf3edb1ed..d5eb68be72 100644 --- 
a/internal/client/consensus/grandpa/import.go +++ b/internal/client/consensus/grandpa/import.go @@ -18,18 +18,14 @@ import ( "github.com/ChainSafe/gossamer/pkg/scale" ) -// enum AppliedChanges { type importAppliedChanges interface { needsJustification() bool } -// Standard(bool), // true if the change is ready to be applied (i.e. it's a root) -type importAppliedChangesStandard bool +type importAppliedChangesStandard bool // true if the change is ready to be applied (i.e. it's a root) -// Forced(NewAuthoritySet), type importAppliedChangesForced[H runtime.Hash, N runtime.Number] newAuthoritySet[H, N] -// None, type importAppliedChangesNone struct{} func (importAppliedChangesStandard) needsJustification() bool { @@ -42,42 +38,25 @@ func (importAppliedChangesNone) needsJustification() bool { return false } -// } - type justInCase[H runtime.Hash, N runtime.Number] struct { old AuthoritySet[H, N] shareddata.SharedDataLocked[AuthoritySet[H, N]] } -// struct PendingSetChanges { type pendingSetChanges[ H runtime.Hash, N runtime.Number, Hasher runtime.Hasher[H], Header runtime.Header[N, H], ] struct { - // just_in_case: Option<( - // - // AuthoritySet>, - // SharedDataLockedUpgradable>>, - // - // )>, - justInCase *justInCase[H, N] - // applied_changes: AppliedChanges>, + justInCase *justInCase[H, N] appliedChanges importAppliedChanges - // do_pause: bool, - doPause bool + doPause bool } // revert the pending set change explicitly. 
-// fn revert(self) {} func (pendingSetChanges[H, N, Hasher, Header]) revert() {} -// fn defuse(mut self) -> (AppliedChanges>, bool) { -// self.just_in_case = None; -// let applied_changes = std::mem::replace(&mut self.applied_changes, AppliedChanges::None); -// (applied_changes, self.do_pause) -// } func (psc *pendingSetChanges[H, N, Hasher, Header]) defuse() (importAppliedChanges, bool) { psc.justInCase = nil appliedChanges := psc.appliedChanges @@ -85,17 +64,7 @@ func (psc *pendingSetChanges[H, N, Hasher, Header]) defuse() (importAppliedChang return appliedChanges, psc.doPause } -// impl Drop for PendingSetChanges { -// fn drop(&mut self) { -// if let Some((old_set, mut authorities)) = self.just_in_case.take() { -// *authorities.upgrade() = old_set; -// } -// } -// } func (psc *pendingSetChanges[H, N, Hasher, Header]) drop() { - // if let Some((oldSet, mut authorities)) = self.justInCase.take() { - // *authorities.upgrade() = oldSet; - // } if psc.justInCase != nil { jic := psc.justInCase psc.justInCase = nil @@ -106,16 +75,14 @@ func (psc *pendingSetChanges[H, N, Hasher, Header]) drop() { } } -// / A block-import handler for GRANDPA. -// / -// / This scans each imported block for signals of changing authority set. -// / If the block being imported enacts an authority set change then: -// / - If the current authority set is still live: we import the block -// / - Otherwise, the block must include a valid justification. -// / -// / When using GRANDPA, the block import worker should be using this block import -// / object.im -// pub struct GrandpaBlockImport { +// A block-import handler for GRANDPA. +// +// This scans each imported block for signals of changing authority set. +// If the block being imported enacts an authority set change then: +// - If the current authority set is still live: we import the block +// - Otherwise, the block must include a valid justification. 
+// +// When using GRANDPA, the block import worker should be using this block import object. type GrandpaBlockImport[ H runtime.Hash, N runtime.Number, @@ -123,40 +90,17 @@ type GrandpaBlockImport[ Header runtime.Header[N, H], E runtime.Extrinsic, ] struct { - // inner: Arc, - inner ClientForGrandpa[H, N, Hasher, Header, E] - // justification_import_period: u32, + inner ClientForGrandpa[H, N, Hasher, Header, E] justificationImportPeriod uint32 - // select_chain: SC, - selectChain common.SelectChain[H, N, Header] - // authority_set: SharedAuthoritySet>, - authoritySet *SharedAuthoritySet[H, N] - // send_voter_commands: TracingUnboundedSender>>, - sendVoterCommands chan voterCommand - // authority_set_hard_forks: - // - // Mutex>>>, - authoritySetHardForks map[H]PendingChange[H, N] - authoritySetHardForksMtx sync.Mutex - // - // justification_sender: GrandpaJustificationSender, - justificationSender GrandpaJustificationSender[H, N, Header] - // telemetry: Option, + selectChain common.SelectChain[H, N, Header] + authoritySet *SharedAuthoritySet[H, N] + sendVoterCommands chan voterCommand + authoritySetHardForks map[H]PendingChange[H, N] + authoritySetHardForksMtx sync.Mutex + justificationSender GrandpaJustificationSender[H, N, Header] // TODO: telemetry - // _phantom: PhantomData, } -// impl GrandpaBlockImport { -// pub(crate) fn new( -// inner: Arc, -// justification_import_period: u32, -// select_chain: SC, -// authority_set: SharedAuthoritySet>, -// send_voter_commands: TracingUnboundedSender>>, -// authority_set_hard_forks: Vec<(SetId, PendingChange>)>, -// justification_sender: GrandpaJustificationSender, -// telemetry: Option, -// ) -> GrandpaBlockImport { func newGrandpaBlockImport[ H runtime.Hash, N runtime.Number, @@ -178,12 +122,6 @@ func newGrandpaBlockImport[ ) *GrandpaBlockImport[H, N, Hasher, Header, E] { // check for and apply any forced authority set hard fork that applies // to the *current* authority set. 
- // if let Some((_, change)) = authority_set_hard_forks - // .iter() - // .find(|(set_id, _)| *set_id == authority_set.set_id()) - // { - // authority_set.inner().current_authorities = change.next_authorities.clone(); - // } for _, hardFork := range authoritySetHardForks { setID := hardFork.SetID if setID == SetID(sharedAuthoritySet.SetID()) { @@ -198,10 +136,6 @@ func newGrandpaBlockImport[ // index authority set hard forks by block hash so that they can be used // by any node syncing the chain and importing a block hard fork // authority set changes. - // let authority_set_hard_forks = authority_set_hard_forks - // .into_iter() - // .map(|(_, change)| (change.canon_hash, change)) - // .collect::>(); var authoritySetHardForksMap = make(map[H]PendingChange[H, N]) for _, hardFork := range authoritySetHardForks { authoritySetHardForksMap[hardFork.PendingChange.CanonHash] = hardFork.PendingChange @@ -210,38 +144,19 @@ func newGrandpaBlockImport[ // check for and apply any forced authority set hard fork that apply to // any *pending* standard changes, checking by the block hash at which // they were announced. 
- // { - // let mut authority_set = authority_set.inner(); - - // authority_set.pending_standard_changes = - // authority_set.pending_standard_changes.clone().map(&mut |hash, _, original| { - // authority_set_hard_forks.get(hash).cloned().unwrap_or(original) - // }); - // } - // authoritySet.mtx.Lock() - // authSet := &authoritySet.inner authoritySet, unlock := sharedAuthoritySet.inner.DataMut() - authoritySet.PendingStandardChanges = forktree.Map(authoritySet.PendingStandardChanges, func(hash H, _ N, original PendingChange[H, N]) PendingChange[H, N] { - if change, ok := authoritySetHardForksMap[hash]; ok { - return change - } - return original - }) - // authoritySet.mtx.Unlock() + authoritySet.PendingStandardChanges = forktree.Map( + authoritySet.PendingStandardChanges, + func(hash H, _ N, original PendingChange[H, N]) PendingChange[H, N] { + if change, ok := authoritySetHardForksMap[hash]; ok { + return change + } + return original + }, + ) unlock() - // GrandpaBlockImport { - // inner, - // justification_import_period, - // select_chain, - // authority_set, - // send_voter_commands, - // authority_set_hard_forks: Mutex::new(authority_set_hard_forks), - // justification_sender, - // telemetry, - // _phantom: PhantomData, - // } return &GrandpaBlockImport[H, N, Hasher, Header, E]{ inner: inner, justificationImportPeriod: justificationImportPeriod, @@ -253,13 +168,8 @@ func newGrandpaBlockImport[ } } -// / Checks the given header for a consensus digest signalling a **standard** scheduled change and -// / extracts it. -// pub fn find_scheduled_change( -// -// header: &B::Header, -// -// ) -> Option>> { +// Checks the given header for a consensus digest signalling a **standard** scheduled change and +// extracts it. 
func FindScheduledChange[ H runtime.Hash, N runtime.Number, @@ -267,13 +177,8 @@ func FindScheduledChange[ ]( header Header, ) *grandpa.ScheduledChange[N] { - // let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); id := runtime.OpaqueDigestItemIDConsensus(grandpa.GrandpaEngineID) - // let filter_log = |log: ConsensusLog>| match log { - // ConsensusLog::ScheduledChange(change) => Some(change), - // _ => None, - // }; filterLog := func(log grandpa.ConsensusLog) *grandpa.ScheduledChange[N] { scheduledChange, ok := log.(grandpa.ConensusLogScheduledChange[N]) if !ok { @@ -284,7 +189,6 @@ func FindScheduledChange[ } // find the first consensus digest with the right ID which converts to // the right kind of consensus log. - // header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) for _, log := range header.Digest().Logs { logVDT := runtime.DigestItemTryTo[grandpa.ConsensusLogVDT[N]](log, id) if logVDT == nil { @@ -308,13 +212,8 @@ type ForcedChange[H runtime.Hash, N runtime.Number] struct { grandpa.ScheduledChange[N] } -// / Checks the given header for a consensus digest signalling a **forced** scheduled change and -// / extracts it. -// pub fn find_forced_change( -// -// header: &B::Header, -// -// ) -> Option<(NumberFor, ScheduledChange>)> { +// Checks the given header for a consensus digest signalling a **forced** scheduled change and +// extracts it. 
func FindForcedChange[ H runtime.Hash, N runtime.Number, @@ -322,13 +221,8 @@ func FindForcedChange[ ]( header Header, ) *ForcedChange[H, N] { - // let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); id := runtime.OpaqueDigestItemIDConsensus(grandpa.GrandpaEngineID) - // let filter_log = |log: ConsensusLog>| match log { - // ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), - // _ => None, - // }; filterLog := func(log grandpa.ConsensusLog) *ForcedChange[H, N] { forcedChange, ok := log.(grandpa.ConsensusLogForcedChange[N]) if !ok { @@ -340,9 +234,8 @@ func FindForcedChange[ } } - // // find the first consensus digest with the right ID which converts to - // // the right kind of consensus log. - // header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. for _, log := range header.Digest().Logs { logVDT := runtime.DigestItemTryTo[grandpa.ConsensusLogVDT[N]](log, id) if logVDT == nil { @@ -361,30 +254,11 @@ func FindForcedChange[ return nil } -// impl GrandpaBlockImport -// where -// -// NumberFor: finality_grandpa::BlockNumberOps, -// BE: Backend, -// Client: ClientForGrandpa, -// Client::Api: GrandpaApi, -// for<'a> &'a Client: BlockImport, -// -// { -// // check for a new authority set change. -// fn check_new_change( -// &self, -// header: &Block::Header, -// hash: Block::Hash, -// ) -> Option>> { func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) checkNewChange( header Header, hash H, ) *PendingChange[H, N] { // check for forced authority set hard forks - // if let Some(change) = self.authority_set_hard_forks.lock().get(&hash) { - // return Some(change.clone()) - // } gbi.authoritySetHardForksMtx.Lock() change, ok := gbi.authoritySetHardForks[hash] gbi.authoritySetHardForksMtx.Unlock() @@ -393,15 +267,6 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) checkNewChange( } // check for forced change. 
- // if let Some((median_last_finalized, change)) = find_forced_change::(header) { - // return Some(PendingChange { - // next_authorities: change.next_authorities, - // delay: change.delay, - // canon_height: *header.number(), - // canon_hash: hash, - // delay_kind: DelayKind::Best { median_last_finalized }, - // }) - // } fc := FindForcedChange[H, N](header) if fc != nil { return &PendingChange[H, N]{ @@ -414,14 +279,6 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) checkNewChange( } // check normal scheduled change. - // let change = find_scheduled_change::(header)?; - // Some(PendingChange { - // next_authorities: change.next_authorities, - // delay: change.delay, - // canon_height: *header.number(), - // canon_hash: hash, - // delay_kind: DelayKind::Finalized, - // }) scheduled := FindScheduledChange[H, N](header) return &PendingChange[H, N]{ NextAuthorities: scheduled.NextAuthorities, @@ -432,19 +289,11 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) checkNewChange( } } -// struct InnerGuard<'a, H, N> { -// old: Option>, -// guard: Option>>, -// } type innerGuard[H runtime.Hash, N runtime.Number] struct { old *AuthoritySet[H, N] guard *shareddata.SharedDataLocked[AuthoritySet[H, N]] } -// impl<'a, H, N> InnerGuard<'a, H, N> { -// fn as_mut(&mut self) -> &mut AuthoritySet { -// self.guard.as_mut().expect("only taken on deconstruction; qed") -// } func (ig *innerGuard[H, N]) asMut() *AuthoritySet[H, N] { if ig.guard == nil { panic("guard is nil; only taken on deconstruction; qed") @@ -452,12 +301,6 @@ func (ig *innerGuard[H, N]) asMut() *AuthoritySet[H, N] { return ig.guard.MutRef() } -// fn set_old(&mut self, old: AuthoritySet) { -// if self.old.is_none() { -// // ignore "newer" old changes. -// self.old = Some(old); -// } -// } func (ig *innerGuard[H, N]) setOld(old AuthoritySet[H, N]) { if ig.old == nil { // ignore "newer" old changes. 
@@ -465,14 +308,6 @@ func (ig *innerGuard[H, N]) setOld(old AuthoritySet[H, N]) { } } -// fn consume( -// mut self, -// ) -> Option<(AuthoritySet, SharedDataLocked<'a, AuthoritySet>)> { -// self.old -// .take() -// .map(|old| (old, self.guard.take().expect("only taken on deconstruction; qed"))) -// } -// } type consumed[H runtime.Hash, N runtime.Number] struct { old AuthoritySet[H, N] shareddata.SharedDataLocked[AuthoritySet[H, N]] @@ -493,17 +328,7 @@ func (ig *innerGuard[H, N]) consume() *consumed[H, N] { } } -// impl<'a, H, N> Drop for InnerGuard<'a, H, N> { -// fn drop(&mut self) { -// if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { -// *guard = old; -// } -// } -// } func (ig *innerGuard[H, N]) drop() { - // if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { - // *guard = old; - // } guard := ig.guard ig.guard = nil old := ig.old @@ -513,14 +338,6 @@ func (ig *innerGuard[H, N]) drop() { } } -// fn make_authorities_changes( -// -// &self, -// block: &mut BlockImportParams, -// hash: Block::Hash, -// initial_sync: bool, -// -// ) -> Result, ConsensusError> { func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( block *client_common.BlockImportParams[H, N, E, Header], hash H, @@ -530,44 +347,26 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( // until the block is written to prevent a race if we need to restore // the old authority set on error or panic. - // let number = *(block.header.number()); - // let maybe_change = self.check_new_change(&block.header, hash); number := block.Header.Number() maybeChange := gbi.checkNewChange(block.Header, hash) // returns a function for checking whether a block is a descendent of another // consistent with querying client directly after importing the block. 
- // let parent_hash = *block.header.parent_hash(); - // let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); parentHash := block.Header.ParentHash() isDescendentOf := utils.IsDescendantOf(gbi.inner, &utils.HashParent[H]{Hash: hash, Parent: parentHash}) - // let mut guard = InnerGuard { guard: Some(self.authority_set.inner_locked()), old: None }; locked := gbi.authoritySet.inner.Locked() defer locked.Unlock() guard := innerGuard[H, N]{ old: nil, guard: &locked, } + defer guard.drop() // whether to pause the old authority set -- happens after import // of a forced change block. - // let mut do_pause = false; var doPause bool // add any pending changes. - // if let Some(change) = maybe_change { - // let old = guard.as_mut().clone(); - // guard.set_old(old); - - // if let DelayKind::Best { .. } = change.delay_kind { - // do_pause = true; - // } - - // guard - // .as_mut() - // .add_pending_change(change, &is_descendent_of) - // .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - // } if maybeChange != nil { change := *maybeChange old := guard.asMut().Clone() @@ -583,18 +382,6 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( } } - // let applied_changes = { - // let forced_change_set = guard - // .as_mut() - // .apply_forced_changes( - // hash, - // number, - // &is_descendent_of, - // initial_sync, - // self.telemetry.clone(), - // ) - // .map_err(|e| ConsensusError::ClientImport(e.to_string())) - // .map_err(ConsensusError::from)?; var appliedChanges importAppliedChanges forcedChangeSet, err := guard.asMut().applyForcedChanges( hash, @@ -605,26 +392,15 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( return nil, err } - // if let Some((median_last_finalized_number, new_set)) = forced_change_set { if forcedChangeSet != nil { medianLastFinalizedNumber := forcedChangeSet.median newSet := forcedChangeSet.set - // let new_authorities = { - // let (set_id, 
new_authorities) = new_set.current(); setID, newAuthorities := newSet.current() // we will use the median last finalized number as a hint // for the canon block the new authority set should start // with. we use the minimum between the median and the local // best finalized block. - // let best_finalized_number = self.inner.info().finalized_number; - // let canon_number = best_finalized_number.min(median_last_finalized_number); - // let canon_hash = self.inner.hash(canon_number) - // .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - // .expect( - // "the given block number is less or equal than the current best finalized number; \ - // current best finalized number must exist in chain; qed." - // ); bestFinalizedNumber := gbi.inner.Info().FinalizedNumber canonNumber := bestFinalizedNumber if medianLastFinalizedNumber < bestFinalizedNumber { @@ -635,18 +411,10 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( return nil, err } if canonHash == nil { - panic("the given block number is less or equal than the current best finalized number; current best finalized number must exist in chain; qed.") + panic("the given block number is less or equal than the current best finalized number; " + + "current best finalized number must exist in chain; qed.") } - // NewAuthoritySet { - // canon_number, - // canon_hash, - // set_id, - // authorities: new_authorities.to_vec(), - // } - // }; - // let old = ::std::mem::replace(guard.as_mut(), new_set); - // guard.set_old(old); newAuthoritySet := newAuthoritySet[H, N]{ CanonNumber: canonNumber, CanonHash: *canonHash, @@ -657,21 +425,8 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( guard.setOld(old) *guard.asMut() = newSet - // AppliedChanges::Forced(new_authorities) appliedChanges = importAppliedChangesForced[H, N](newAuthoritySet) } else { - // let did_standard = guard - // .as_mut() - // .enacts_standard_change(hash, number, &is_descendent_of) - // 
.map_err(|e| ConsensusError::ClientImport(e.to_string())) - // .map_err(ConsensusError::from)?; - - // if let Some(root) = did_standard { - // AppliedChanges::Standard(root) - // } else { - // AppliedChanges::None - // } - // } didStandard, err := guard.asMut().EnactsStandardChange(hash, number, isDescendentOf) if err != nil { return nil, err @@ -685,16 +440,9 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( } // consume the guard safely and write necessary changes. - // let just_in_case = guard.consume(); justInCaseConsumed := guard.consume() - // if let Some((_, ref authorities)) = just_in_case { if justInCaseConsumed != nil { authorities := &justInCaseConsumed.SharedDataLocked - // let authorities_change = match applied_changes { - // AppliedChanges::Forced(ref new) => Some(new), - // AppliedChanges::Standard(_) => None, // the change isn't actually applied yet. - // AppliedChanges::None => None, - // }; var authoritiesChange *newAuthoritySet[H, N] switch appliedChanges := appliedChanges.(type) { case importAppliedChangesForced[H, N]: @@ -707,16 +455,7 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( default: panic("unreachable") } - // crate::aux_schema::update_authority_set::( - // authorities, - // authorities_change, - // |insert| { - // block - // .auxiliary - // .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - // }, - // ); - updateAuthoritySet(authorities.Data(), authoritiesChange, func(insertions []api.KeyValue) error { + err := updateAuthoritySet(authorities.Data(), authoritiesChange, func(insertions []api.KeyValue) error { converted := make([]api.AuxDataOperation, len(insertions)) for i, kv := range insertions { converted[i] = api.AuxDataOperation{ @@ -727,9 +466,11 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( block.Auxiliary = append(block.Auxiliary, converted...) 
return nil }) + if err != nil { + return nil, err + } } - // let just_in_case = just_in_case.map(|(o, i)| (o, i.release_mutex())); var jic *justInCase[H, N] if justInCaseConsumed != nil { jic = &justInCase[H, N]{ @@ -738,7 +479,6 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( } } - // Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) return &pendingSetChanges[H, N, Hasher, Header]{ justInCase: jic, appliedChanges: appliedChanges, @@ -746,15 +486,8 @@ }, nil } -// /// Read current set id form a given state. -// fn current_set_id(&self, hash: Block::Hash) -> Result { +// Read current set id from a given state. func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) currentSetID(hash H) (grandpa.SetID, error) { - // let runtime_version = self.inner.runtime_api().version(hash).map_err(|e| { - // ConsensusError::ClientImport(format!( - // "Unable to retrieve current runtime version. {}", - // e - // )) - // })?; runtimeVersion, err := gbi.inner.RuntimeAPI().Version(hash) if err != nil { return 0, err @@ -763,31 +496,14 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) currentSetID(hash H) (gr var GrandpaID = [8]byte{237, 153, 197, 172, 178, 94, 237, 245} apiVersion := runtimeVersion.APIVersion(GrandpaID) - // if runtime_version - // .api_version(&>::ID) - // .map_or(false, |v| v < 3) - // { if apiVersion != nil && *apiVersion < 3 { // The new API is not supported in this runtime. Try reading directly from storage. // This code may be removed once warp sync to an old runtime is no longer needed.
- // for prefix in ["GrandpaFinality", "Grandpa"] { for _, prefix := range []string{"GrandpaFinality", "Grandpa"} { - // let k = [ - // sp_crypto_hashing::twox_128(prefix.as_bytes()), - // sp_crypto_hashing::twox_128(b"CurrentSetId"), - // ] - // .concat(); k0 := hashing.Twox128([]byte(prefix)) k1 := hashing.Twox128([]byte("CurrentSetId")) k := k0[:] k = append(k, k1[:]...) - // if let Ok(Some(id)) = - // self.inner.storage(hash, &sc_client_api::StorageKey(k.to_vec())) - // { - // if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { - // return Ok(id) - // } - // } id, _ := gbi.inner.Storage(hash, storage.StorageKey(k)) if id != nil { var setID grandpa.SetID @@ -797,13 +513,8 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) currentSetID(hash H) (gr } } } - // Err(ConsensusError::ClientImport("Unable to retrieve current set id.".into())) return 0, fmt.Errorf("unable to retrieve current set id") } else { - // self.inner - // .runtime_api() - // .current_set_id(hash) - // .map_err(|e| ConsensusError::ClientImport(e.to_string())) setID, err := gbi.inner.RuntimeAPI().CurrentSetID(hash) if err != nil { return 0, err @@ -812,61 +523,34 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) currentSetID(hash H) (gr } } -// /// Import whole new state and reset authority set. -// async fn import_state( -// -// &self, -// mut block: BlockImportParams, -// -// ) -> Result { +// Import whole new state and reset authority set. func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importState( block *client_common.BlockImportParams[H, N, E, Header], ) (client_common.ImportResult, error) { - // let hash = block.post_hash(); - // let number = *block.header.number(); hash := block.GetPostHash() number := block.Header.Number() // Force imported state finality. 
- // block.finalized = true; - // let import_result = (&*self.inner).import_block(block).await; block.Finalized = true importResult, err := gbi.inner.ImportBlock(block) - // match import_result { if err == nil { - // Ok(ImportResult::Imported(aux)) => { switch importResult := importResult.(type) { case client_common.ImportResultImported: aux := client_common.ImportedAux(importResult) // We've just imported a new state. We trust the sync module has verified // finality proofs and that the state is correct and final. // So we can read the authority list and set id from the state. - // self.authority_set_hard_forks.lock().clear(); gbi.authoritySetHardForksMtx.Lock() gbi.authoritySetHardForks = make(map[H]PendingChange[H, N]) gbi.authoritySetHardForksMtx.Unlock() - // let authorities = self - // .inner - // .runtime_api() - // .grandpa_authorities(hash) - // .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; authorities, err := gbi.inner.RuntimeAPI().GrandpaAuthorities(hash) if err != nil { return nil, err } - // let set_id = self.current_set_id(hash)?; setID, err := gbi.currentSetID(hash) if err != nil { return nil, err } - // let authority_set = AuthoritySet::new( - // authorities.clone(), - // set_id, - // fork_tree::ForkTree::new(), - // Vec::new(), - // AuthoritySetChanges::empty(), - // ) - // .ok_or_else(|| ConsensusError::ClientImport("Invalid authority list".into()))?; authoritySet, err := NewAuthoritySet[H, N]( authorities, uint64(setID), @@ -878,17 +562,10 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importState( return nil, err } - // *self.authority_set.inner_locked() = authority_set.clone(); locked := gbi.authoritySet.inner.Locked() *locked.MutRef() = authoritySet.Clone() defer locked.Unlock() - // crate::aux_schema::update_authority_set::( - // &authority_set, - // None, - // |insert| self.inner.insert_aux(insert, []), - // ) - // .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; err = updateAuthoritySet( locked.Data(), 
nil, @@ -899,21 +576,14 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importState( if err != nil { return nil, err } - // let new_set = - // NewAuthoritySet { canon_number: number, canon_hash: hash, set_id, authorities }; - // let _ = self - // .send_voter_commands - // .unbounded_send(VoterCommand::ChangeAuthorities(new_set)); - // Ok(ImportResult::Imported(aux)) newSet := newAuthoritySet[H, N]{ CanonNumber: number, CanonHash: hash, - SetID: grandpa.SetID(setID), + SetID: setID, Authorities: authorities, } gbi.sendVoterCommands <- voterCommandChangeAuthorities[H, N](newSet) return client_common.ImportResultImported(aux), nil - // }, case client_common.ImportResultAlreadyInChain, client_common.ImportResultKnownBad, client_common.ImportResultMissingState, @@ -927,47 +597,16 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importState( } else { return nil, err } - // Err(e) => Err(ConsensusError::ClientImport(e.to_string())), - // } - // } } -// impl BlockImport for GrandpaBlockImport -// where -// NumberFor: finality_grandpa::BlockNumberOps, -// BE: Backend, -// Client: ClientForGrandpa, -// Client::Api: GrandpaApi, -// for<'a> &'a Client: BlockImport, -// SC: Send + Sync, -// { -// type Error = ConsensusError; - -// async fn import_block( -// -// &self, -// mut block: BlockImportParams, -// -// ) -> Result { -func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importBlock( +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) ImportBlock( block *client_common.BlockImportParams[H, N, E, Header], ) (client_common.ImportResult, error) { - // let hash = block.post_hash(); - // let number = *block.header.number(); hash := block.GetPostHash() number := block.Header.Number() // early exit if block already in chain, otherwise the check for // authority changes will error when trying to re-import a change block - // match self.inner.status(hash) { - // Ok(BlockStatus::InChain) => { - // // Strip justifications when re-importing an existing 
block. - // let _justifications = block.justifications.take(); - // return (&*self.inner).import_block(block).await - // }, - // Ok(BlockStatus::Unknown) => {}, - // Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), - // } status, err := gbi.inner.Status(hash) if err != nil { return nil, err @@ -978,36 +617,12 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importBlock( return gbi.inner.ImportBlock(block) } - // if block.with_state() { - // return self.import_state(block).await - // } if block.WithState() { return gbi.importState(block) } - // if number <= self.inner.info().finalized_number { if number <= gbi.inner.Info().FinalizedNumber { // Importing an old block. Just save justifications and authority set changes - // if self.check_new_change(&block.header, hash).is_some() { - // if block.justifications.is_none() { - // return Err(ConsensusError::ClientImport( - // "Justification required when importing \ - // an old block with authority set change." - // .into(), - // )) - // } - // let mut authority_set = self.authority_set.inner_locked(); - // authority_set.authority_set_changes.insert(number); - // crate::aux_schema::update_authority_set::( - // &authority_set, - // None, - // |insert| { - // block - // .auxiliary - // .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) - // }, - // ); - // } if gbi.checkNewChange(block.Header, hash) != nil { if block.Justifications == nil { return nil, fmt.Errorf("justification required when importing an old block with authority set change") @@ -1034,49 +649,23 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importBlock( return nil, err } } - // return (&*self.inner).import_block(block).await return gbi.inner.ImportBlock(block) } // on initial sync we will restrict logging under info to avoid spam. 
- // let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; initialSync := block.Origin == common.NetworkInitialSyncBlockOrigin - // let pending_changes = self.make_authorities_changes(&mut block, hash, initial_sync)?; pendingChanges, err := gbi.makeAuthoritiesChanges(block, hash, initialSync) if err != nil { return nil, err } defer pendingChanges.drop() - // we don't want to finalize on `inner.import_block` - // let mut justifications = block.justifications.take(); - // let import_result = (&*self.inner).import_block(block).await; + // we don't want to finalize on inner.ImportBlock justifications := block.Justifications block.Justifications = nil importResult, err := gbi.inner.ImportBlock(block) - // let mut imported_aux = { - // match import_result { - // Ok(ImportResult::Imported(aux)) => aux, - // Ok(r) => { - // debug!( - // target: LOG_TARGET, - // "Restoring old authority set after block import result: {:?}", r, - // ); - // pending_changes.revert(); - // return Ok(r) - // }, - // Err(e) => { - // debug!( - // target: LOG_TARGET, - // "Restoring old authority set after block import error: {}", e, - // ); - // pending_changes.revert(); - // return Err(ConsensusError::ClientImport(e.to_string())) - // }, - // } - // }; if err != nil { logger.Debugf("Restoring old authority set after block import error: %s", err) pendingChanges.revert() @@ -1092,25 +681,16 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importBlock( return importResult, nil } - // let (applied_changes, do_pause) = pending_changes.defuse(); appliedChanges, doPause := pendingChanges.defuse() // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. 
- // if do_pause { - // let _ = self.send_voter_commands.unbounded_send(VoterCommand::Pause( - // "Forced change scheduled after inactivity".to_string(), - // )); - // } if doPause { gbi.sendVoterCommands <- voterCommandPause("Forced change scheduled after inactivity") } - // let needs_justification = applied_changes.needs_justification(); needsJustification := appliedChanges.needsJustification() - // match applied_changes { switch appliedChanges := appliedChanges.(type) { - // AppliedChanges::Forced(new) => { case importAppliedChangesForced[H, N]: // NOTE: when we do a force change we are "discrediting" the old set so we // ignore any justifications from them. this block may contain a justification @@ -1123,56 +703,32 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importBlock( // they should import the block and discard the justification, and they will // then request a justification from sync if it's necessary (which they should // then be able to successfully validate). - // let _ = - // self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); gbi.sendVoterCommands <- voterCommandChangeAuthorities[H, N](newAuthoritySet[H, N](appliedChanges)) // we must clear all pending justifications requests, presumably they won't be // finalized hence why this forced changes was triggered - // imported_aux.clear_justification_requests = true; importedAux.ClearJustificationRequests = true - // AppliedChanges::Standard(false) => { case importAppliedChangesStandard: // this is a standard change, we don't apply it yet, but we will send a // we can't apply this change yet since there are other dependent changes that we // need to apply first, drop any justification that might have been provided with // the block to make sure we request them from `sync` which will ensure they'll be // applied in-order. 
- // justifications.take(); justifications = nil default: } - // } - - // let grandpa_justification = - // justifications.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)); var grandpaJustification *runtime.EncodedJustification if justifications != nil { grandpaJustification = justifications.IntoJustification(grandpa.GrandpaEngineID) } - // match grandpa_justification { - // Some(justification) => { if grandpaJustification != nil { - // if environment::should_process_justification( - // &*self.inner, - // self.justification_import_period, - // number, - // needs_justification, - // ) { if shouldProcessJustification( gbi.inner, gbi.justificationImportPeriod, number, needsJustification, ) { - // let import_res = self.import_justification( - // hash, - // number, - // (GRANDPA_ENGINE_ID, justification), - // needs_justification, - // initial_sync, - // ); err := gbi.importJustification( hash, number, @@ -1184,90 +740,40 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importBlock( initialSync, ) - // import_res.unwrap_or_else(|err| { if err != nil { - // if needs_justification { - // debug!( - // target: LOG_TARGET, - // "Requesting justification from peers due to imported block #{} that enacts authority set change with invalid justification: {}", - // number, - // err - // ); - // imported_aux.bad_justification = true; - // imported_aux.needs_justification = true; - // } if needsJustification { - logger.Debugf("Requesting justification from peers due to imported block #%d that enacts authority set change with invalid justification: %s", number, err) + logger.Debugf( + ("Requesting justification from peers due to imported block #%d that enacts authority set" + + " change with invalid justification: %s"), number, err) importedAux.BadJustification = true importedAux.NeedsJustification = true } } - // }); } else { - // debug!( - // target: LOG_TARGET, - // "Ignoring unnecessary justification for block #{}", - // number, - // ); logger.Debugf("Ignoring 
unnecessary justification for block #%d", number) } } else { - // None => - // if needs_justification { - // debug!( - // target: LOG_TARGET, - // "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", - // number, - // ); - - // imported_aux.needs_justification = true; - // }, if needsJustification { - logger.Debugf("Imported unjustified block #%d that enacts authority set change, waiting for finality for enactment.", number) + logger.Debugf( + "Imported unjustified block #%d that enacts authority set change, waiting for finality for enactment.", + number) importedAux.NeedsJustification = true } } - // Ok(ImportResult::Imported(imported_aux)) return client_common.ImportResultImported(importedAux), nil } -// async fn check_block( -// -// &self, -// block: BlockCheckParams, -// -// ) -> Result { -// self.inner.check_block(block).await -// } -func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) checkBlock( +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) CheckBlock( block client_common.BlockCheckParams[H, N], ) (client_common.ImportResult, error) { return gbi.inner.CheckBlock(block) } -// } - -// impl GrandpaBlockImport -// where -// -// BE: Backend, -// Client: ClientForGrandpa, -// NumberFor: finality_grandpa::BlockNumberOps, +// Import a block justification and finalize the block. // -// { -// /// Import a block justification and finalize the block. -// /// -// /// If `enacts_change` is set to true, then finalizing this block *must* -// /// enact an authority set change, the function will panic otherwise. -// fn import_justification( -// &self, -// hash: Block::Hash, -// number: NumberFor, -// justification: Justification, -// enacts_change: bool, -// initial_sync: bool, -// ) -> Result<(), ConsensusError> { +// If enactsChange is set to true, then finalizing this block *must* +// enact an authority set change, the function will panic otherwise. 
func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importJustification( hash H, number N, @@ -1275,22 +781,15 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importJustification( enactsChange bool, initialSync bool, ) error { - // if justification.0 != GRANDPA_ENGINE_ID { if justification.ConsensusEngineID != grandpa.GrandpaEngineID { // TODO: the import queue needs to be refactored to be able dispatch to the correct - // `JustificationImport` instance based on `ConsensusEngineId`, or we need to build a - // justification import pipeline similar to what we do for `BlockImport`. In the + // JustificationImport instance based on ConsensusEngineID, or we need to build a + // justification import pipeline similar to what we do for BlockImport. In the // meantime we'll just drop the justification, since this is only used for BEEFY which // is still WIP. return nil } - // let justification = GrandpaJustification::decode_and_verify_finalizes( - // &justification.1, - // (hash, number), - // self.authority_set.set_id(), - // &self.authority_set.current_authorities(), - // ); just, err := DecodeGrandpaJustificationVerifyFinalizes[H, N, Hasher, Header]( justification.EncodedJustification, HashNumber[H, N]{Hash: hash, Number: number}, @@ -1298,25 +797,10 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importJustification( gbi.authoritySet.CurrentAuthorities(), ) - // let justification = match justification { - // Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), - // Ok(justification) => justification, - // }; if err != nil { return err } - // let result = environment::finalize_block( - // self.inner.clone(), - // &self.authority_set, - // None, - // hash, - // number, - // justification.into(), - // initial_sync, - // Some(&self.justification_sender), - // self.telemetry.clone(), - // ); err = finalizeBlock( gbi.inner, gbi.authoritySet, @@ -1327,57 +811,25 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) 
importJustification( initialSync, &gbi.justificationSender, ) - // match result { if err != nil { - // Err(CommandOrError::VoterCommand(command)) => { - // grandpa_log!( - // initial_sync, - // "👴 Imported justification for block #{} that triggers \ - // command {}, signaling voter.", - // number, - // command, - // ); - - // // send the command to the voter - // let _ = self.send_voter_commands.unbounded_send(command); - // }, _, ok := err.(voterCommand) if ok { l := logger.Infof if initialSync { l = logger.Debugf } - l("👴 Imported justification for block #%d that triggers command %s, signaling voter.", number, err) + l("👴 Imported justification for block #%d that triggers command %s, signalling voter.", number, err) // send the command to the voter gbi.sendVoterCommands <- err.(voterCommand) } else { - // Err(CommandOrError::Error(e)) => - // return Err(match e { - // Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), - // Error::Network(error) => ConsensusError::ClientImport(error), - // Error::Blockchain(error) => ConsensusError::ClientImport(error), - // Error::Client(error) => ConsensusError::ClientImport(error.to_string()), - // Error::Safety(error) => ConsensusError::ClientImport(error), - // Error::Signing(error) => ConsensusError::ClientImport(error), - // Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), - // Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()), - // }), return err } } else { - // Ok(_) => { - // assert!( - // !enacts_change, - // "returns Ok when no authority set change should be enacted; qed;" - // ); - // }, if enactsChange { panic("returns Ok when no authority set change should be enacted; qed;") } } - // Ok(()) - // } return nil } diff --git a/internal/client/utils/notification/notification.go b/internal/client/utils/notification/notification.go index f5a377deb6..28c191261f 100644 --- a/internal/client/utils/notification/notification.go +++ 
b/internal/client/utils/notification/notification.go @@ -10,17 +10,9 @@ import "github.com/ChainSafe/gossamer/internal/client/utils/pubsub" // The [NotificationStream] entity stores the [pubsub.Hub] so it can be // used to add more subscriptions. type NotificationStream[Payload any] struct { - hub *pubsub.Hub[struct{}, func() (Payload, error), Payload, *registry[Payload]] //nolint: unused + hub *pubsub.Hub[struct{}, func() (Payload, error), Payload, *registry[Payload]] } -// impl NotificationStream { -// /// Creates a new pair of receiver and sender of `Payload` notifications. -// pub fn channel() -> (NotificationSender, Self) { -// let hub = Hub::new(TK::TRACING_KEY); -// let sender = NotificationSender { hub: hub.clone() }; -// let receiver = NotificationStream { hub, _pd: Default::default() }; -// (sender, receiver) -// } func NewNotificationStream[Payload any]() (NotificationSender[Payload], NotificationStream[Payload]) { hub := pubsub.NewHub("", ®istry[Payload]{}) sender := NotificationSender[Payload]{hub: hub} diff --git a/internal/primitives/api/api.go b/internal/primitives/api/api.go index f498ebd70e..e9a798530f 100644 --- a/internal/primitives/api/api.go +++ b/internal/primitives/api/api.go @@ -73,17 +73,7 @@ type ApiExt[ ExecuteBlock(runtimeApiAtParam H, block runtime.Block[H, N, E, Header]) error } -// pub trait Core { type Core[H runtime.Hash] interface { - /// Returns the version of the runtime. - // fn version() -> RuntimeVersion; + // Returns the version of the runtime. Version(hash H) (version.RuntimeVersion, error) - // /// Execute the given block. - // fn execute_block(block: Block); - // /// Initialize a block with the given header. - // #[changed_in(5)] - // #[renamed("initialise_block", 2)] - // fn initialize_block(header: &::Header); - // /// Initialize a block with the given header and return the runtime executive mode. 
- // fn initialize_block(header: &::Header) -> ExtrinsicInclusionMode; } diff --git a/internal/primitives/consensus/grandpa/grandpa.go b/internal/primitives/consensus/grandpa/grandpa.go index 7618a21e77..97cd7c538e 100644 --- a/internal/primitives/consensus/grandpa/grandpa.go +++ b/internal/primitives/consensus/grandpa/grandpa.go @@ -99,71 +99,55 @@ type GrandpaJustification[H runtime.Hash, N runtime.Number, Header runtime.Heade VoteAncestries []Header } -// / An consensus log item for GRANDPA. -// #[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] -// #[cfg_attr(feature = "serde", derive(Serialize))] -// pub enum ConsensusLog { +// A consensus log item for GRANDPA. type ConsensusLog interface { isConsensusLog() } -// / Schedule an authority set change. -// / -// / The earliest digest of this type in a single block will be respected, -// / provided that there is no `ForcedChange` digest. If there is, then the -// / `ForcedChange` will take precedence. -// / -// / No change should be scheduled if one is already and the delay has not -// / passed completely. -// / -// / This should be a pure function: i.e. as long as the runtime can interpret -// / the digest type it should return the same result regardless of the current -// / state. -// #[codec(index = 1)] -// ScheduledChange(ScheduledChange), +// Schedule an authority set change. +// +// The earliest digest of this type in a single block will be respected, +// provided that there is no ForcedChange digest. If there is, then the +// ForcedChange will take precedence. +// +// No change should be scheduled if one is already and the delay has not +// passed completely. +// +// This should be a pure function: i.e. as long as the runtime can interpret +// the digest type it should return the same result regardless of the current +// state. type ConensusLogScheduledChange[N runtime.Number] ScheduledChange[N] -// / Force an authority set change. 
-// / -// / Forced changes are applied after a delay of _imported_ blocks, -// / while pending changes are applied after a delay of _finalized_ blocks. -// / -// / The earliest digest of this type in a single block will be respected, -// / with others ignored. -// / -// / No change should be scheduled if one is already and the delay has not -// / passed completely. -// / -// / This should be a pure function: i.e. as long as the runtime can interpret -// / the digest type it should return the same result regardless of the current -// / state. +// Force an authority set change. +// +// Forced changes are applied after a delay of _imported_ blocks, +// while pending changes are applied after a delay of _finalized_ blocks. // -// #[codec(index = 2)] -// ForcedChange(N, ScheduledChange), +// The earliest digest of this type in a single block will be respected, +// with others ignored. +// +// No change should be scheduled if one is already and the delay has not +// passed completely. +// +// This should be a pure function: i.e. as long as the runtime can interpret +// the digest type it should return the same result regardless of the current +// state. type ConsensusLogForcedChange[N runtime.Number] struct { Median N ScheduledChange[N] } -// / Note that the authority with given index is disabled until the next change. -// -// #[codec(index = 3)] -// OnDisabled(AuthorityIndex), +// Note that the authority with given index is disabled until the next change. type ConsensusLogOnDisabled AuthorityIndex -// / A signal to pause the current authority set after the given delay. -// / After finalizing the block at _delay_ the authorities should stop voting. -// #[codec(index = 4)] -// Pause(N), +// A signal to pause the current authority set after the given delay. +// After finalizing the block at _delay_ the authorities should stop voting. type ConsensusLogPause[N runtime.Number] struct { Delay N } -// / A signal to resume the current authority set after the given delay. 
-// / After authoring the block at _delay_ the authorities should resume voting. -// -// #[codec(index = 5)] -// Resume(N), +// A signal to resume the current authority set after the given delay. +// After authoring the block at _delay_ the authorities should resume voting. type ConsensusLogResume[N runtime.Number] struct { Delay N } @@ -237,7 +221,7 @@ func (mvdt ConsensusLogVDT[N]) ValueAt(index uint) (value any, err error) { return nil, scale.ErrUnknownVaryingDataTypeValue } -// EquiovcationProof is proof of voter misbehavior on a given set id. Misbehavior/equivocation in GRANDPA happens when +// EquivocationProof is proof of voter misbehavior on a given set id. Misbehavior/equivocation in GRANDPA happens when // a voter votes on the same round (either at prevote or precommit stage) for different blocks. Proving is achieved by // collecting the signed messages of conflicting votes. type EquivocationProof[H runtime.Hash, N runtime.Number] struct { @@ -386,6 +370,5 @@ type GrandpaAPI[H runtime.Hash, N runtime.Number] interface { ) *OpaqueKeyOwnershipProof /// Get current GRANDPA authority set id. - // fn current_set_id() -> SetId; CurrentSetID(hash H) (SetID, error) } diff --git a/internal/primitives/crypto/hashing/hashing.go b/internal/primitives/crypto/hashing/hashing.go index 47cee391f7..94f6aef1a9 100644 --- a/internal/primitives/crypto/hashing/hashing.go +++ b/internal/primitives/crypto/hashing/hashing.go @@ -41,7 +41,7 @@ func Keccak256(data []byte) [32]byte { return buf } -// / Do a XX 128-bit hash and return result. +// Do a XX 128-bit hash and return result. 
func Twox128(data []byte) [16]byte { // compute xxHash64 twice with seeds 0 and 1 applied on given byte array h0 := xxhash.NewS64(0) // create xxHash with 0 seed diff --git a/internal/primitives/runtime/runtime.go b/internal/primitives/runtime/runtime.go index 29e6763d22..bf2e69025b 100644 --- a/internal/primitives/runtime/runtime.go +++ b/internal/primitives/runtime/runtime.go @@ -47,12 +47,8 @@ func (j Justifications) Get(engineID ConsensusEngineID) *EncodedJustification { return nil } -// / Return a copy of the encoded justification for the given consensus -// / engine, if it exists. -// -// pub fn into_justification(self, engine_id: ConsensusEngineId) -> Option { -// self.into_iter().find(|j| j.0 == engine_id).map(|j| j.1) -// } +// Return a copy of the encoded justification for the given consensus +// engine, if it exists. func (j Justifications) IntoJustification(enginedID ConsensusEngineID) *EncodedJustification { for _, justification := range j { if justification.ConsensusEngineID == enginedID { diff --git a/internal/primitives/version/version.go b/internal/primitives/version/version.go index 2ee5684473..321594d63f 100644 --- a/internal/primitives/version/version.go +++ b/internal/primitives/version/version.go @@ -90,11 +90,7 @@ func (v *RuntimeVersion) StateVersion() storage.StateVersion { return storage.StateVersion(stateVersion) } -// / Returns the api version found for api with `id`. -// -// pub fn api_version(&self, id: &ApiId) -> Option { -// self.apis.iter().find_map(|a| (a.0 == *id).then(|| a.1)) -// } +// Returns the api version found for api with id. 
func (v *RuntimeVersion) APIVersion(id ApiID) *uint32 { for _, api := range v.APIs { if api.ApiID == id { From 0745d6010486715b1cb49e37bea6e2f53805e0be Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Mon, 9 Jun 2025 14:58:43 -0400 Subject: [PATCH 3/6] fix lint --- internal/client/consensus/grandpa/authorities.go | 10 ++++++++-- internal/client/consensus/grandpa/authorities_test.go | 9 +++++++-- internal/client/consensus/grandpa/import.go | 2 +- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/internal/client/consensus/grandpa/authorities.go b/internal/client/consensus/grandpa/authorities.go index 6baa1d552d..55532e950b 100644 --- a/internal/client/consensus/grandpa/authorities.go +++ b/internal/client/consensus/grandpa/authorities.go @@ -144,10 +144,11 @@ func (sas *SharedAuthoritySet[H, N]) applyForcedChanges( //nolint:unused bestHash H, bestNumber N, isDescendentOf IsDescendentOf[H], + initialSync bool, // TODO: telemetry, ) (newSet *appliedChanges[H, N], err error) { authSet := sas.inner.Data() - return authSet.applyForcedChanges(bestHash, bestNumber, isDescendentOf) + return authSet.applyForcedChanges(bestHash, bestNumber, isDescendentOf, initialSync) } // applyStandardChanges will apply or prune any pending transitions based on a finality trigger. 
This method ensures @@ -505,6 +506,7 @@ func (authSet *AuthoritySet[H, N]) currentLimit(min N) (limit *N) { func (authSet *AuthoritySet[H, N]) applyForcedChanges(bestHash H, //skipcq: RVV-B0001 bestNumber N, isDescendentOf IsDescendentOf[H], + initialSync bool, // TODO: telemetry ) (newSet *appliedChanges[H, N], err error) { @@ -541,7 +543,11 @@ func (authSet *AuthoritySet[H, N]) applyForcedChanges(bestHash H, //skipcq: RVV } // apply this change: make the set canonical - logger.Infof("👴 Applying authority set change forced at block #%d", change.CanonHeight) + l := logger.Infof + if initialSync { + l = logger.Debugf + } + l("👴 Applying authority set change forced at block #%d", change.CanonHeight) // TODO telemetry diff --git a/internal/client/consensus/grandpa/authorities_test.go b/internal/client/consensus/grandpa/authorities_test.go index b54cd142e8..34f543c288 100644 --- a/internal/client/consensus/grandpa/authorities_test.go +++ b/internal/client/consensus/grandpa/authorities_test.go @@ -577,12 +577,13 @@ func TestForceChanges(t *testing.T) { "hash_a10", 10, staticIsDescendentOf[string](true), + false, ) require.NoError(t, err) require.Nil(t, resForced) // too late - resForced, err = authorities.applyForcedChanges("hash_a16", 16, isDescOfA) + resForced, err = authorities.applyForcedChanges("hash_a16", 16, isDescOfA, false) require.NoError(t, err) require.Nil(t, resForced) @@ -602,7 +603,7 @@ func TestForceChanges(t *testing.T) { }, }, } - resForced, err = authorities.applyForcedChanges("hash_a15", 15, isDescOfA) + resForced, err = authorities.applyForcedChanges("hash_a15", 15, isDescOfA, false) require.NoError(t, err) require.NotNil(t, resForced) require.Equal(t, exp, *resForced) @@ -644,6 +645,7 @@ func TestForceChangesWithNoDelay(t *testing.T) { hashA, 5, staticIsDescendentOf[string](false), + false, ) require.NoError(t, err) require.NotNil(t, resForced) @@ -723,6 +725,7 @@ func TestForceChangesBlockedByStandardChanges(t *testing.T) { "hash_d45", 45, 
staticIsDescendentOf[string](true), + false, ) require.ErrorIs(t, err, errForcedAuthoritySetChangeDependencyUnsatisfied) require.Equal(t, 0, len(authorities.AuthoritySetChanges)) @@ -748,6 +751,7 @@ func TestForceChangesBlockedByStandardChanges(t *testing.T) { "hash_d45", 45, staticIsDescendentOf[string](true), + false, ) require.ErrorIs(t, err, errForcedAuthoritySetChangeDependencyUnsatisfied) require.Equal(t, expChanges, authorities.AuthoritySetChanges) @@ -787,6 +791,7 @@ func TestForceChangesBlockedByStandardChanges(t *testing.T) { hashD, 45, staticIsDescendentOf[string](true), + false, ) require.NoError(t, err) require.NotNil(t, resForced) diff --git a/internal/client/consensus/grandpa/import.go b/internal/client/consensus/grandpa/import.go index d5eb68be72..b5a6ea1693 100644 --- a/internal/client/consensus/grandpa/import.go +++ b/internal/client/consensus/grandpa/import.go @@ -346,7 +346,6 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( // when we update the authorities, we need to hold the lock // until the block is written to prevent a race if we need to restore // the old authority set on error or panic. 
- number := block.Header.Number() maybeChange := gbi.checkNewChange(block.Header, hash) @@ -387,6 +386,7 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( hash, number, isDescendentOf, + initialSync, ) if err != nil { return nil, err From 7db9546d3381ad1358ef3b20102f0a9cb60b3333 Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Mon, 9 Jun 2025 15:05:27 -0400 Subject: [PATCH 4/6] add license --- internal/client/consensus/grandpa/import.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/client/consensus/grandpa/import.go b/internal/client/consensus/grandpa/import.go index b5a6ea1693..40607fc982 100644 --- a/internal/client/consensus/grandpa/import.go +++ b/internal/client/consensus/grandpa/import.go @@ -1,3 +1,6 @@ +// Copyright 2025 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + package grandpa import ( From 60b5d787afab2115eda56166dc4b93b6539892c3 Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Thu, 19 Jun 2025 13:53:41 -0400 Subject: [PATCH 5/6] revise StorageProvider.Storage --- internal/client/api/backend.go | 2 +- internal/client/consensus/grandpa/import.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/client/api/backend.go b/internal/client/api/backend.go index 309ea7f9f1..2823408c73 100644 --- a/internal/client/api/backend.go +++ b/internal/client/api/backend.go @@ -257,7 +257,7 @@ type AuxStore interface { // StorageProvider provides access to storage primitives type StorageProvider[H runtime.Hash, N runtime.Number, Hasher runtime.Hasher[H]] interface { // Given a block hash and a key, return the value under the key in that block. - Storage(hash H, key storage.StorageKey) (*storage.StorageData, error) + Storage(hash H, key storage.StorageKey) (storage.StorageData, error) } // Backend is the client backend. 
diff --git a/internal/client/consensus/grandpa/import.go b/internal/client/consensus/grandpa/import.go index 40607fc982..66a728a540 100644 --- a/internal/client/consensus/grandpa/import.go +++ b/internal/client/consensus/grandpa/import.go @@ -510,7 +510,7 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) currentSetID(hash H) (gr id, _ := gbi.inner.Storage(hash, storage.StorageKey(k)) if id != nil { var setID grandpa.SetID - err := scale.Unmarshal(*id, &setID) + err := scale.Unmarshal(id, &setID) if err == nil { return setID, nil } From f9fc433ad5acd003c440748ebf98ede4b77836a5 Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Thu, 19 Jun 2025 18:33:05 -0400 Subject: [PATCH 6/6] cr feedback --- internal/client/consensus/grandpa/grandpa.go | 8 +- internal/client/consensus/grandpa/import.go | 127 ++++++++---------- .../primitives/consensus/grandpa/grandpa.go | 18 +-- internal/primitives/runtime/runtime.go | 3 +- 4 files changed, 70 insertions(+), 86 deletions(-) diff --git a/internal/client/consensus/grandpa/grandpa.go b/internal/client/consensus/grandpa/grandpa.go index bd0246d16d..4c592b3546 100644 --- a/internal/client/consensus/grandpa/grandpa.go +++ b/internal/client/consensus/grandpa/grandpa.go @@ -237,13 +237,7 @@ func blockImportWithAuthoritySetHardForks[ chainInfo := client.Info() genesisHash := chainInfo.GenesisHash - persistentData, err := loadPersistent[H, N](client, genesisHash, 0, func() (primitives.AuthorityList, error) { - authorities, err := genesisAuthoritySetProvider.Get() - if err != nil { - return nil, err - } - return authorities, nil - }) + persistentData, err := loadPersistent[H, N](client, genesisHash, 0, genesisAuthoritySetProvider.Get) if err != nil { return nil, LinkHalf[H, N, Hasher, Header, E]{}, err } diff --git a/internal/client/consensus/grandpa/import.go b/internal/client/consensus/grandpa/import.go index 66a728a540..3846784d7c 100644 --- a/internal/client/consensus/grandpa/import.go +++ 
b/internal/client/consensus/grandpa/import.go @@ -129,9 +129,7 @@ func newGrandpaBlockImport[ setID := hardFork.SetID if setID == SetID(sharedAuthoritySet.SetID()) { authoritySet, unlock := sharedAuthoritySet.inner.DataMut() - // authoritySet.mtx.Lock() authoritySet.CurrentAuthorities = hardFork.PendingChange.NextAuthorities - // authoritySet.mtx.Unlock() unlock() } } @@ -183,7 +181,7 @@ func FindScheduledChange[ id := runtime.OpaqueDigestItemIDConsensus(grandpa.GrandpaEngineID) filterLog := func(log grandpa.ConsensusLog) *grandpa.ScheduledChange[N] { - scheduledChange, ok := log.(grandpa.ConensusLogScheduledChange[N]) + scheduledChange, ok := log.(grandpa.ConsensusLogScheduledChange[N]) if !ok { return nil } @@ -405,10 +403,7 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( // with. we use the minimum between the median and the local // best finalized block. bestFinalizedNumber := gbi.inner.Info().FinalizedNumber - canonNumber := bestFinalizedNumber - if medianLastFinalizedNumber < bestFinalizedNumber { - canonNumber = medianLastFinalizedNumber - } + canonNumber := min(medianLastFinalizedNumber, bestFinalizedNumber) canonHash, err := gbi.inner.Hash(canonNumber) if err != nil { return nil, err @@ -536,69 +531,66 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importState( // Force imported state finality. block.Finalized = true importResult, err := gbi.inner.ImportBlock(block) - if err == nil { - switch importResult := importResult.(type) { - case client_common.ImportResultImported: - aux := client_common.ImportedAux(importResult) - // We've just imported a new state. We trust the sync module has verified - // finality proofs and that the state is correct and final. - // So we can read the authority list and set id from the state. 
- gbi.authoritySetHardForksMtx.Lock() - gbi.authoritySetHardForks = make(map[H]PendingChange[H, N]) - gbi.authoritySetHardForksMtx.Unlock() - authorities, err := gbi.inner.RuntimeAPI().GrandpaAuthorities(hash) - if err != nil { - return nil, err - } - setID, err := gbi.currentSetID(hash) - if err != nil { - return nil, err - } - authoritySet, err := NewAuthoritySet[H, N]( - authorities, - uint64(setID), - forktree.NewForkTree[H, N, PendingChange[H, N]](), - []PendingChange[H, N]{}, - AuthoritySetChanges[N]{}, - ) - if err != nil { - return nil, err - } + if err != nil { + return nil, err + } + switch importResult := importResult.(type) { + case client_common.ImportResultImported: + aux := client_common.ImportedAux(importResult) + // We've just imported a new state. We trust the sync module has verified + // finality proofs and that the state is correct and final. + // So we can read the authority list and set id from the state. + gbi.authoritySetHardForksMtx.Lock() + gbi.authoritySetHardForks = make(map[H]PendingChange[H, N]) + gbi.authoritySetHardForksMtx.Unlock() + authorities, err := gbi.inner.RuntimeAPI().GrandpaAuthorities(hash) + if err != nil { + return nil, err + } + setID, err := gbi.currentSetID(hash) + if err != nil { + return nil, err + } + authoritySet, err := NewAuthoritySet[H, N]( + authorities, + uint64(setID), + forktree.NewForkTree[H, N, PendingChange[H, N]](), + []PendingChange[H, N]{}, + AuthoritySetChanges[N]{}, + ) + if err != nil { + return nil, err + } - locked := gbi.authoritySet.inner.Locked() - *locked.MutRef() = authoritySet.Clone() - defer locked.Unlock() + locked := gbi.authoritySet.inner.Locked() + *locked.MutRef() = authoritySet.Clone() + defer locked.Unlock() - err = updateAuthoritySet( - locked.Data(), - nil, - func(insertions []api.KeyValue) error { - return gbi.inner.InsertAux(insertions, nil) - }, - ) - if err != nil { - return nil, err - } - newSet := newAuthoritySet[H, N]{ - CanonNumber: number, - CanonHash: hash, - SetID: 
setID, - Authorities: authorities, - } - gbi.sendVoterCommands <- voterCommandChangeAuthorities[H, N](newSet) - return client_common.ImportResultImported(aux), nil - case client_common.ImportResultAlreadyInChain, - client_common.ImportResultKnownBad, - client_common.ImportResultMissingState, - client_common.ImportResultUnknownParent: - // Ok(r) => Ok(r), - return importResult, nil - default: - panic("unreachable") + err = updateAuthoritySet( + locked.Data(), + nil, + func(insertions []api.KeyValue) error { + return gbi.inner.InsertAux(insertions, nil) + }, + ) + if err != nil { + return nil, err } - - } else { - return nil, err + newSet := newAuthoritySet[H, N]{ + CanonNumber: number, + CanonHash: hash, + SetID: setID, + Authorities: authorities, + } + gbi.sendVoterCommands <- voterCommandChangeAuthorities[H, N](newSet) + return client_common.ImportResultImported(aux), nil + case client_common.ImportResultAlreadyInChain, + client_common.ImportResultKnownBad, + client_common.ImportResultMissingState, + client_common.ImportResultUnknownParent: + return importResult, nil + default: + panic("unreachable") } } @@ -712,7 +704,6 @@ func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) ImportBlock( importedAux.ClearJustificationRequests = true case importAppliedChangesStandard: - // this is a standard change, we don't apply it yet, but we will send a // we can't apply this change yet since there are other dependent changes that we // need to apply first, drop any justification that might have been provided with // the block to make sure we request them from `sync` which will ensure they'll be diff --git a/internal/primitives/consensus/grandpa/grandpa.go b/internal/primitives/consensus/grandpa/grandpa.go index 97cd7c538e..5e0811f726 100644 --- a/internal/primitives/consensus/grandpa/grandpa.go +++ b/internal/primitives/consensus/grandpa/grandpa.go @@ -116,7 +116,7 @@ type ConsensusLog interface { // This should be a pure function: i.e. 
as long as the runtime can interpret // the digest type it should return the same result regardless of the current // state. -type ConensusLogScheduledChange[N runtime.Number] ScheduledChange[N] +type ConsensusLogScheduledChange[N runtime.Number] ScheduledChange[N] // Force an authority set change. // @@ -152,11 +152,11 @@ type ConsensusLogResume[N runtime.Number] struct { Delay N } -func (ConensusLogScheduledChange[N]) isConsensusLog() {} -func (ConsensusLogForcedChange[N]) isConsensusLog() {} -func (ConsensusLogOnDisabled) isConsensusLog() {} -func (ConsensusLogPause[N]) isConsensusLog() {} -func (ConsensusLogResume[N]) isConsensusLog() {} +func (ConsensusLogScheduledChange[N]) isConsensusLog() {} +func (ConsensusLogForcedChange[N]) isConsensusLog() {} +func (ConsensusLogOnDisabled) isConsensusLog() {} +func (ConsensusLogPause[N]) isConsensusLog() {} +func (ConsensusLogResume[N]) isConsensusLog() {} type ConsensusLogVDT[N runtime.Number] struct { inner ConsensusLog @@ -164,7 +164,7 @@ type ConsensusLogVDT[N runtime.Number] struct { func (mvdt *ConsensusLogVDT[N]) SetValue(value any) (err error) { switch value := value.(type) { - case ConensusLogScheduledChange[N]: + case ConsensusLogScheduledChange[N]: mvdt.inner = value return case ConsensusLogForcedChange[N]: @@ -186,7 +186,7 @@ func (mvdt *ConsensusLogVDT[N]) SetValue(value any) (err error) { func (mvdt ConsensusLogVDT[N]) IndexValue() (index uint, value any, err error) { switch mvdt.inner.(type) { - case ConensusLogScheduledChange[N]: + case ConsensusLogScheduledChange[N]: return 1, mvdt.inner, nil case ConsensusLogForcedChange[N]: return 2, mvdt.inner, nil @@ -208,7 +208,7 @@ func (mvdt ConsensusLogVDT[N]) Value() (value any, err error) { func (mvdt ConsensusLogVDT[N]) ValueAt(index uint) (value any, err error) { switch index { case 1: - return ConensusLogScheduledChange[N]{}, nil + return ConsensusLogScheduledChange[N]{}, nil case 2: return ConsensusLogForcedChange[N]{}, nil case 3: diff --git 
a/internal/primitives/runtime/runtime.go b/internal/primitives/runtime/runtime.go index bf2e69025b..0be19041bf 100644 --- a/internal/primitives/runtime/runtime.go +++ b/internal/primitives/runtime/runtime.go @@ -47,8 +47,7 @@ func (j Justifications) Get(engineID ConsensusEngineID) *EncodedJustification { return nil } -// Return a copy of the encoded justification for the given consensus -// engine, if it exists. +// IntoJustification returns the encoded justification for the given consensus engine, if it exists. func (j Justifications) IntoJustification(enginedID ConsensusEngineID) *EncodedJustification { for _, justification := range j { if justification.ConsensusEngineID == enginedID {