diff --git a/internal/client/api/backend.go b/internal/client/api/backend.go index 35b110fb1f..2823408c73 100644 --- a/internal/client/api/backend.go +++ b/internal/client/api/backend.go @@ -254,6 +254,12 @@ type AuxStore interface { GetAux(key []byte) ([]byte, error) } +// StorageProvider provides access to storage primitives +type StorageProvider[H runtime.Hash, N runtime.Number, Hasher runtime.Hasher[H]] interface { + // Given a block hash and a key, return the value under the key in that block. + Storage(hash H, key storage.StorageKey) (storage.StorageData, error) +} + // Backend is the client backend. // // Manages the data layer. diff --git a/internal/client/consensus/common/block_import.go b/internal/client/consensus/common/block_import.go index a198bf36f2..c9f687a6f5 100644 --- a/internal/client/consensus/common/block_import.go +++ b/internal/client/consensus/common/block_import.go @@ -183,3 +183,29 @@ type BlockImportParams[H runtime.Hash, N runtime.Number, E runtime.Extrinsic, He // Cached full header hash (with post-digests applied). PostHash *H } + +// GetPostHash retrieves the full header hash (with post-digests applied). +func (b *BlockImportParams[H, N, E, Header]) GetPostHash() H { + if b.PostHash != nil { + return *b.PostHash + } + return b.GetPostHeader().Hash() +} + +// GetPostHeader retrieves the post header. 
+func (b *BlockImportParams[H, N, E, Header]) GetPostHeader() Header { + if len(b.PostDigests) == 0 { + return b.Header.Clone().(Header) + } + hdr := b.Header.Clone().(Header) + for _, digestItem := range b.PostDigests { + hdr.DigestMut().Push(digestItem) + } + return hdr +} + +// WithState checks if this block contains state import action +func (b *BlockImportParams[H, N, E, Header]) WithState() bool { + _, ok := b.StateAction.(StateActionApplyChanges) + return ok +} diff --git a/internal/client/consensus/grandpa/authorities.go b/internal/client/consensus/grandpa/authorities.go index 6baa1d552d..55532e950b 100644 --- a/internal/client/consensus/grandpa/authorities.go +++ b/internal/client/consensus/grandpa/authorities.go @@ -144,10 +144,11 @@ func (sas *SharedAuthoritySet[H, N]) applyForcedChanges( //nolint:unused bestHash H, bestNumber N, isDescendentOf IsDescendentOf[H], + initialSync bool, // TODO: telemetry, ) (newSet *appliedChanges[H, N], err error) { authSet := sas.inner.Data() - return authSet.applyForcedChanges(bestHash, bestNumber, isDescendentOf) + return authSet.applyForcedChanges(bestHash, bestNumber, isDescendentOf, initialSync) } // applyStandardChanges will apply or prune any pending transitions based on a finality trigger. 
This method ensures @@ -505,6 +506,7 @@ func (authSet *AuthoritySet[H, N]) currentLimit(min N) (limit *N) { func (authSet *AuthoritySet[H, N]) applyForcedChanges(bestHash H, //skipcq: RVV-B0001 bestNumber N, isDescendentOf IsDescendentOf[H], + initialSync bool, // TODO: telemetry ) (newSet *appliedChanges[H, N], err error) { @@ -541,7 +543,11 @@ func (authSet *AuthoritySet[H, N]) applyForcedChanges(bestHash H, //skipcq: RVV } // apply this change: make the set canonical - logger.Infof("👴 Applying authority set change forced at block #%d", change.CanonHeight) + l := logger.Infof + if initialSync { + l = logger.Debugf + } + l("👴 Applying authority set change forced at block #%d", change.CanonHeight) // TODO telemetry diff --git a/internal/client/consensus/grandpa/authorities_test.go b/internal/client/consensus/grandpa/authorities_test.go index b54cd142e8..34f543c288 100644 --- a/internal/client/consensus/grandpa/authorities_test.go +++ b/internal/client/consensus/grandpa/authorities_test.go @@ -577,12 +577,13 @@ func TestForceChanges(t *testing.T) { "hash_a10", 10, staticIsDescendentOf[string](true), + false, ) require.NoError(t, err) require.Nil(t, resForced) // too late - resForced, err = authorities.applyForcedChanges("hash_a16", 16, isDescOfA) + resForced, err = authorities.applyForcedChanges("hash_a16", 16, isDescOfA, false) require.NoError(t, err) require.Nil(t, resForced) @@ -602,7 +603,7 @@ func TestForceChanges(t *testing.T) { }, }, } - resForced, err = authorities.applyForcedChanges("hash_a15", 15, isDescOfA) + resForced, err = authorities.applyForcedChanges("hash_a15", 15, isDescOfA, false) require.NoError(t, err) require.NotNil(t, resForced) require.Equal(t, exp, *resForced) @@ -644,6 +645,7 @@ func TestForceChangesWithNoDelay(t *testing.T) { hashA, 5, staticIsDescendentOf[string](false), + false, ) require.NoError(t, err) require.NotNil(t, resForced) @@ -723,6 +725,7 @@ func TestForceChangesBlockedByStandardChanges(t *testing.T) { "hash_d45", 45, 
staticIsDescendentOf[string](true), + false, ) require.ErrorIs(t, err, errForcedAuthoritySetChangeDependencyUnsatisfied) require.Equal(t, 0, len(authorities.AuthoritySetChanges)) @@ -748,6 +751,7 @@ func TestForceChangesBlockedByStandardChanges(t *testing.T) { "hash_d45", 45, staticIsDescendentOf[string](true), + false, ) require.ErrorIs(t, err, errForcedAuthoritySetChangeDependencyUnsatisfied) require.Equal(t, expChanges, authorities.AuthoritySetChanges) @@ -787,6 +791,7 @@ func TestForceChangesBlockedByStandardChanges(t *testing.T) { hashD, 45, staticIsDescendentOf[string](true), + false, ) require.NoError(t, err) require.NotNil(t, resForced) diff --git a/internal/client/consensus/grandpa/aux_schema.go b/internal/client/consensus/grandpa/aux_schema.go index 005e072a80..c803fcae84 100644 --- a/internal/client/consensus/grandpa/aux_schema.go +++ b/internal/client/consensus/grandpa/aux_schema.go @@ -7,6 +7,8 @@ import ( "fmt" "github.com/ChainSafe/gossamer/internal/client/api" + shareddata "github.com/ChainSafe/gossamer/internal/client/consensus/common/shared-data" + primitives "github.com/ChainSafe/gossamer/internal/primitives/consensus/grandpa" "github.com/ChainSafe/gossamer/internal/primitives/runtime" grandpa "github.com/ChainSafe/gossamer/pkg/finality-grandpa" "github.com/ChainSafe/gossamer/pkg/scale" @@ -21,12 +23,102 @@ var ( type writeAux func(insertions []api.KeyValue) error +type getGenesisAuthorities func() (primitives.AuthorityList, error) + // Persistent data kept between runs. 
type persistentData[H runtime.Hash, N runtime.Number] struct { authoritySet *SharedAuthoritySet[H, N] setState *SharedVoterSetState[H, N] } +func loadDecoded[T any](store api.AuxStore, key []byte) (*T, error) { + encodedValue, err := store.GetAux(key) + if err != nil { + return nil, err + } + if encodedValue == nil { + return nil, nil + } + + var dst T + err = scale.Unmarshal(encodedValue, &dst) + if err != nil { + return nil, err + } + return &dst, nil +} + +func loadPersistent[H runtime.Hash, N runtime.Number]( + store api.AuxStore, + genesisHash H, + genesisNumber N, + genesisAuths getGenesisAuthorities, +) (*persistentData[H, N], error) { + genesis := grandpa.HashNumber[H, N]{Hash: genesisHash, Number: genesisNumber} + makeGenesisRound := grandpa.NewRoundState[H, N] + + authSet, err := loadDecoded[AuthoritySet[H, N]](store, authoritySetKey) + if err != nil { + return nil, err + } + if authSet != nil { + var setState voterSetState[H, N] + state, err := loadDecoded[voterSetStateVDT[H, N]](store, setStateKey) + if err != nil { + return nil, err + } + + if state != nil && state.inner != nil { + setState = state.inner + } else { + state := makeGenesisRound(genesis) + if state.PrevoteGHOST == nil { + panic("state is for completed round; completed rounds must have a prevote ghost; qed.") + } + base := state.PrevoteGHOST + setState = newVoterSetStateLive(primitives.SetID(authSet.SetID), *authSet, *base) + } + + return &persistentData[H, N]{ + authoritySet: &SharedAuthoritySet[H, N]{inner: *shareddata.NewSharedData(*authSet)}, + setState: &SharedVoterSetState[H, N]{inner: setState}, + }, nil + } + + logger.Info("👴 Loading GRANDPA authority set from genesis on what appears to be first startup") + genesisAuthorities, err := genesisAuths() + if err != nil { + return nil, err + } + genesisSet, err := NewGenesisAuthoritySet[H, N](genesisAuthorities) + if err != nil { + panic("genesis authorities is non-empty; all weights are non-zero; qed.") + } + + state := 
makeGenesisRound(genesis) + base := state.PrevoteGHOST + if base == nil { + panic("state is for completed round; completed rounds must have a prevote ghost; qed.") + } + + genesisState := newVoterSetStateLive(0, *genesisSet, *base) + genesisStateVDT := newVoterSetStateVDT[H, N]() + genesisStateVDT.inner = genesisState + insert := []api.KeyValue{ + {Key: authoritySetKey, Value: scale.MustMarshal(*genesisSet)}, + {Key: setStateKey, Value: scale.MustMarshal(genesisStateVDT)}, + } + err = store.InsertAux(insert, nil) + if err != nil { + return nil, err + } + + return &persistentData[H, N]{ + authoritySet: &SharedAuthoritySet[H, N]{inner: *shareddata.NewSharedData(*genesisSet)}, + setState: &SharedVoterSetState[H, N]{inner: genesisState}, + }, nil +} + // updateAuthoritySet Update the authority set on disk after a change. // // If there has just been a handoff, pass a newSet parameter that describes the handoff. set in all cases should diff --git a/internal/client/consensus/grandpa/grandpa.go b/internal/client/consensus/grandpa/grandpa.go index 8bb92d41ed..4c592b3546 100644 --- a/internal/client/consensus/grandpa/grandpa.go +++ b/internal/client/consensus/grandpa/grandpa.go @@ -110,7 +110,7 @@ type ClientForGrandpa[ papi.ProvideRuntimeAPI[primitives.GrandpaAPI[H, N]] // api.ExecutorProvider client_common.BlockImport[H, N, E, Header] - // api.StorageProvider[H, N, Hasher] + api.StorageProvider[H, N, Hasher] } // Something that one can ask to do a block sync request. @@ -165,7 +165,7 @@ type LinkHalf[ persistentData persistentData[H, N] voterCommandsRx chan voterCommand justificationSender GrandpaJustificationSender[H, N, Header] - justificationStream GrandpaJustificationStream[H, N, Header] //nolint: unused + justificationStream GrandpaJustificationStream[H, N, Header] } // Provider for the Grandpa authority set configured on the genesis block. 
@@ -174,6 +174,127 @@ type GenesisAuthoritySetProvider interface { Get() (primitives.AuthorityList, error) } +// Make block importer and link half necessary to tie the background voter to it. +// +// The justificationImportPeriod sets the minimum period on which justifications will be imported. When importing +// a block, if it includes a justification it will only be processed if it fits within this period, otherwise it will +// be ignored (and won't be validated). This is to avoid slowing down sync by a peer serving us unnecessary +// justifications which aren't trivial to validate. +func BlockImport[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +]( + client ClientForGrandpa[H, N, Hasher, Header, E], + justificationImportPeriod uint32, + genesisAuthoritySetProvider GenesisAuthoritySetProvider, + selectChain common.SelectChain[H, N, Header], + // TODO: telemetry +) (*GrandpaBlockImport[H, N, Hasher, Header, E], LinkHalf[H, N, Hasher, Header, E], error) { + return blockImportWithAuthoritySetHardForks( + client, + justificationImportPeriod, + genesisAuthoritySetProvider, + selectChain, + nil, + ) +} + +// A descriptor for an authority set hard fork. These are authority set changes that are not signalled by the runtime +// and instead are defined off-chain (hence the hard fork). +type AuthoritySetHardFork[H, N any] struct { + // The new authority set id. + SetID SetID + // The block hash and number at which the hard fork should be applied. + Block HashNumber[H, N] + // The authorities in the new set. + Authorities primitives.AuthorityList + // The latest block number that was finalized before this authority set hard fork. When defined, the authority set + // change will be forced, i.e. the node won't wait for the block above to be finalized before enacting the change, + // and the given finalized number will be used as a base for voting. 
+ LastFinalized *N +} + +// Make block importer and link half necessary to tie the background voter to it. A vector of authority set hard forks +// can be passed, any authority set change signalled at the given block (either already signalled or in a further block +// when importing it) will be replaced by a standard change with the given static authorities. +func blockImportWithAuthoritySetHardForks[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +]( + client ClientForGrandpa[H, N, Hasher, Header, E], + justificationImportPeriod uint32, + genesisAuthoritySetProvider GenesisAuthoritySetProvider, + selectChain common.SelectChain[H, N, Header], + authoritySetHardForks []AuthoritySetHardFork[H, N], + // TODO: telemetry +) (*GrandpaBlockImport[H, N, Hasher, Header, E], LinkHalf[H, N, Hasher, Header, E], error) { + chainInfo := client.Info() + genesisHash := chainInfo.GenesisHash + + persistentData, err := loadPersistent[H, N](client, genesisHash, 0, genesisAuthoritySetProvider.Get) + if err != nil { + return nil, LinkHalf[H, N, Hasher, Header, E]{}, err + } + + voterCommands := make(chan voterCommand, 100000) + + justificationSender, justificationStream := NewGrandpaJustificationSender[H, N, Header]() + + // create pending change objects with 0 delay for each authority set hard fork. 
+ hardForks := make([]struct { + SetID + PendingChange[H, N] + }, len(authoritySetHardForks)) + for i, fork := range authoritySetHardForks { + var kind delayKind + if fork.LastFinalized != nil { + kind = delayKindBest[N]{MedianLastFinalized: *fork.LastFinalized} + } else { + kind = delayKindFinalized{} + } + + hardForks[i] = struct { + SetID + PendingChange[H, N] + }{ + SetID: fork.SetID, + PendingChange: PendingChange[H, N]{ + NextAuthorities: fork.Authorities, + Delay: 0, + CanonHash: fork.Block.Hash, + CanonHeight: fork.Block.Number, + DelayKind: kind, + }, + } + } + + blockImport := newGrandpaBlockImport( + client, + justificationImportPeriod, + selectChain, + persistentData.authoritySet, + voterCommands, + hardForks, + justificationSender, + ) + + linkHalf := LinkHalf[H, N, Hasher, Header, E]{ + client: client, + selectChain: selectChain, + persistentData: *persistentData, + voterCommandsRx: voterCommands, + justificationSender: justificationSender, + justificationStream: justificationStream, + } + return blockImport, linkHalf, nil +} + func globalCommunication[ H runtime.Hash, N runtime.Number, diff --git a/internal/client/consensus/grandpa/import.go b/internal/client/consensus/grandpa/import.go new file mode 100644 index 0000000000..3846784d7c --- /dev/null +++ b/internal/client/consensus/grandpa/import.go @@ -0,0 +1,829 @@ +// Copyright 2025 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package grandpa + +import ( + "fmt" + "sync" + + "github.com/ChainSafe/gossamer/internal/client/api" + "github.com/ChainSafe/gossamer/internal/client/api/utils" + client_common "github.com/ChainSafe/gossamer/internal/client/consensus/common" + shareddata "github.com/ChainSafe/gossamer/internal/client/consensus/common/shared-data" + "github.com/ChainSafe/gossamer/internal/primitives/blockchain" + "github.com/ChainSafe/gossamer/internal/primitives/consensus/common" + "github.com/ChainSafe/gossamer/internal/primitives/consensus/grandpa" + 
"github.com/ChainSafe/gossamer/internal/primitives/crypto/hashing" + "github.com/ChainSafe/gossamer/internal/primitives/runtime" + "github.com/ChainSafe/gossamer/internal/primitives/storage" + forktree "github.com/ChainSafe/gossamer/internal/utils/fork-tree" + "github.com/ChainSafe/gossamer/pkg/scale" +) + +type importAppliedChanges interface { + needsJustification() bool +} + +type importAppliedChangesStandard bool // true if the change is ready to be applied (i.e. it's a root) + +type importAppliedChangesForced[H runtime.Hash, N runtime.Number] newAuthoritySet[H, N] + +type importAppliedChangesNone struct{} + +func (importAppliedChangesStandard) needsJustification() bool { + return true +} +func (importAppliedChangesForced[H, N]) needsJustification() bool { + return false +} +func (importAppliedChangesNone) needsJustification() bool { + return false +} + +type justInCase[H runtime.Hash, N runtime.Number] struct { + old AuthoritySet[H, N] + shareddata.SharedDataLocked[AuthoritySet[H, N]] +} + +type pendingSetChanges[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], +] struct { + justInCase *justInCase[H, N] + appliedChanges importAppliedChanges + doPause bool +} + +// revert the pending set change explicitly. +func (pendingSetChanges[H, N, Hasher, Header]) revert() {} + +func (psc *pendingSetChanges[H, N, Hasher, Header]) defuse() (importAppliedChanges, bool) { + psc.justInCase = nil + appliedChanges := psc.appliedChanges + psc.appliedChanges = importAppliedChangesNone{} + return appliedChanges, psc.doPause +} + +func (psc *pendingSetChanges[H, N, Hasher, Header]) drop() { + if psc.justInCase != nil { + jic := psc.justInCase + psc.justInCase = nil + oldSet := jic.old + locked := jic.SharedDataLocked + *locked.MutRef() = oldSet + defer locked.Unlock() + } +} + +// A block-import handler for GRANDPA. +// +// This scans each imported block for signals of changing authority set. 
+// If the block being imported enacts an authority set change then: +// - If the current authority set is still live: we import the block +// - Otherwise, the block must include a valid justification. +// +// When using GRANDPA, the block import worker should be using this block import object. +type GrandpaBlockImport[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +] struct { + inner ClientForGrandpa[H, N, Hasher, Header, E] + justificationImportPeriod uint32 + selectChain common.SelectChain[H, N, Header] + authoritySet *SharedAuthoritySet[H, N] + sendVoterCommands chan voterCommand + authoritySetHardForks map[H]PendingChange[H, N] + authoritySetHardForksMtx sync.Mutex + justificationSender GrandpaJustificationSender[H, N, Header] + // TODO: telemetry +} + +func newGrandpaBlockImport[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +]( + inner ClientForGrandpa[H, N, Hasher, Header, E], + justificationImportPeriod uint32, + selectChain common.SelectChain[H, N, Header], + sharedAuthoritySet *SharedAuthoritySet[H, N], + sendVoterCommands chan voterCommand, + authoritySetHardForks []struct { + SetID + PendingChange[H, N] + }, + justificationSender GrandpaJustificationSender[H, N, Header], + // TODO: telemetry +) *GrandpaBlockImport[H, N, Hasher, Header, E] { + // check for and apply any forced authority set hard fork that applies + // to the *current* authority set. + for _, hardFork := range authoritySetHardForks { + setID := hardFork.SetID + if setID == SetID(sharedAuthoritySet.SetID()) { + authoritySet, unlock := sharedAuthoritySet.inner.DataMut() + authoritySet.CurrentAuthorities = hardFork.PendingChange.NextAuthorities + unlock() + } + } + + // index authority set hard forks by block hash so that they can be used + // by any node syncing the chain and importing a block hard fork + // authority set changes. 
+ var authoritySetHardForksMap = make(map[H]PendingChange[H, N]) + for _, hardFork := range authoritySetHardForks { + authoritySetHardForksMap[hardFork.PendingChange.CanonHash] = hardFork.PendingChange + } + + // check for and apply any forced authority set hard fork that apply to + // any *pending* standard changes, checking by the block hash at which + // they were announced. + authoritySet, unlock := sharedAuthoritySet.inner.DataMut() + + authoritySet.PendingStandardChanges = forktree.Map( + authoritySet.PendingStandardChanges, + func(hash H, _ N, original PendingChange[H, N]) PendingChange[H, N] { + if change, ok := authoritySetHardForksMap[hash]; ok { + return change + } + return original + }, + ) + unlock() + + return &GrandpaBlockImport[H, N, Hasher, Header, E]{ + inner: inner, + justificationImportPeriod: justificationImportPeriod, + selectChain: selectChain, + authoritySet: sharedAuthoritySet, + sendVoterCommands: sendVoterCommands, + authoritySetHardForks: authoritySetHardForksMap, + justificationSender: justificationSender, + } +} + +// Checks the given header for a consensus digest signalling a **standard** scheduled change and +// extracts it. +func FindScheduledChange[ + H runtime.Hash, + N runtime.Number, + Header runtime.Header[N, H], +]( + header Header, +) *grandpa.ScheduledChange[N] { + id := runtime.OpaqueDigestItemIDConsensus(grandpa.GrandpaEngineID) + + filterLog := func(log grandpa.ConsensusLog) *grandpa.ScheduledChange[N] { + scheduledChange, ok := log.(grandpa.ConsensusLogScheduledChange[N]) + if !ok { + return nil + } + orig := grandpa.ScheduledChange[N](scheduledChange) + return &orig + } + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. 
+ for _, log := range header.Digest().Logs { + logVDT := runtime.DigestItemTryTo[grandpa.ConsensusLogVDT[N]](log, id) + if logVDT == nil { + continue + } + val, err := logVDT.Value() + if err != nil { + continue + } + log := val.(grandpa.ConsensusLog) + sc := filterLog(log) + if sc != nil { + return sc + } + } + return nil +} + +type ForcedChange[H runtime.Hash, N runtime.Number] struct { + MedianLastFinalized N + grandpa.ScheduledChange[N] +} + +// Checks the given header for a consensus digest signalling a **forced** scheduled change and +// extracts it. +func FindForcedChange[ + H runtime.Hash, + N runtime.Number, + Header runtime.Header[N, H], +]( + header Header, +) *ForcedChange[H, N] { + id := runtime.OpaqueDigestItemIDConsensus(grandpa.GrandpaEngineID) + + filterLog := func(log grandpa.ConsensusLog) *ForcedChange[H, N] { + forcedChange, ok := log.(grandpa.ConsensusLogForcedChange[N]) + if !ok { + return nil + } + return &ForcedChange[H, N]{ + MedianLastFinalized: forcedChange.Delay, + ScheduledChange: forcedChange.ScheduledChange, + } + } + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + for _, log := range header.Digest().Logs { + logVDT := runtime.DigestItemTryTo[grandpa.ConsensusLogVDT[N]](log, id) + if logVDT == nil { + continue + } + val, err := logVDT.Value() + if err != nil { + continue + } + log := val.(grandpa.ConsensusLog) + fc := filterLog(log) + if fc != nil { + return fc + } + } + return nil +} + +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) checkNewChange( + header Header, + hash H, +) *PendingChange[H, N] { + // check for forced authority set hard forks + gbi.authoritySetHardForksMtx.Lock() + change, ok := gbi.authoritySetHardForks[hash] + gbi.authoritySetHardForksMtx.Unlock() + if ok { + return &change + } + + // check for forced change. 
+ fc := FindForcedChange[H, N](header) + if fc != nil { + return &PendingChange[H, N]{ + NextAuthorities: fc.ScheduledChange.NextAuthorities, + Delay: fc.ScheduledChange.Delay, + CanonHeight: header.Number(), + CanonHash: hash, + DelayKind: delayKindBest[N]{MedianLastFinalized: fc.MedianLastFinalized}, + } + } + + // check normal scheduled change. + scheduled := FindScheduledChange[H, N](header) + return &PendingChange[H, N]{ + NextAuthorities: scheduled.NextAuthorities, + Delay: scheduled.Delay, + CanonHeight: header.Number(), + CanonHash: hash, + DelayKind: delayKindFinalized{}, + } +} + +type innerGuard[H runtime.Hash, N runtime.Number] struct { + old *AuthoritySet[H, N] + guard *shareddata.SharedDataLocked[AuthoritySet[H, N]] +} + +func (ig *innerGuard[H, N]) asMut() *AuthoritySet[H, N] { + if ig.guard == nil { + panic("guard is nil; only taken on deconstruction; qed") + } + return ig.guard.MutRef() +} + +func (ig *innerGuard[H, N]) setOld(old AuthoritySet[H, N]) { + if ig.old == nil { + // ignore "newer" old changes. 
+ ig.old = &old + } +} + +type consumed[H runtime.Hash, N runtime.Number] struct { + old AuthoritySet[H, N] + shareddata.SharedDataLocked[AuthoritySet[H, N]] +} + +func (ig *innerGuard[H, N]) consume() *consumed[H, N] { + old := ig.old + ig.old = nil + if old == nil { + return nil + } + if ig.guard == nil { + panic("guard is nil; only taken on deconstruction; qed") + } + return &consumed[H, N]{ + old: *old, + SharedDataLocked: *ig.guard, + } +} + +func (ig *innerGuard[H, N]) drop() { + guard := ig.guard + ig.guard = nil + old := ig.old + ig.old = nil + if guard != nil && old != nil { + *ig.guard.MutRef() = *ig.old + } +} + +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) makeAuthoritiesChanges( + block *client_common.BlockImportParams[H, N, E, Header], + hash H, + initialSync bool, +) (*pendingSetChanges[H, N, Hasher, Header], error) { + // when we update the authorities, we need to hold the lock + // until the block is written to prevent a race if we need to restore + // the old authority set on error or panic. + number := block.Header.Number() + maybeChange := gbi.checkNewChange(block.Header, hash) + + // returns a function for checking whether a block is a descendent of another + // consistent with querying client directly after importing the block. + parentHash := block.Header.ParentHash() + isDescendentOf := utils.IsDescendantOf(gbi.inner, &utils.HashParent[H]{Hash: hash, Parent: parentHash}) + locked := gbi.authoritySet.inner.Locked() + defer locked.Unlock() + guard := innerGuard[H, N]{ + old: nil, + guard: &locked, + } + defer guard.drop() + + // whether to pause the old authority set -- happens after import + // of a forced change block. + var doPause bool + + // add any pending changes. 
+ if maybeChange != nil { + change := *maybeChange + old := guard.asMut().Clone() + guard.setOld(old) + + if _, ok := change.DelayKind.(delayKindBest[N]); ok { + doPause = true + } + + err := guard.asMut().addPendingChange(change, isDescendentOf) + if err != nil { + return nil, err + } + } + + var appliedChanges importAppliedChanges + forcedChangeSet, err := guard.asMut().applyForcedChanges( + hash, + number, + isDescendentOf, + initialSync, + ) + if err != nil { + return nil, err + } + + if forcedChangeSet != nil { + medianLastFinalizedNumber := forcedChangeSet.median + newSet := forcedChangeSet.set + setID, newAuthorities := newSet.current() + + // we will use the median last finalized number as a hint + // for the canon block the new authority set should start + // with. we use the minimum between the median and the local + // best finalized block. + bestFinalizedNumber := gbi.inner.Info().FinalizedNumber + canonNumber := min(medianLastFinalizedNumber, bestFinalizedNumber) + canonHash, err := gbi.inner.Hash(canonNumber) + if err != nil { + return nil, err + } + if canonHash == nil { + panic("the given block number is less or equal than the current best finalized number; " + + "current best finalized number must exist in chain; qed.") + } + + newAuthoritySet := newAuthoritySet[H, N]{ + CanonNumber: canonNumber, + CanonHash: *canonHash, + SetID: grandpa.SetID(setID), + Authorities: newAuthorities, + } + old := guard.asMut().Clone() + guard.setOld(old) + *guard.asMut() = newSet + + appliedChanges = importAppliedChangesForced[H, N](newAuthoritySet) + } else { + didStandard, err := guard.asMut().EnactsStandardChange(hash, number, isDescendentOf) + if err != nil { + return nil, err + } + + if didStandard != nil { + appliedChanges = importAppliedChangesStandard(*didStandard) + } else { + appliedChanges = importAppliedChangesNone{} + } + } + + // consume the guard safely and write necessary changes. 
+ justInCaseConsumed := guard.consume() + if justInCaseConsumed != nil { + authorities := &justInCaseConsumed.SharedDataLocked + var authoritiesChange *newAuthoritySet[H, N] + switch appliedChanges := appliedChanges.(type) { + case importAppliedChangesForced[H, N]: + newSet := newAuthoritySet[H, N](appliedChanges) + authoritiesChange = &newSet + case importAppliedChangesStandard: + // the change isn't actually applied yet. + case importAppliedChangesNone: + // no change + default: + panic("unreachable") + } + err := updateAuthoritySet(authorities.Data(), authoritiesChange, func(insertions []api.KeyValue) error { + converted := make([]api.AuxDataOperation, len(insertions)) + for i, kv := range insertions { + converted[i] = api.AuxDataOperation{ + Key: kv.Key, + Data: kv.Value, + } + } + block.Auxiliary = append(block.Auxiliary, converted...) + return nil + }) + if err != nil { + return nil, err + } + } + + var jic *justInCase[H, N] + if justInCaseConsumed != nil { + jic = &justInCase[H, N]{ + old: justInCaseConsumed.old, + SharedDataLocked: justInCaseConsumed.SharedDataLocked, + } + } + + return &pendingSetChanges[H, N, Hasher, Header]{ + justInCase: jic, + appliedChanges: appliedChanges, + doPause: doPause, + }, nil +} + +// Read current set id from a given state. +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) currentSetID(hash H) (grandpa.SetID, error) { + runtimeVersion, err := gbi.inner.RuntimeAPI().Version(hash) + if err != nil { + return 0, err + } + + var GrandpaID = [8]byte{237, 153, 197, 172, 178, 94, 237, 245} + apiVersion := runtimeVersion.APIVersion(GrandpaID) + + if apiVersion != nil && *apiVersion < 3 { + // The new API is not supported in this runtime. Try reading directly from storage. + // This code may be removed once warp sync to an old runtime is no longer needed. 
+ for _, prefix := range []string{"GrandpaFinality", "Grandpa"} { + k0 := hashing.Twox128([]byte(prefix)) + k1 := hashing.Twox128([]byte("CurrentSetId")) + k := k0[:] + k = append(k, k1[:]...) + id, _ := gbi.inner.Storage(hash, storage.StorageKey(k)) + if id != nil { + var setID grandpa.SetID + err := scale.Unmarshal(id, &setID) + if err == nil { + return setID, nil + } + } + } + return 0, fmt.Errorf("unable to retrieve current set id") + } else { + setID, err := gbi.inner.RuntimeAPI().CurrentSetID(hash) + if err != nil { + return 0, err + } + return setID, nil + } +} + +// Import whole new state and reset authority set. +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importState( + block *client_common.BlockImportParams[H, N, E, Header], +) (client_common.ImportResult, error) { + hash := block.GetPostHash() + number := block.Header.Number() + + // Force imported state finality. + block.Finalized = true + importResult, err := gbi.inner.ImportBlock(block) + if err != nil { + return nil, err + } + switch importResult := importResult.(type) { + case client_common.ImportResultImported: + aux := client_common.ImportedAux(importResult) + // We've just imported a new state. We trust the sync module has verified + // finality proofs and that the state is correct and final. + // So we can read the authority list and set id from the state. 
+ gbi.authoritySetHardForksMtx.Lock() + gbi.authoritySetHardForks = make(map[H]PendingChange[H, N]) + gbi.authoritySetHardForksMtx.Unlock() + authorities, err := gbi.inner.RuntimeAPI().GrandpaAuthorities(hash) + if err != nil { + return nil, err + } + setID, err := gbi.currentSetID(hash) + if err != nil { + return nil, err + } + authoritySet, err := NewAuthoritySet[H, N]( + authorities, + uint64(setID), + forktree.NewForkTree[H, N, PendingChange[H, N]](), + []PendingChange[H, N]{}, + AuthoritySetChanges[N]{}, + ) + if err != nil { + return nil, err + } + + locked := gbi.authoritySet.inner.Locked() + *locked.MutRef() = authoritySet.Clone() + defer locked.Unlock() + + err = updateAuthoritySet( + locked.Data(), + nil, + func(insertions []api.KeyValue) error { + return gbi.inner.InsertAux(insertions, nil) + }, + ) + if err != nil { + return nil, err + } + newSet := newAuthoritySet[H, N]{ + CanonNumber: number, + CanonHash: hash, + SetID: setID, + Authorities: authorities, + } + gbi.sendVoterCommands <- voterCommandChangeAuthorities[H, N](newSet) + return client_common.ImportResultImported(aux), nil + case client_common.ImportResultAlreadyInChain, + client_common.ImportResultKnownBad, + client_common.ImportResultMissingState, + client_common.ImportResultUnknownParent: + return importResult, nil + default: + panic("unreachable") + } +} + +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) ImportBlock( + block *client_common.BlockImportParams[H, N, E, Header], +) (client_common.ImportResult, error) { + hash := block.GetPostHash() + number := block.Header.Number() + + // early exit if block already in chain, otherwise the check for + // authority changes will error when trying to re-import a change block + status, err := gbi.inner.Status(hash) + if err != nil { + return nil, err + } + if status == blockchain.BlockStatusInChain { + // Strip justifications when re-importing an existing block. 
+ block.Justifications = nil + return gbi.inner.ImportBlock(block) + } + + if block.WithState() { + return gbi.importState(block) + } + + if number <= gbi.inner.Info().FinalizedNumber { + // Importing an old block. Just save justifications and authority set changes + if gbi.checkNewChange(block.Header, hash) != nil { + if block.Justifications == nil { + return nil, fmt.Errorf("justification required when importing an old block with authority set change") + } + locked := gbi.authoritySet.inner.Locked() + authoritySet := locked.MutRef() + authoritySet.AuthoritySetChanges.insert(number) + err := updateAuthoritySet( + *authoritySet, + nil, + func(insertions []api.KeyValue) error { + converted := make([]api.AuxDataOperation, len(insertions)) + for i, kv := range insertions { + converted[i] = api.AuxDataOperation{ + Key: kv.Key, + Data: kv.Value, + } + } + block.Auxiliary = append(block.Auxiliary, converted...) + return nil + }, + ) + if err != nil { + return nil, err + } + } + return gbi.inner.ImportBlock(block) + } + + // on initial sync we will restrict logging under info to avoid spam. 
+ initialSync := block.Origin == common.NetworkInitialSyncBlockOrigin + + pendingChanges, err := gbi.makeAuthoritiesChanges(block, hash, initialSync) + if err != nil { + return nil, err + } + defer pendingChanges.drop() + + // we don't want to finalize on inner.ImportBlock + justifications := block.Justifications + block.Justifications = nil + importResult, err := gbi.inner.ImportBlock(block) + + if err != nil { + logger.Debugf("Restoring old authority set after block import error: %s", err) + pendingChanges.revert() + return nil, err + } + var importedAux client_common.ImportedAux + switch importResult := importResult.(type) { + case client_common.ImportResultImported: + importedAux = client_common.ImportedAux(importResult) + default: + logger.Debugf("Restoring old authority set after block import result: %v", importResult) + pendingChanges.revert() + return importResult, nil + } + + appliedChanges, doPause := pendingChanges.defuse() + + // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. + if doPause { + gbi.sendVoterCommands <- voterCommandPause("Forced change scheduled after inactivity") + } + + needsJustification := appliedChanges.needsJustification() + + switch appliedChanges := appliedChanges.(type) { + case importAppliedChangesForced[H, N]: + // NOTE: when we do a force change we are "discrediting" the old set so we + // ignore any justifications from them. this block may contain a justification + // which should be checked and imported below against the new authority + // triggered by this forced change. 
the new grandpa voter will start at the + // last median finalized block (which is before the block that enacts the + // change), full nodes syncing the chain will not be able to successfully + // import justifications for those blocks since their local authority set view + // is still of the set before the forced change was enacted, still after #1867 + // they should import the block and discard the justification, and they will + // then request a justification from sync if it's necessary (which they should + // then be able to successfully validate). + gbi.sendVoterCommands <- voterCommandChangeAuthorities[H, N](newAuthoritySet[H, N](appliedChanges)) + // we must clear all pending justification requests, presumably they won't be + // finalized hence why this forced change was triggered + importedAux.ClearJustificationRequests = true + + case importAppliedChangesStandard: + // we can't apply this change yet since there are other dependent changes that we + // need to apply first, drop any justification that might have been provided with + // the block to make sure we request them from `sync` which will ensure they'll be + // applied in-order. 
+ justifications = nil + default: + } + var grandpaJustification *runtime.EncodedJustification + if justifications != nil { + grandpaJustification = justifications.IntoJustification(grandpa.GrandpaEngineID) + } + + if grandpaJustification != nil { + if shouldProcessJustification( + gbi.inner, + gbi.justificationImportPeriod, + number, + needsJustification, + ) { + err := gbi.importJustification( + hash, + number, + runtime.Justification{ + ConsensusEngineID: grandpa.GrandpaEngineID, + EncodedJustification: *grandpaJustification, + }, + needsJustification, + initialSync, + ) + + if err != nil { + if needsJustification { + logger.Debugf( + ("Requesting justification from peers due to imported block #%d that enacts authority set " + + "change with invalid justification: %s"), number, err) + importedAux.BadJustification = true + importedAux.NeedsJustification = true + } + } + } else { + logger.Debugf("Ignoring unnecessary justification for block #%d", number) + } + } else { + if needsJustification { + logger.Debugf( + "Imported unjustified block #%d that enacts authority set change, waiting for finality for enactment.", + number) + importedAux.NeedsJustification = true + } + } + + return client_common.ImportResultImported(importedAux), nil +} + +func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) CheckBlock( + block client_common.BlockCheckParams[H, N], +) (client_common.ImportResult, error) { + return gbi.inner.CheckBlock(block) +} + +// Import a block justification and finalize the block. +// +// If enactsChange is set to true, then finalizing this block *must* +// enact an authority set change, the function will panic otherwise. 
+func (gbi *GrandpaBlockImport[H, N, Hasher, Header, E]) importJustification( + hash H, + number N, + justification runtime.Justification, + enactsChange bool, + initialSync bool, +) error { + if justification.ConsensusEngineID != grandpa.GrandpaEngineID { + // TODO: the import queue needs to be refactored to be able to dispatch to the correct + // JustificationImport instance based on ConsensusEngineID, or we need to build a + // justification import pipeline similar to what we do for BlockImport. In the + // meantime we'll just drop the justification, since this is only used for BEEFY which + // is still WIP. + return nil + } + + just, err := DecodeGrandpaJustificationVerifyFinalizes[H, N, Hasher, Header]( + justification.EncodedJustification, + HashNumber[H, N]{Hash: hash, Number: number}, + gbi.authoritySet.SetID(), + gbi.authoritySet.CurrentAuthorities(), + ) + + if err != nil { + return err + } + + err = finalizeBlock( + gbi.inner, + gbi.authoritySet, + nil, + hash, + number, + justificationOrCommitJustification[H, N, Header]{just}, + initialSync, + &gbi.justificationSender, + ) + if err != nil { + _, ok := err.(voterCommand) + if ok { + l := logger.Infof + if initialSync { + l = logger.Debugf + } + l("👴 Imported justification for block #%d that triggers command %s, signalling voter.", number, err) + + // send the command to the voter + gbi.sendVoterCommands <- err.(voterCommand) + } else { + return err + } + } else { + if enactsChange { + panic("returns Ok when no authority set change should be enacted; qed;") + } + } + + return nil +} diff --git a/internal/client/consensus/grandpa/notification.go b/internal/client/consensus/grandpa/notification.go index 0a9eab8d85..abae864fca 100644 --- a/internal/client/consensus/grandpa/notification.go +++ b/internal/client/consensus/grandpa/notification.go @@ -15,6 +15,16 @@ type GrandpaJustificationSender[Hash runtime.Hash, N runtime.Number, Header runt notification.NotificationSender[GrandpaJustification[Hash, N, Header]] 
} +func NewGrandpaJustificationSender[ + Hash runtime.Hash, + N runtime.Number, + Header runtime.Header[N, Hash], +]() (GrandpaJustificationSender[Hash, N, Header], GrandpaJustificationStream[Hash, N, Header]) { + sender, stream := notification.NewNotificationStream[GrandpaJustification[Hash, N, Header]]() + return GrandpaJustificationSender[Hash, N, Header]{NotificationSender: sender}, + GrandpaJustificationStream[Hash, N, Header]{NotificationStream: stream} +} + // The receiving half of the Grandpa justification channel. // // Used to receive notifications about justifications generated at the end of a Grandpa round. diff --git a/internal/client/utils/notification/notification.go b/internal/client/utils/notification/notification.go index bf04520c2b..28c191261f 100644 --- a/internal/client/utils/notification/notification.go +++ b/internal/client/utils/notification/notification.go @@ -10,7 +10,14 @@ import "github.com/ChainSafe/gossamer/internal/client/utils/pubsub" // The [NotificationStream] entity stores the [pubsub.Hub] so it can be // used to add more subscriptions. type NotificationStream[Payload any] struct { - hub *pubsub.Hub[struct{}, func() (Payload, error), Payload, *registry[Payload]] //nolint: unused + hub *pubsub.Hub[struct{}, func() (Payload, error), Payload, *registry[Payload]] +} + +func NewNotificationStream[Payload any]() (NotificationSender[Payload], NotificationStream[Payload]) { + hub := pubsub.NewHub("", ®istry[Payload]{}) + sender := NotificationSender[Payload]{hub: hub} + receiver := NotificationStream[Payload]{hub: hub} + return sender, receiver } // NotificationSender is the sending half of the notifications channel(s). 
diff --git a/internal/primitives/api/api.go b/internal/primitives/api/api.go index ca24902431..e9a798530f 100644 --- a/internal/primitives/api/api.go +++ b/internal/primitives/api/api.go @@ -10,6 +10,7 @@ import ( "github.com/ChainSafe/gossamer/internal/primitives/state-machine/overlayedchanges" ptrie "github.com/ChainSafe/gossamer/internal/primitives/trie" "github.com/ChainSafe/gossamer/internal/primitives/trie/recorder" + "github.com/ChainSafe/gossamer/internal/primitives/version" ) // Something that provides a runtime api. @@ -71,3 +72,8 @@ type ApiExt[ // Execute the given block ExecuteBlock(runtimeApiAtParam H, block runtime.Block[H, N, E, Header]) error } + +type Core[H runtime.Hash] interface { + // Returns the version of the runtime. + Version(hash H) (version.RuntimeVersion, error) +} diff --git a/internal/primitives/consensus/grandpa/grandpa.go b/internal/primitives/consensus/grandpa/grandpa.go index b6c5426ca9..5e0811f726 100644 --- a/internal/primitives/consensus/grandpa/grandpa.go +++ b/internal/primitives/consensus/grandpa/grandpa.go @@ -4,8 +4,11 @@ package grandpa import ( + "fmt" + "github.com/ChainSafe/gossamer/internal/client/keystore" "github.com/ChainSafe/gossamer/internal/log" + "github.com/ChainSafe/gossamer/internal/primitives/api" "github.com/ChainSafe/gossamer/internal/primitives/consensus/grandpa/app" "github.com/ChainSafe/gossamer/internal/primitives/core/crypto" "github.com/ChainSafe/gossamer/internal/primitives/runtime" @@ -96,6 +99,128 @@ type GrandpaJustification[H runtime.Hash, N runtime.Number, Header runtime.Heade VoteAncestries []Header } +// A consensus log item for GRANDPA. +type ConsensusLog interface { + isConsensusLog() +} + +// Schedule an authority set change. +// +// The earliest digest of this type in a single block will be respected, +// provided that there is no ForcedChange digest. If there is, then the +// ForcedChange will take precedence. 
+// +// No change should be scheduled if one is already and the delay has not +// passed completely. +// +// This should be a pure function: i.e. as long as the runtime can interpret +// the digest type it should return the same result regardless of the current +// state. +type ConsensusLogScheduledChange[N runtime.Number] ScheduledChange[N] + +// Force an authority set change. +// +// Forced changes are applied after a delay of _imported_ blocks, +// while pending changes are applied after a delay of _finalized_ blocks. +// +// The earliest digest of this type in a single block will be respected, +// with others ignored. +// +// No change should be scheduled if one is already and the delay has not +// passed completely. +// +// This should be a pure function: i.e. as long as the runtime can interpret +// the digest type it should return the same result regardless of the current +// state. +type ConsensusLogForcedChange[N runtime.Number] struct { + Median N + ScheduledChange[N] +} + +// Note that the authority with given index is disabled until the next change. +type ConsensusLogOnDisabled AuthorityIndex + +// A signal to pause the current authority set after the given delay. +// After finalizing the block at _delay_ the authorities should stop voting. +type ConsensusLogPause[N runtime.Number] struct { + Delay N +} + +// A signal to resume the current authority set after the given delay. +// After authoring the block at _delay_ the authorities should resume voting. 
+type ConsensusLogResume[N runtime.Number] struct { + Delay N +} + +func (ConsensusLogScheduledChange[N]) isConsensusLog() {} +func (ConsensusLogForcedChange[N]) isConsensusLog() {} +func (ConsensusLogOnDisabled) isConsensusLog() {} +func (ConsensusLogPause[N]) isConsensusLog() {} +func (ConsensusLogResume[N]) isConsensusLog() {} + +type ConsensusLogVDT[N runtime.Number] struct { + inner ConsensusLog +} + +func (mvdt *ConsensusLogVDT[N]) SetValue(value any) (err error) { + switch value := value.(type) { + case ConsensusLogScheduledChange[N]: + mvdt.inner = value + return + case ConsensusLogForcedChange[N]: + mvdt.inner = value + return + case ConsensusLogOnDisabled: + mvdt.inner = value + return + case ConsensusLogPause[N]: + mvdt.inner = value + return + case ConsensusLogResume[N]: + mvdt.inner = value + return + default: + return fmt.Errorf("unsupported type") + } +} + +func (mvdt ConsensusLogVDT[N]) IndexValue() (index uint, value any, err error) { + switch mvdt.inner.(type) { + case ConsensusLogScheduledChange[N]: + return 1, mvdt.inner, nil + case ConsensusLogForcedChange[N]: + return 2, mvdt.inner, nil + case ConsensusLogOnDisabled: + return 3, mvdt.inner, nil + case ConsensusLogPause[N]: + return 4, mvdt.inner, nil + case ConsensusLogResume[N]: + return 5, mvdt.inner, nil + } + return 0, nil, scale.ErrUnsupportedVaryingDataTypeValue +} + +func (mvdt ConsensusLogVDT[N]) Value() (value any, err error) { + _, value, err = mvdt.IndexValue() + return +} + +func (mvdt ConsensusLogVDT[N]) ValueAt(index uint) (value any, err error) { + switch index { + case 1: + return ConsensusLogScheduledChange[N]{}, nil + case 2: + return ConsensusLogForcedChange[N]{}, nil + case 3: + return ConsensusLogOnDisabled(0), nil + case 4: + return ConsensusLogPause[N]{}, nil + case 5: + return ConsensusLogResume[N]{}, nil + } + return nil, scale.ErrUnknownVaryingDataTypeValue +} + // EquivocationProof is proof of voter misbehavior on a given set id. 
Misbehavior/equivocation in GRANDPA happens when // a voter votes on the same round (either at prevote or precommit stage) for different blocks. Proving is achieved by // collecting the signed messages of conflicting votes. @@ -211,12 +336,13 @@ type OpaqueKeyOwnershipProof = runtime.OpaqueValue // // The consensus protocol will coordinate the handoff externally. type GrandpaAPI[H runtime.Hash, N runtime.Number] interface { + api.Core[H] // Get the current GRANDPA authorities and weights. This should not change except for when changes are scheduled // and the corresponding delay has passed. // // When called at block B, it will return the set of authorities that should be used to finalize descendants of // this block (B+1, B+2, ...). The block B itself is finalized by the authorities from block B-1. - GrandpaAuthorities() AuthorityList + GrandpaAuthorities(hash H) (AuthorityList, error) // Submits an unsigned extrinsic to report an equivocation. The caller must provide the equivocation proof and a // key ownership proof (should be obtained using GenerateKeyOwnershipProof). The extrinsic will be unsigned @@ -242,4 +368,7 @@ type GrandpaAPI[H runtime.Hash, N runtime.Number] interface { setID SetID, authorityID AuthorityID, ) *OpaqueKeyOwnershipProof + + /// Get current GRANDPA authority set id. + CurrentSetID(hash H) (SetID, error) } diff --git a/internal/primitives/crypto/hashing/hashing.go b/internal/primitives/crypto/hashing/hashing.go index 086edef4ab..94f6aef1a9 100644 --- a/internal/primitives/crypto/hashing/hashing.go +++ b/internal/primitives/crypto/hashing/hashing.go @@ -4,6 +4,9 @@ package hashing import ( + "encoding/binary" + + "github.com/OneOfOne/xxhash" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/sha3" ) @@ -37,3 +40,32 @@ func Keccak256(data []byte) [32]byte { copy(buf[:], hash) return buf } + +// Do a XX 128-bit hash and return result. 
+func Twox128(data []byte) [16]byte { + // compute xxHash64 twice with seeds 0 and 1 applied on given byte array + h0 := xxhash.NewS64(0) // create xxHash with 0 seed + _, err := h0.Write(data) + if err != nil { + panic(err) + } + res0 := h0.Sum64() + hash0 := make([]byte, 8) + binary.LittleEndian.PutUint64(hash0, res0) + + h1 := xxhash.NewS64(1) // create xxHash with 1 seed + _, err = h1.Write(data) + if err != nil { + panic(err) + } + res1 := h1.Sum64() + hash1 := make([]byte, 8) + binary.LittleEndian.PutUint64(hash1, res1) + + return [16]byte{ + hash0[0], hash0[1], hash0[2], hash0[3], + hash0[4], hash0[5], hash0[6], hash0[7], + hash1[0], hash1[1], hash1[2], hash1[3], + hash1[4], hash1[5], hash1[6], hash1[7], + } +} diff --git a/internal/primitives/runtime/runtime.go b/internal/primitives/runtime/runtime.go index aef1a0eb5f..0be19041bf 100644 --- a/internal/primitives/runtime/runtime.go +++ b/internal/primitives/runtime/runtime.go @@ -47,6 +47,16 @@ func (j Justifications) Get(engineID ConsensusEngineID) *EncodedJustification { return nil } +// IntoJustification returns the encoded justification for the given consensus engine, if it exists. +func (j Justifications) IntoJustification(enginedID ConsensusEngineID) *EncodedJustification { + for _, justification := range j { + if justification.ConsensusEngineID == enginedID { + return &justification.EncodedJustification + } + } + return nil +} + // Complex storage builder stuff. type BuildStorage interface { // Build the storage out of this builder. diff --git a/internal/primitives/version/version.go b/internal/primitives/version/version.go index 88b20bc40d..321594d63f 100644 --- a/internal/primitives/version/version.go +++ b/internal/primitives/version/version.go @@ -16,7 +16,7 @@ type ApiID [8]uint8 // A pair of ApiID and a uint32 for version. 
type ApiIDVersion struct { - ApiId ApiID + ApiID Version uint32 } @@ -67,7 +67,7 @@ type RuntimeVersion struct { ImplVersion uint32 // List of supported API "features" along with their versions. - Apis ApisVec + APIs []ApiIDVersion // All existing calls (dispatchables) are fully compatible when this number doesn't change. If // this number changes, then SpecVersion must change, also. @@ -89,3 +89,13 @@ func (v *RuntimeVersion) StateVersion() storage.StateVersion { return storage.StateVersion(stateVersion) } + +// Returns the api version found for api with id. +func (v *RuntimeVersion) APIVersion(id ApiID) *uint32 { + for _, api := range v.APIs { + if api.ApiID == id { + return &api.Version + } + } + return nil +}