>
+ /\ action' = "InsertProposal"
+
+\* a new action used to filter messages that are not on time
+\* [PBTS-RECEPTION-STEP.0]
+ReceiveProposal(p) ==
+ \E v \in Values, t \in Timestamps:
+ /\ LET r == round[p] IN
+ LET msg ==
+ AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]],
+ round |-> round[p], proposal |-> Proposal(v, t), validRound |-> NilRound]) IN
+ /\ msg \in msgsPropose[round[p]]
+ /\ p \notin inspectedProposal[r]
+ /\ <<p, msg>> \notin receivedTimelyProposal
+ /\ inspectedProposal' = [inspectedProposal EXCEPT ![r] = @ \union {p}]
+ /\ \/ /\ localClock[p] - Precision < t
+ /\ t < localClock[p] + Precision + Delay
+ /\ receivedTimelyProposal' = receivedTimelyProposal \union {<<p, msg>>}
+ /\ \/ /\ proposalReceivedTime[r] = NilTimestamp
+ /\ proposalReceivedTime' = [proposalReceivedTime EXCEPT ![r] = realTime]
+ \/ /\ proposalReceivedTime[r] /= NilTimestamp
+ /\ UNCHANGED proposalReceivedTime
+ \/ /\ \/ localClock[p] - Precision >= t
+ \/ t >= localClock[p] + Precision + Delay
+ /\ UNCHANGED <<receivedTimelyProposal, proposalReceivedTime>>
+ /\ UNCHANGED <>
+ /\ action' = "ReceiveProposal"
+
+\* lines 22-27
+UponProposalInPropose(p) ==
+ \E v \in Values, t \in Timestamps:
+ /\ step[p] = "PROPOSE" (* line 22 *)
+ /\ LET msg ==
+ AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]],
+ round |-> round[p], proposal |-> Proposal(v, t), validRound |-> NilRound]) IN
+ /\ <<p, msg>> \in receivedTimelyProposal \* updated line 22
+ /\ evidence' = {msg} \union evidence
+ /\ LET mid == (* line 23 *)
+ IF IsValid(v) /\ (lockedRound[p] = NilRound \/ lockedValue[p] = v)
+ THEN Id(Proposal(v, t))
+ ELSE NilProposal
+ IN
+ BroadcastPrevote(p, round[p], mid) \* lines 24-26
+ /\ step' = [step EXCEPT ![p] = "PREVOTE"]
+ /\ UNCHANGED <>
+ /\ action' = "UponProposalInPropose"
+
+\* lines 28-33
+\* [PBTS-ALG-OLD-PREVOTE.0]
+UponProposalInProposeAndPrevote(p) ==
+ \E v \in Values, t1 \in Timestamps, t2 \in Timestamps, vr \in Rounds:
+ /\ step[p] = "PROPOSE" /\ 0 <= vr /\ vr < round[p] \* line 28, the while part
+ /\ LET msg ==
+ AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]],
+ round |-> round[p], proposal |-> Proposal(v, t1), validRound |-> vr])
+ IN
+ /\ <<p, msg>> \in receivedTimelyProposal \* updated line 28
+ /\ LET PV == { m \in msgsPrevote[vr]: m.id = Id(Proposal(v, t2)) } IN
+ /\ Cardinality(PV) >= THRESHOLD2 \* line 28
+ /\ evidence' = PV \union {msg} \union evidence
+ /\ LET mid == (* line 29 *)
+ IF IsValid(v) /\ (lockedRound[p] <= vr \/ lockedValue[p] = v)
+ THEN Id(Proposal(v, t1))
+ ELSE NilProposal
+ IN
+ BroadcastPrevote(p, round[p], mid) \* lines 24-26
+ /\ step' = [step EXCEPT ![p] = "PREVOTE"]
+ /\ UNCHANGED <>
+ /\ action' = "UponProposalInProposeAndPrevote"
+
+ \* lines 34-35 + lines 61-64 (onTimeoutPrevote)
+UponQuorumOfPrevotesAny(p) ==
+ /\ step[p] = "PREVOTE" \* line 34 and 61
+ /\ \E MyEvidence \in SUBSET msgsPrevote[round[p]]:
+ \* find the unique voters in the evidence
+ LET Voters == { m.src: m \in MyEvidence } IN
+ \* compare the number of the unique voters against the threshold
+ /\ Cardinality(Voters) >= THRESHOLD2 \* line 34
+ /\ evidence' = MyEvidence \union evidence
+ /\ BroadcastPrecommit(p, round[p], NilProposal)
+ /\ step' = [step EXCEPT ![p] = "PRECOMMIT"]
+ /\ UNCHANGED <>
+ /\ action' = "UponQuorumOfPrevotesAny"
+
+\* lines 36-46
+\* [PBTS-ALG-NEW-PREVOTE.0]
+UponProposalInPrevoteOrCommitAndPrevote(p) ==
+ \E v \in ValidValues, t \in Timestamps, vr \in RoundsOrNil:
+ /\ step[p] \in {"PREVOTE", "PRECOMMIT"} \* line 36
+ /\ LET msg ==
+ AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]],
+ round |-> round[p], proposal |-> Proposal(v, t), validRound |-> vr]) IN
+ /\ <<p, msg>> \in receivedTimelyProposal \* updated line 36
+ /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(Proposal(v, t)) } IN
+ /\ Cardinality(PV) >= THRESHOLD2 \* line 36
+ /\ evidence' = PV \union {msg} \union evidence
+ /\ IF step[p] = "PREVOTE"
+ THEN \* lines 38-41:
+ /\ lockedValue' = [lockedValue EXCEPT ![p] = v]
+ /\ lockedRound' = [lockedRound EXCEPT ![p] = round[p]]
+ /\ BroadcastPrecommit(p, round[p], Id(Proposal(v, t)))
+ /\ step' = [step EXCEPT ![p] = "PRECOMMIT"]
+ ELSE
+ UNCHANGED <<lockedValue, lockedRound, msgsPrecommit, step>>
+ \* lines 42-43
+ /\ validValue' = [validValue EXCEPT ![p] = v]
+ /\ validRound' = [validRound EXCEPT ![p] = round[p]]
+ /\ UNCHANGED <>
+ /\ action' = "UponProposalInPrevoteOrCommitAndPrevote"
+
+\* lines 47-48 + 65-67 (onTimeoutPrecommit)
+UponQuorumOfPrecommitsAny(p) ==
+ /\ \E MyEvidence \in SUBSET msgsPrecommit[round[p]]:
+ \* find the unique committers in the evidence
+ LET Committers == { m.src: m \in MyEvidence } IN
+ \* compare the number of the unique committers against the threshold
+ /\ Cardinality(Committers) >= THRESHOLD2 \* line 47
+ /\ evidence' = MyEvidence \union evidence
+ /\ round[p] + 1 \in Rounds
+ /\ StartRound(p, round[p] + 1)
+ /\ UNCHANGED <>
+ /\ action' = "UponQuorumOfPrecommitsAny"
+
+\* lines 49-54
+\* [PBTS-ALG-DECIDE.0]
+UponProposalInPrecommitNoDecision(p) ==
+ /\ decision[p] = NilDecision \* line 49
+ /\ \E v \in ValidValues, t \in Timestamps (* line 50*) , r \in Rounds, vr \in RoundsOrNil:
+ /\ LET msg == AsMsg([type |-> "PROPOSAL", src |-> Proposer[r],
+ round |-> r, proposal |-> Proposal(v, t), validRound |-> vr]) IN
+ /\ msg \in msgsPropose[r] \* line 49
+ /\ p \in inspectedProposal[r]
+ /\ LET PV == { m \in msgsPrecommit[r]: m.id = Id(Proposal(v, t)) } IN
+ /\ Cardinality(PV) >= THRESHOLD2 \* line 49
+ /\ evidence' = PV \union {msg} \union evidence
+ /\ decision' = [decision EXCEPT ![p] = Decision(v, t, round[p])] \* update the decision, line 51
+ \* The original algorithm does not have 'DECIDED', but it increments the height.
+ \* We introduced 'DECIDED' here to prevent the process from changing its decision.
+ /\ endConsensus' = [endConsensus EXCEPT ![p] = localClock[p]]
+ /\ step' = [step EXCEPT ![p] = "DECIDED"]
+ /\ UNCHANGED <>
+ /\ action' = "UponProposalInPrecommitNoDecision"
+
+\* the actions below are not essential for safety, but added for completeness
+
+\* lines 20-21 + 57-60
+OnTimeoutPropose(p) ==
+ /\ step[p] = "PROPOSE"
+ /\ p /= Proposer[round[p]]
+ /\ BroadcastPrevote(p, round[p], NilProposal)
+ /\ step' = [step EXCEPT ![p] = "PREVOTE"]
+ /\ UNCHANGED <>
+ /\ action' = "OnTimeoutPropose"
+
+\* lines 44-46
+OnQuorumOfNilPrevotes(p) ==
+ /\ step[p] = "PREVOTE"
+ /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(NilProposal) } IN
+ /\ Cardinality(PV) >= THRESHOLD2 \* line 36
+ /\ evidence' = PV \union evidence
+ /\ BroadcastPrecommit(p, round[p], Id(NilProposal))
+ /\ step' = [step EXCEPT ![p] = "PRECOMMIT"]
+ /\ UNCHANGED <>
+ /\ action' = "OnQuorumOfNilPrevotes"
+
+\* lines 55-56
+OnRoundCatchup(p) ==
+ \E r \in {rr \in Rounds: rr > round[p]}:
+ LET RoundMsgs == msgsPropose[r] \union msgsPrevote[r] \union msgsPrecommit[r] IN
+ \E MyEvidence \in SUBSET RoundMsgs:
+ LET Faster == { m.src: m \in MyEvidence } IN
+ /\ Cardinality(Faster) >= THRESHOLD1
+ /\ evidence' = MyEvidence \union evidence
+ /\ StartRound(p, r)
+ /\ UNCHANGED <>
+ /\ action' = "OnRoundCatchup"
+
+
+(********************* PROTOCOL TRANSITIONS ******************************)
+\* advance the global clock
+AdvanceRealTime ==
+ /\ realTime < MaxTimestamp
+ /\ realTime' = realTime + 1
+ /\ \/ /\ ~ClockDrift
+ /\ localClock' = [p \in Corr |-> localClock[p] + 1]
+ \/ /\ ClockDrift
+ /\ UNCHANGED localClock
+ /\ UNCHANGED <>
+ /\ action' = "AdvanceRealTime"
+
+\* advance the local clock of node p
+AdvanceLocalClock(p) ==
+ /\ localClock[p] < MaxTimestamp
+ /\ localClock' = [localClock EXCEPT ![p] = @ + 1]
+ /\ UNCHANGED <>
+ /\ action' = "AdvanceLocalClock"
+
+\* process timely messages
+MessageProcessing(p) ==
+ \* start round
+ \/ InsertProposal(p)
+ \* reception step
+ \/ ReceiveProposal(p)
+ \* processing step
+ \/ UponProposalInPropose(p)
+ \/ UponProposalInProposeAndPrevote(p)
+ \/ UponQuorumOfPrevotesAny(p)
+ \/ UponProposalInPrevoteOrCommitAndPrevote(p)
+ \/ UponQuorumOfPrecommitsAny(p)
+ \/ UponProposalInPrecommitNoDecision(p)
+ \* the actions below are not essential for safety, but added for completeness
+ \/ OnTimeoutPropose(p)
+ \/ OnQuorumOfNilPrevotes(p)
+ \/ OnRoundCatchup(p)
+
+(*
+ * A system transition. In this specification, the system may eventually deadlock,
+ * e.g., when all processes decide. This is expected behavior, as we focus on safety.
+ *)
+Next ==
+ \/ AdvanceRealTime
+ \/ /\ ClockDrift
+ /\ \E p \in Corr: AdvanceLocalClock(p)
+ \/ /\ SynchronizedLocalClocks
+ /\ \E p \in Corr: MessageProcessing(p)
+
+-----------------------------------------------------------------------------
+
+(*************************** INVARIANTS *************************************)
+
+\* [PBTS-INV-AGREEMENT.0]
+AgreementOnValue ==
+ \A p, q \in Corr:
+ /\ decision[p] /= NilDecision
+ /\ decision[q] /= NilDecision
+ => \E v \in ValidValues, t1 \in Timestamps, t2 \in Timestamps, r1 \in Rounds, r2 \in Rounds :
+ /\ decision[p] = Decision(v, t1, r1)
+ /\ decision[q] = Decision(v, t2, r2)
+
+\* [PBTS-INV-TIME-AGR.0]
+AgreementOnTime ==
+ \A p, q \in Corr:
+ \A v1 \in ValidValues, v2 \in ValidValues, t1 \in Timestamps, t2 \in Timestamps, r \in Rounds :
+ /\ decision[p] = Decision(v1, t1, r)
+ /\ decision[q] = Decision(v2, t2, r)
+ => t1 = t2
+
+\* [PBTS-CONSENSUS-TIME-VALID.0]
+ConsensusTimeValid ==
+ \A p \in Corr, t \in Timestamps :
+ \* if a process decides on v and t
+ (\E v \in ValidValues, r \in Rounds : decision[p] = Decision(v, t, r))
+ \* then
+ => /\ beginConsensus - Precision <= t
+ /\ t < endConsensus[p] + Precision + Delay
+
+\* [PBTS-CONSENSUS-SAFE-VALID-CORR-PROP.0]
+ConsensusSafeValidCorrProp ==
+ \A v \in ValidValues, t \in Timestamps :
+ \* if the proposer in the first round is correct
+ (/\ Proposer[0] \in Corr
+ \* and there exists a process that decided on v, t
+ /\ \E p \in Corr, r \in Rounds : decision[p] = Decision(v, t, r))
+ \* then t is between the minimal and maximal initial local time
+ => /\ beginConsensus <= t
+ /\ t <= lastBeginConsensus
+
+\* [PBTS-CONSENSUS-REALTIME-VALID-CORR.0]
+ConsensusRealTimeValidCorr ==
+ \A t \in Timestamps, r \in Rounds :
+ (/\ \E p \in Corr, v \in ValidValues : decision[p] = Decision(v, t, r)
+ /\ proposalTime[r] /= NilTimestamp)
+ => /\ proposalTime[r] - Accuracy < t
+ /\ t < proposalTime[r] + Accuracy
+
+\* [PBTS-CONSENSUS-REALTIME-VALID.0]
+ConsensusRealTimeValid ==
+ \A t \in Timestamps, r \in Rounds :
+ (\E p \in Corr, v \in ValidValues : decision[p] = Decision(v, t, r))
+ => /\ proposalReceivedTime[r] - Accuracy - Precision < t
+ /\ t < proposalReceivedTime[r] + Accuracy + Precision + Delay
+
+\* [PBTS-MSG-FAIR.0]
+BoundedDelay ==
+ \A r \in Rounds :
+ (/\ proposalTime[r] /= NilTimestamp
+ /\ proposalTime[r] + Delay < realTime)
+ => inspectedProposal[r] = Corr
+
+\* [PBTS-CONSENSUS-TIME-LIVE.0]
+ConsensusTimeLive ==
+ \A r \in Rounds, p \in Corr :
+ (/\ proposalTime[r] /= NilTimestamp
+ /\ proposalTime[r] + Delay < realTime
+ /\ Proposer[r] \in Corr
+ /\ round[p] <= r)
+ => \E msg \in RoundProposals(r) : <<p, msg>> \in receivedTimelyProposal
+
+\* a conjunction of all invariants
+Inv ==
+ /\ AgreementOnValue
+ /\ AgreementOnTime
+ /\ ConsensusTimeValid
+ /\ ConsensusSafeValidCorrProp
+ /\ ConsensusRealTimeValid
+ /\ ConsensusRealTimeValidCorr
+ /\ BoundedDelay
+
+Liveness ==
+ ConsensusTimeLive
+
+=============================================================================
diff --git a/cometbft/v0.38/spec/core/Data_structures.mdx b/cometbft/v0.38/spec/core/Data_structures.mdx
new file mode 100644
index 00000000..ecd449ad
--- /dev/null
+++ b/cometbft/v0.38/spec/core/Data_structures.mdx
@@ -0,0 +1,501 @@
+---
+order: 1
+---
+
+# Data Structures
+
+Here we describe the data structures in the CometBFT blockchain and the rules for validating them.
+
+The CometBFT blockchain consists of a short list of data types:
+
+- [Data Structures](#data-structures)
+ - [Block](#block)
+ - [Execution](#execution)
+ - [Header](#header)
+ - [Version](#version)
+ - [BlockID](#blockid)
+ - [PartSetHeader](#partsetheader)
+ - [Part](#part)
+ - [Time](#time)
+ - [Data](#data)
+ - [Commit](#commit)
+ - [CommitSig](#commitsig)
+ - [BlockIDFlag](#blockidflag)
+ - [Vote](#vote)
+ - [CanonicalVote](#canonicalvote)
+ - [Proposal](#proposal)
+ - [SignedMsgType](#signedmsgtype)
+ - [Signature](#signature)
+ - [EvidenceList](#evidencelist)
+ - [Evidence](#evidence)
+ - [DuplicateVoteEvidence](#duplicatevoteevidence)
+ - [LightClientAttackEvidence](#lightclientattackevidence)
+ - [LightBlock](#lightblock)
+ - [SignedHeader](#signedheader)
+ - [ValidatorSet](#validatorset)
+ - [Validator](#validator)
+ - [Address](#address)
+ - [ConsensusParams](#consensusparams)
+ - [BlockParams](#blockparams)
+ - [EvidenceParams](#evidenceparams)
+ - [ValidatorParams](#validatorparams)
+ - [VersionParams](#versionparams)
+ - [Proof](#proof)
+
+
+## Block
+
+A block consists of a header, transactions, votes (the commit),
+and a list of evidence of malfeasance (ie. signing conflicting votes).
+
+| Name | Type | Description | Validation |
+|--------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------|
+| Header | [Header](#header) | Header corresponding to the block. This field contains information used throughout consensus and other areas of the protocol. To find out what it contains, visit [header](#header) | Must adhere to the validation rules of [header](#header) |
+| Data | [Data](#data) | Data contains a list of transactions. The contents of the transaction is unknown to CometBFT. | This field can be empty or populated, but no validation is performed. Applications can perform validation on individual transactions prior to block creation using [checkTx](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/abci/abci%2B%2B_methods.md#checktx).
+| Evidence | [EvidenceList](#evidencelist) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validations rules from [evidenceList](#evidencelist) apply |
+| LastCommit | [Commit](#commit) | `LastCommit` includes one vote for every validator. All votes must either be for the previous block, nil or absent. If a vote is for the previous block it must have a valid signature from the corresponding validator. The sum of the voting power of the validators that voted must be greater than 2/3 of the total voting power of the complete validator set. The number of votes in a commit is limited to 10000 (see `types.MaxVotesCount`). | Must be empty for the initial height and must adhere to the validation rules of [commit](#commit). |
+
+## Execution
+
+Once a block is validated, it can be executed against the state.
+
+The state follows this recursive equation:
+
+```go
+state(initialHeight) = InitialState
+state(h+1) <- Execute(state(h), ABCIApp, block(h))
+```
+
+where `InitialState` includes the initial consensus parameters and validator set,
+and `ABCIApp` is an ABCI application that can return results and changes to the validator
+set (TODO). Execute is defined as:
+
+```go
+func Execute(state State, app ABCIApp, block Block) State {
+ // Function ApplyBlock executes the block of transactions against the app and returns the new root hash of the app state,
+ // modifications to the validator set and the changes of the consensus parameters.
+ AppHash, ValidatorChanges, ConsensusParamChanges := app.ApplyBlock(block)
+
+ nextConsensusParams := UpdateConsensusParams(state.ConsensusParams, ConsensusParamChanges)
+ return State{
+ ChainID: state.ChainID,
+ InitialHeight: state.InitialHeight,
+ LastResults: abciResponses.DeliverTxResults,
+ AppHash: AppHash,
+ LastValidators: state.Validators,
+ Validators: state.NextValidators,
+ NextValidators: UpdateValidators(state.NextValidators, ValidatorChanges),
+ ConsensusParams: nextConsensusParams,
+ Version: {
+ Consensus: {
+ AppVersion: nextConsensusParams.Version.AppVersion,
+ },
+ },
+ }
+}
+```
+
+Validating a new block is first done prior to the `prevote`, `precommit` & `finalizeCommit` stages.
+
+The steps to validate a new block are:
+
+- Check the validity rules of the block and its fields.
+- Check the versions (Block & App) are the same as in local state.
+- Check the chainID's match.
+- Check the height is correct.
+- Check the `LastBlockID` corresponds to BlockID currently in state.
+- Check the hashes in the header match those in state.
+- Verify the LastCommit against state, this step is skipped for the initial height.
+ - This is where checking the signatures correspond to the correct block will be made.
+- Make sure the proposer is part of the validator set.
+- Validate block time.
+ - Make sure the new block's time is after the previous block's time.
+ - Calculate the medianTime and check it against the block's time.
+ - If the block's height is the initial height then check if it matches the genesis time.
+- Validate the evidence in the block. Note: Evidence can be empty
+
+## Header
+
+A block header contains metadata about the block and about the consensus, as well as commitments to
+the data in the current block, the previous block, and the results returned by the application:
+
+| Name | Type | Description | Validation |
+|-------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Version | [Version](#version) | Version defines the application and block versions being used. | Must adhere to the validation rules of [Version](#version) |
+| ChainID | String | ChainID is the ID of the chain. This must be unique to your chain. | ChainID must be less than 50 bytes. |
+| Height | uint64 | Height is the height for this header. | Must be > 0, >= initialHeight, and == previous Height+1 |
+| Time | [Time](#time) | The timestamp is equal to the weighted median of validators present in the last commit. Read more on time in the [BFT-time section](../consensus/bft-time.md). Note: the timestamp of a vote must be greater by at least one millisecond than that of the block being voted on. | Time must be >= previous header timestamp + consensus parameters TimeIotaMs. The timestamp of the first block must be equal to the genesis time (since there's no votes to compute the median). |
+| LastBlockID | [BlockID](#blockid) | BlockID of the previous block. | Must adhere to the validation rules of [blockID](#blockid). The first block has `block.Header.LastBlockID == BlockID{}`. |
+| LastCommitHash | slice of bytes (`[]byte`) | MerkleRoot of the lastCommit's signatures. The signatures represent the validators that committed to the last block. The first block has an empty slices of bytes for the hash. | Must be of length 32 |
+| DataHash | slice of bytes (`[]byte`) | MerkleRoot of the hash of transactions. **Note**: The transactions are hashed before being included in the merkle tree, the leaves of the Merkle tree are the hashes, not the transactions themselves. | Must be of length 32 |
+| ValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the current validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 |
+| NextValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the next validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 |
+| ConsensusHash | slice of bytes (`[]byte`) | Hash of the protobuf encoded consensus parameters. | Must be of length 32 |
+| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and committing the previous block. It serves as the basis for validating any merkle proofs that come from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `ResponseInitChain.app_hash`. | This hash is determined by the application, CometBFT can not perform validation on it. |
+| LastResultHash | slice of bytes (`[]byte`) | `LastResultsHash` is the root hash of a Merkle tree built from `ResponseDeliverTx` responses (`Log`,`Info`, `Codespace` and `Events` fields are ignored). | Must be of length 32. The first block has `block.Header.ResultsHash == MerkleRoot(nil)`, i.e. the hash of an empty input, for RFC-6962 conformance. |
+| EvidenceHash | slice of bytes (`[]byte`) | MerkleRoot of the evidence of Byzantine behavior included in this block. | Must be of length 32 |
+| ProposerAddress | slice of bytes (`[]byte`) | Address of the original proposer of the block. Validator must be in the current validatorSet. | Must be of length 20 |
+
+## Version
+
+NOTE: that this is more specifically the consensus version and doesn't include information like the
+P2P Version. (TODO: we should write a comprehensive document about
+versioning that this can refer to)
+
+| Name | type | Description | Validation |
+|-------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------|
+| Block | uint64 | This number represents the block version and must be the same throughout an operational network | Must be equal to block version being used in a network (`block.Version.Block == state.Version.Consensus.Block`) |
+| App | uint64 | App version is decided on by the application. Read [here](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/abci/abci++_app_requirements.md) | `block.Version.App == state.Version.Consensus.App` |
+
+## BlockID
+
+The `BlockID` contains two distinct Merkle roots of the block. The `BlockID` includes these two hashes, as well as the number of parts (ie. `len(MakeParts(block))`)
+
+| Name | Type | Description | Validation |
+|---------------|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
+| Hash | slice of bytes (`[]byte`) | MerkleRoot of all the fields in the header (ie. `MerkleRoot(header)`. | hash must be of length 32 |
+| PartSetHeader | [PartSetHeader](#partsetheader) | Used for secure gossiping of the block during consensus, is the MerkleRoot of the complete serialized block cut into parts (ie. `MerkleRoot(MakeParts(block))`). | Must adhere to the validation rules of [PartSetHeader](#partsetheader) |
+
+See [MerkleRoot](./encoding.md#merkleroot) for details.
+
+## PartSetHeader
+
+| Name | Type | Description | Validation |
+|-------|---------------------------|-----------------------------------|----------------------|
+| Total | int32 | Total amount of parts for a block | Must be > 0 |
+| Hash | slice of bytes (`[]byte`) | MerkleRoot of a serialized block | Must be of length 32 |
+
+## Part
+
+Part defines a part of a block. In CometBFT blocks are broken into `parts` for gossip.
+
+| Name | Type | Description | Validation |
+|-------|-----------------|-----------------------------------|----------------------|
+| index | int32 | Index of this part within the block's part set | Must be >= 0 |
+| bytes | bytes | The raw bytes of this block part | Can not exceed the maximum part size |
+| proof | [Proof](#proof) | Merkle proof of this part's inclusion in the block | Must adhere to the validation rules of [Proof](#proof) |
+
+## Time
+
+CometBFT uses the [Google.Protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp)
+format, which uses two integers, one 64 bit integer for Seconds and a 32 bit integer for Nanoseconds.
+
+## Data
+
+Data is just a wrapper for a list of transactions, where transactions are arbitrary byte arrays:
+
+| Name | Type | Description | Validation |
+|------|----------------------------|------------------------|-----------------------------------------------------------------------------|
+| Txs | Matrix of bytes ([][]byte) | Slice of transactions. | Validation does not occur on this field, this data is unknown to CometBFT |
+
+## Commit
+
+Commit is a simple wrapper for a list of signatures, with one for each validator. It also contains the relevant BlockID, height and round:
+
+| Name | Type | Description | Validation |
+|------------|----------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|
+| Height | int64 | Height at which this commit was created. | Must be > 0 |
+| Round | int32 | Round that the commit corresponds to. | Must be > 0 |
+| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | Must adhere to the validation rules of [BlockID](#blockid). |
+| Signatures | Array of [CommitSig](#commitsig) | Array of commit signatures that correspond to current validator set. | Length of signatures must be > 0 and adhere to the validation of each individual [Commitsig](#commitsig) |
+
+## ExtendedCommit
+
+`ExtendedCommit`, similarly to Commit, wraps a list of votes with signatures together with other data needed to verify them.
+In addition, it contains the verified vote extensions, one for each non-`nil` vote, along with the extension signatures.
+
+| Name | Type | Description | Validation |
+|--------------------|------------------------------------------|-------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
+| Height | int64 | Height at which this commit was created. | Must be > 0 |
+| Round | int32 | Round that the commit corresponds to. | Must be > 0 |
+| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | Must adhere to the validation rules of [BlockID](#blockid). |
+| ExtendedSignatures | Array of [ExtendedCommitSig](#commitsig) | The current validator set's commit signatures, extension, and extension signatures. | Length of signatures must be > 0 and adhere to the validation of each individual [ExtendedCommitSig](#extendedcommitsig) |
+
+## CommitSig
+
+`CommitSig` represents a signature of a validator, who has voted either for nil,
+a particular `BlockID` or was absent. It's a part of the `Commit` and can be used
+to reconstruct the vote set given the validator set.
+
+| Name | Type | Description | Validation |
+|------------------|-----------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
+| BlockIDFlag | [BlockIDFlag](#blockidflag) | Represents the validators participation in consensus: its vote was not received, voted for the block that received the majority, or voted for nil | Must be one of the fields in the [BlockIDFlag](#blockidflag) enum |
+| ValidatorAddress | [Address](#address) | Address of the validator | Must be of length 20 |
+| Timestamp | [Time](#time) | This field will vary from `CommitSig` to `CommitSig`. It represents the timestamp of the validator. | [Time](#time) |
+| Signature | [Signature](#signature) | Signature corresponding to the validators participation in consensus. | The length of the signature must be > 0 and < 64 |
+
+NOTE: `ValidatorAddress` and `Timestamp` fields may be removed in the future
+(see [ADR-25](https://github.com/cometbft/cometbft/blob/v0.38.x/docs/architecture/tendermint-core/adr-025-commit.md)).
+
+## ExtendedCommitSig
+
+`ExtendedCommitSig` represents a signature of a validator that has voted either for `nil`,
+a particular `BlockID` or was absent. It is part of the `ExtendedCommit` and can be used
+to reconstruct the vote set given the validator set.
+Additionally it contains the vote extensions that were attached to each non-`nil` precommit vote.
+All these extensions have been verified by the application operating at the signing validator's node.
+
+| Name | Type | Description | Validation |
+|--------------------|-----------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|
+| BlockIDFlag | [BlockIDFlag](#blockidflag) | Represents the validators participation in consensus: its vote was not received, voted for the block that received the majority, or voted for nil | Must be one of the fields in the [BlockIDFlag](#blockidflag) enum |
+| ValidatorAddress | [Address](#address) | Address of the validator | Must be of length 20 |
+| Timestamp | [Time](#time) | This field will vary from `CommitSig` to `CommitSig`. It represents the timestamp of the validator. | |
+| Signature | [Signature](#signature) | Signature corresponding to the validators participation in consensus. | Length must be > 0 and < 64 |
+| Extension | bytes | Vote extension provided by the Application running on the sender of the precommit vote, and verified by the local application. | Length must be zero if BlockIDFlag is not `Commit` |
+| ExtensionSignature | [Signature](#signature) | Signature of the vote extension. | Length must be > 0 and < 64 if BlockIDFlag is `Commit`, else 0 |
+
+## BlockIDFlag
+
+BlockIDFlag represents which BlockID the [signature](#commitsig) is for.
+
+```go
+enum BlockIDFlag {
+ BLOCK_ID_FLAG_UNKNOWN = 0; // indicates an error condition
+ BLOCK_ID_FLAG_ABSENT = 1; // the vote was not received
+ BLOCK_ID_FLAG_COMMIT = 2; // voted for the block that received the majority
+ BLOCK_ID_FLAG_NIL = 3; // voted for nil
+}
+```
+
+## Vote
+
+A vote is a signed message from a validator for a particular block.
+The vote includes information about the validator signing it. When stored in the blockchain or propagated over the network, votes are encoded in Protobuf.
+
+| Name | Type | Description | Validation |
+|--------------------|---------------------------------|------------------------------------------------------------------------------------------|------------------------------------------|
+| Type | [SignedMsgType](#signedmsgtype) | The type of message the vote refers to | Must be `PrevoteType` or `PrecommitType` |
+| Height | int64 | Height for which this vote was created for | Must be > 0 |
+| Round | int32 | Round that the commit corresponds to. | Must be > 0 |
+| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | |
+| Timestamp | [Time](#time) | Timestamp represents the time at which a validator signed. | |
+| ValidatorAddress | bytes | Address of the validator | Length must be equal to 20 |
+| ValidatorIndex | int32 | Index at a specific block height corresponding to the Index of the validator in the set. | Must be > 0 |
+| Signature | bytes | Signature by the validator if they participated in consensus for the associated block. | Length must be > 0 and < 64 |
+| Extension | bytes | Vote extension provided by the Application running at the validator's node. | Length can be 0 |
+| ExtensionSignature | bytes | Signature for the extension | Length must be > 0 and < 64 |
+
+## CanonicalVote
+
+CanonicalVote is for validator signing. This type will not be present in a block.
+Votes are represented via `CanonicalVote` and also encoded using protobuf via `type.SignBytes` which includes the `ChainID`,
+and uses a different ordering of the fields.
+
+| Name | Type | Description | Validation |
+|-----------|---------------------------------|-----------------------------------------|------------------------------------------|
+| Type | [SignedMsgType](#signedmsgtype) | The type of message the vote refers to | Must be `PrevoteType` or `PrecommitType` |
+| Height | int64 | Height in which the vote was provided. | Must be > 0 |
+| Round | int64 | Round in which the vote was provided. | Must be > 0 |
+| BlockID | string | ID of the block the vote refers to. | |
+| Timestamp | string | Time of the vote. | |
+| ChainID | string | ID of the blockchain running consensus. | |
+
+For signing, votes are represented via [`CanonicalVote`](#canonicalvote) and also encoded using protobuf via
+`type.SignBytes` which includes the `ChainID`, and uses a different ordering of
+the fields.
+
+We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the `SignBytes`
+using the given ChainID:
+
+```go
+func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error {
+ if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) {
+ return ErrVoteInvalidValidatorAddress
+ }
+ v := vote.ToProto()
+ if !pubKey.VerifyBytes(types.VoteSignBytes(chainID, v), vote.Signature) {
+ return ErrVoteInvalidSignature
+ }
+ return nil
+}
+```
+
+### CanonicalVoteExtension
+
+Vote extensions are signed using a representation similar to votes.
+This is the structure to marshall in order to obtain the bytes to sign or verify the signature.
+
+| Name | Type | Description | Validation |
+|-----------|--------|---------------------------------------------|----------------------|
+| Extension | bytes | Vote extension provided by the Application. | Can have zero length |
+| Height | int64 | Height in which the extension was provided. | Must be > 0 |
+| Round | int64 | Round in which the extension was provided. | Must be > 0 |
+| ChainID | string | ID of the blockchain running consensus. | |
+
+## Proposal
+
+Proposal contains height and round for which this proposal is made, BlockID as a unique identifier
+of proposed block, timestamp, and POLRound (a so-called Proof-of-Lock (POL) round) that is needed for
+termination of the consensus. If POLRound >= 0, then BlockID corresponds to the block that
+is locked in POLRound. The message is signed by the validator private key.
+
+| Name | Type | Description | Validation |
+|-----------|---------------------------------|---------------------------------------------------------------------------------------|---------------------------------------------------------|
+| Type | [SignedMsgType](#signedmsgtype) | Represents a Proposal [SignedMsgType](#signedmsgtype) | Must be `ProposalType` [signedMsgType](#signedmsgtype) |
+| Height | uint64 | Height for which this vote was created for | Must be > 0 |
+| Round | int32 | Round that the commit corresponds to. | Must be > 0 |
+| POLRound | int64 | Proof of lock | Must be > 0 |
+| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) |
+| Timestamp | [Time](#time) | Timestamp represents the time at which a validator signed. | [Time](#time) |
+| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated block. | Length of signature must be > 0 and < 64 |
+
+## SignedMsgType
+
+Signed message type represents a signed messages in consensus.
+
+```proto
+enum SignedMsgType {
+
+ SIGNED_MSG_TYPE_UNKNOWN = 0;
+ // Votes
+ SIGNED_MSG_TYPE_PREVOTE = 1;
+ SIGNED_MSG_TYPE_PRECOMMIT = 2;
+
+ // Proposal
+ SIGNED_MSG_TYPE_PROPOSAL = 32;
+}
+```
+
+## Signature
+
+Signatures in CometBFT are raw bytes representing the underlying signature.
+
+See the [signature spec](./encoding.md#key-types) for more.
+
+## EvidenceList
+
+EvidenceList is a simple wrapper for a list of evidence:
+
+| Name | Type | Description | Validation |
+|----------|--------------------------------|----------------------------------------|-----------------------------------------------------------------|
+| Evidence | Array of [Evidence](#evidence) | List of verified [evidence](#evidence) | Validation adheres to individual types of [Evidence](#evidence) |
+
+## Evidence
+
+Evidence in CometBFT is used to indicate breaches in the consensus by a validator.
+
+More information on how evidence works in CometBFT can be found [here](../consensus/evidence.md)
+
+### DuplicateVoteEvidence
+
+`DuplicateVoteEvidence` represents a validator that has voted for two different blocks
+in the same round of the same height. Votes are lexicographically sorted on `BlockID`.
+
+| Name | Type | Description | Validation |
+|------------------|---------------|--------------------------------------------------------------------|-----------------------------------------------------|
+| VoteA | [Vote](#vote) | One of the votes submitted by a validator when they equivocated | VoteA must adhere to [Vote](#vote) validation rules |
+| VoteB | [Vote](#vote) | The second vote submitted by a validator when they equivocated | VoteB must adhere to [Vote](#vote) validation rules |
+| TotalVotingPower | int64 | The total power of the validator set at the height of equivocation | Must be equal to nodes own copy of the data |
+| ValidatorPower | int64 | Power of the equivocating validator at the height | Must be equal to the nodes own copy of the data |
+| Timestamp | [Time](#time) | Time of the block where the equivocation occurred | Must be equal to the nodes own copy of the data |
+
+### LightClientAttackEvidence
+
+`LightClientAttackEvidence` is a generalized evidence that captures all forms of known attacks on
+a light client such that a full node can verify, propose and commit the evidence on-chain for
+punishment of the malicious validators. There are three forms of attacks: Lunatic, Equivocation
+and Amnesia. These attacks are exhaustive. You can find a more detailed overview of this [here](../light-client/accountability#the-misbehavior-of-faulty-validators)
+
+| Name | Type | Description | Validation |
+|----------------------|------------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------|
+| ConflictingBlock | [LightBlock](#lightblock) | Read Below | Must adhere to the validation rules of [lightBlock](#lightblock) |
+| CommonHeight | int64 | Read Below | must be > 0 |
+| Byzantine Validators | Array of [Validators](#validator) | validators that acted maliciously | Read Below |
+| TotalVotingPower | int64 | The total power of the validator set at the height of the infraction | Must be equal to the nodes own copy of the data |
+| Timestamp | [Time](#time) | Time of the block where the infraction occurred | Must be equal to the nodes own copy of the data |
+
+## LightBlock
+
+LightBlock is the core data structure of the [light client](../light-client/README.md). It combines two data structures needed for verification ([signedHeader](#signedheader) & [validatorSet](#validatorset)).
+
+| Name | Type | Description | Validation |
+|--------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
+| SignedHeader | [SignedHeader](#signedheader) | The header and commit, these are used for verification purposes. To find out more visit [light client docs](../light-client/README.md) | Must not be nil and adhere to the validation rules of [signedHeader](#signedheader) |
+| ValidatorSet | [ValidatorSet](#validatorset) | The validatorSet is used to help with verify that the validators in that committed the infraction were truly in the validator set. | Must not be nil and adhere to the validation rules of [validatorSet](#validatorset) |
+
+The `SignedHeader` and `ValidatorSet` are linked by the hash of the validator set (`SignedHeader.ValidatorsHash == ValidatorSet.Hash()`).
+
+## SignedHeader
+
+The SignedHeader is the [header](#header) accompanied by the commit to prove it.
+
+| Name | Type | Description | Validation |
+|--------|-------------------|-------------------|-----------------------------------------------------------------------------------|
+| Header | [Header](#header) | [Header](#header) | Header cannot be nil and must adhere to the [Header](#header) validation criteria |
+| Commit | [Commit](#commit) | [Commit](#commit) | Commit cannot be nil and must adhere to the [Commit](#commit) criteria |
+
+## ValidatorSet
+
+| Name | Type | Description | Validation |
+|------------|----------------------------------|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
+| Validators | Array of [validator](#validator) | List of the active validators at a specific height | The list of validators can not be empty or nil and must adhere to the validation rules of [validator](#validator) |
+| Proposer | [validator](#validator) | The block proposer for the corresponding block | The proposer cannot be nil and must adhere to the validation rules of [validator](#validator) |
+
+## Validator
+
+| Name | Type | Description | Validation |
+|------------------|---------------------------|---------------------------------------------------------------------------------------------------|---------------------------------------------------|
+| Address | [Address](#address) | Validators Address | Length must be of size 20 |
+| Pubkey | slice of bytes (`[]byte`) | Validators Public Key | must be a length greater than 0 |
+| VotingPower | int64 | Validators voting power | cannot be < 0 |
+| ProposerPriority | int64 | Validators proposer priority. This is used to gauge when a validator is up next to propose blocks | No validation, value can be negative and positive |
+
+## Address
+
+Address is a type alias of a slice of bytes. The address is calculated by hashing the public key using sha256 and truncating it to only use the first 20 bytes of the slice.
+
+```go
+const (
+ TruncatedSize = 20
+)
+
+func SumTruncated(bz []byte) []byte {
+ hash := sha256.Sum256(bz)
+ return hash[:TruncatedSize]
+}
+```
+
+## ConsensusParams
+
+| Name | Type | Description | Field Number |
+|-----------|-------------------------------------|------------------------------------------------------------------------------|--------------|
+| block | [BlockParams](#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 |
+| evidence | [EvidenceParams](#evidenceparams) | Parameters limiting the validity of evidence of byzantine behavior. | 2 |
+| validator | [ValidatorParams](#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 |
+| version | [VersionParams](#versionparams) | The ABCI application version. | 4 |
+
+### BlockParams
+
+| Name | Type | Description | Field Number |
+|--------------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+| max_bytes | int64 | Max size of a block, in bytes. | 1 |
+| max_gas | int64 | Max sum of `GasWanted` in a proposed block. NOTE: blocks that violate this may be committed if there are Byzantine proposers. It's the application's responsibility to handle this when processing a block! | 2 |
+
+### EvidenceParams
+
+| Name | Type | Description | Field Number |
+|--------------------|------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+| max_age_num_blocks | int64 | Max age of evidence, in blocks. | 1 |
+| max_age_duration | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Max age of evidence, in time. It should correspond with an app's "unbonding period" or other similar mechanism for handling [Nothing-At-Stake attacks](https://vitalik.ca/general/2017/12/31/pos_faq.html#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). | 2 |
+| max_bytes | int64 | maximum size in bytes of total evidence allowed to be entered into a block | 3 |
+
+### ValidatorParams
+
+| Name | Type | Description | Field Number |
+|---------------|-----------------|-----------------------------------------------------------------------|--------------|
+| pub_key_types | repeated string | List of accepted public key types. Uses same naming as `PubKey.Type`. | 1 |
+
+### VersionParams
+
+| Name | Type | Description | Field Number |
+|-------------|--------|-------------------------------|--------------|
+| app_version | uint64 | The ABCI application version. | 1 |
+
+## Proof
+
+| Name | Type | Description | Field Number |
+|-----------|----------------|-----------------------------------------------|--------------|
+| total | int64 | Total number of items. | 1 |
+| index | int64 | Index item to prove. | 2 |
+| leaf_hash | bytes | Hash of item value. | 3 |
+| aunts | repeated bytes | Hashes from leaf's sibling to a root's child. | 4 |
diff --git a/cometbft/v0.38/spec/core/Overview.mdx b/cometbft/v0.38/spec/core/Overview.mdx
new file mode 100644
index 00000000..37048a2f
--- /dev/null
+++ b/cometbft/v0.38/spec/core/Overview.mdx
@@ -0,0 +1,13 @@
+---
+order: 1
+parent:
+ title: Core
+ order: 3
+---
+
+This section describes the core types and functionality of the CometBFT protocol implementation.
+
+- [Core Data Structures](/cometbft/v0.38/spec/core/Data_structures)
+- [Encoding](/cometbft/v0.38/spec/core/encoding)
+- [Genesis](/cometbft/v0.38/spec/core/genesis)
+- [State](/cometbft/v0.38/spec/core/state)
\ No newline at end of file
diff --git a/cometbft/v0.38/spec/core/encoding.mdx b/cometbft/v0.38/spec/core/encoding.mdx
new file mode 100644
index 00000000..beac7171
--- /dev/null
+++ b/cometbft/v0.38/spec/core/encoding.mdx
@@ -0,0 +1,304 @@
+---
+order: 2
+---
+
+# Encoding
+
+## Protocol Buffers
+
+CometBFT uses [Protocol Buffers](https://developers.google.com/protocol-buffers), specifically proto3, for all data structures.
+
+Please see the [Proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) for more details.
+
+## Byte Arrays
+
+The encoding of a byte array is simply the raw-bytes prefixed with the length of
+the array as a `UVarint` (what proto calls a `Varint`).
+
+For details on varints, see the [protobuf
+spec](https://developers.google.com/protocol-buffers/docs/encoding#varints).
+
+For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`,
+while a byte-array containing 300 entries beginning with `[0xA, 0xB, ...]` would
+be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding of 300.
+
+## Hashing
+
+CometBFT uses `SHA256` as its hash function.
+Objects are always serialized before being hashed.
+So `SHA256(obj)` is short for `SHA256(ProtoEncoding(obj))`.
+
+## Public Key Cryptography
+
+CometBFT uses Protobuf [Oneof](https://developers.google.com/protocol-buffers/docs/proto3#oneof)
+to distinguish between different types of public keys and signatures.
+Additionally, for each public key, CometBFT
+defines an Address function that can be used as a more compact identifier in
+place of the public key. Here we list the concrete types, their names,
+and prefix bytes for public keys and signatures, as well as the address schemes
+for each PubKey. Note for brevity we don't
+include details of the private keys beyond their type and name.
+
+### Key Types
+
+Each type specifies its own pubkey, address, and signature format.
+
+#### Ed25519
+
+The address is the first 20-bytes of the SHA256 hash of the raw 32-byte public key:
+
+```go
+address = SHA256(pubkey)[:20]
+```
+
+The signature is the raw 64-byte ED25519 signature.
+
+CometBFT adopts [zip215](https://zips.z.cash/zip-0215) for verification of ed25519 signatures.
+
+> Note: This change will be released in the next major release of CometBFT.
+
+#### Secp256k1
+
+The address is the first 20-bytes of the SHA256 hash of the raw 32-byte public key:
+
+```go
+address = SHA256(pubkey)[:20]
+```
+
+## Other Common Types
+
+### BitArray
+
+The BitArray is used in some consensus messages to represent votes received from
+validators, or parts received in a block. It is represented
+with a struct containing the number of bits (`Bits`) and the bit-array itself
+encoded in base64 (`Elems`).
+
+| Name | Type |
+|-------|----------------------------|
+| bits | int64 |
+| elems | slice of int64 (`[]int64`) |
+
+Note BitArray receives a special JSON encoding in the form of `x` and `_`
+representing `1` and `0`. Ie. the BitArray `10110` would be JSON encoded as
+`"x_xx_"`
+
+### Part
+
+Part is used to break up blocks into pieces that can be gossiped in parallel
+and securely verified using a Merkle tree of the parts.
+
+Part contains the index of the part (`Index`), the actual
+underlying data of the part (`Bytes`), and a Merkle proof that the part is contained in
+the set (`Proof`).
+
+| Name | Type |
+|-------|---------------------------|
+| index | uint32 |
+| bytes | slice of bytes (`[]byte`) |
+| proof | [proof](#merkle-proof) |
+
+See details of SimpleProof, below.
+
+### MakeParts
+
+Encode an object using Protobuf and slice it into parts.
+CometBFT uses a part size of 65536 bytes, and allows a maximum of 1601 parts
+(see `types.MaxBlockPartsCount`). This corresponds to the hard-coded block size
+limit of 100MB.
+
+```go
+func MakeParts(block Block) []Part
+```
+
+## Merkle Trees
+
+For an overview of Merkle trees, see
+[wikipedia](https://en.wikipedia.org/wiki/Merkle_tree)
+
+We use the RFC 6962 specification of a merkle tree, with sha256 as the hash function.
+Merkle trees are used throughout CometBFT to compute a cryptographic digest of a data structure.
+The differences between RFC 6962 and the simplest form of a merkle tree are that:
+
+1. leaf nodes and inner nodes have different hashes.
+ This is for "second pre-image resistance", to prevent the proof to an inner node being valid as the proof of a leaf.
+ The leaf nodes are `SHA256(0x00 || leaf_data)`, and inner nodes are `SHA256(0x01 || left_hash || right_hash)`.
+
+2. When the number of items isn't a power of two, the left half of the tree is as big as it could be.
+ (The largest power of two less than the number of items) This allows new leaves to be added with less
+ recomputation. For example:
+
+```md
+ Simple Tree with 6 items Simple Tree with 7 items
+
+ * *
+ / \ / \
+ / \ / \
+ / \ / \
+ / \ / \
+ * * * *
+ / \ / \ / \ / \
+ / \ / \ / \ / \
+ / \ / \ / \ / \
+ * * h4 h5 * * * h6
+ / \ / \ / \ / \ / \
+h0 h1 h2 h3 h0 h1 h2 h3 h4 h5
+```
+
+### MerkleRoot
+
+The function `MerkleRoot` is a simple recursive function defined as follows:
+
+```go
+// SHA256([]byte{})
+func emptyHash() []byte {
+ return tmhash.Sum([]byte{})
+}
+
+// SHA256(0x00 || leaf)
+func leafHash(leaf []byte) []byte {
+ return tmhash.Sum(append(0x00, leaf...))
+}
+
+// SHA256(0x01 || left || right)
+func innerHash(left []byte, right []byte) []byte {
+ return tmhash.Sum(append(0x01, append(left, right...)...))
+}
+
+// largest power of 2 less than k
+func getSplitPoint(k int) { ... }
+
+func MerkleRoot(items [][]byte) []byte{
+ switch len(items) {
+ case 0:
+ return emptyHash()
+ case 1:
+ return leafHash(items[0])
+ default:
+ k := getSplitPoint(len(items))
+ left := MerkleRoot(items[:k])
+ right := MerkleRoot(items[k:])
+ return innerHash(left, right)
+ }
+}
+```
+
+Note: `MerkleRoot` operates on items which are arbitrary byte arrays, not
+necessarily hashes. For items which need to be hashed first, we introduce the
+`Hashes` function:
+
+```go
+func Hashes(items [][]byte) [][]byte {
+ return SHA256 of each item
+}
+```
+
+Note: we will abuse notation and invoke `MerkleRoot` with arguments of type `struct` or type `[]struct`.
+For `struct` arguments, we compute a `[][]byte` containing the protobuf encoding of each
+field in the struct, in the same order the fields appear in the struct.
+For `[]struct` arguments, we compute a `[][]byte` by protobuf encoding the individual `struct` elements.
+
+### Merkle Proof
+
+Proof that a leaf is in a Merkle tree is composed as follows:
+
+| Name | Type |
+|----------|----------------------------|
+| total | int64 |
+| index | int64 |
+| leafHash | slice of bytes (`[]byte`) |
+| aunts | Matrix of bytes ([][]byte) |
+
+Which is verified as follows:
+
+```golang
+func (proof Proof) Verify(rootHash []byte, leaf []byte) bool {
+ assert(proof.LeafHash == leafHash(leaf))
+
+ computedHash := computeHashFromAunts(proof.Index, proof.Total, proof.LeafHash, proof.Aunts)
+ return computedHash == rootHash
+}
+
+func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byte) []byte{
+ assert(index < total && index >= 0 && total > 0)
+
+ if total == 1{
+ assert(len(innerHashes) == 0)
+ return leafHash
+ }
+
+ assert(len(innerHashes) > 0)
+
+ numLeft := getSplitPoint(total) // largest power of 2 less than total
+ if index < numLeft {
+ leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
+ assert(leftHash != nil)
+ return innerHash(leftHash, innerHashes[len(innerHashes)-1])
+ }
+ rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
+ assert(rightHash != nil)
+ return innerHash(innerHashes[len(innerHashes)-1], rightHash)
+}
+```
+
+The number of aunts is limited to 100 (`MaxAunts`) to protect the node against DOS attacks.
+This limits the tree size to 2^100 leaves, which should be sufficient for any
+conceivable purpose.
+
+### IAVL+ Tree
+
+Because CometBFT only uses a Simple Merkle Tree, application developers are expected to use their own Merkle tree in their applications. For example, the IAVL+ Tree - an immutable self-balancing binary tree for persisting application state is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/ae77f0080a724b159233bd9b289b2e91c0de21b5/docs/interfaces/lite/specification.md)
+
+## JSON
+
+CometBFT has its own JSON encoding in order to keep backwards compatibility with the previous RPC layer.
+
+Registered types are encoded as:
+
+```json
+{
+ "type": "",
+ "value":
+}
+```
+
+For instance, an ED25519 PubKey would look like:
+
+```json
+{
+ "type": "tendermint/PubKeyEd25519",
+ "value": "uZ4h63OFWuQ36ZZ4Bd6NF+/w9fWUwrOncrQsackrsTk="
+}
+```
+
+Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the
+`"type"` is the type name for Ed25519 pubkeys.
+
+### Signed Messages
+
+Signed messages (eg. votes, proposals) in the consensus are encoded using protobuf.
+
+When signing, the elements of a message are re-ordered so the fixed-length fields
+are first, making it easy to quickly check the type, height, and round.
+The `ChainID` is also appended to the end.
+We call this encoding the SignBytes. For instance, SignBytes for a vote is the protobuf encoding of the following struct:
+
+```protobuf
+message CanonicalVote {
+ SignedMsgType type = 1;
+ sfixed64 height = 2; // canonicalization requires fixed size encoding here
+ sfixed64 round = 3; // canonicalization requires fixed size encoding here
+ CanonicalBlockID block_id = 4;
+ google.protobuf.Timestamp timestamp = 5;
+ string chain_id = 6;
+}
+```
+
+The field ordering and the fixed sized encoding for the first three fields is optimized to ease parsing of SignBytes
+in HSMs. It creates fixed offsets for relevant fields that need to be read in this context.
+
+> Note: All canonical messages are length prefixed.
+
+For more details, see the [signing spec](/cometbft/v0.38/spec/consensus/Validator-Signing).
+Also, see the motivating discussion in
+[#1622](https://github.com/tendermint/tendermint/issues/1622).
diff --git a/cometbft/v0.38/spec/core/genesis.mdx b/cometbft/v0.38/spec/core/genesis.mdx
new file mode 100644
index 00000000..843c4f69
--- /dev/null
+++ b/cometbft/v0.38/spec/core/genesis.mdx
@@ -0,0 +1,38 @@
+---
+order: 3
+---
+
+# Genesis
+
+The genesis file is the starting point of a chain. An application will populate the `app_state` field in the genesis with their required fields. CometBFT is not able to validate this section because it is unaware what application state consists of.
+
+## Genesis Fields
+
+- `genesis_time`: The genesis time is the time the blockchain started or will start. If nodes are started before this time they will sit idle until the time specified.
+- `chain_id`: The chainid is the chain identifier. Every chain should have a unique identifier. When conducting a fork based upgrade, we recommend changing the chainid to avoid network or consensus errors.
+- `initial_height`: This field is the starting height of the blockchain. When conducting a chain restart to avoid restarting at height 1, the network is able to start at a specified height.
+- `consensus_params`
+ - `block`
+ - `max_bytes`: The max amount of bytes a block can be.
+ - `max_gas`: The maximum amount of gas that a block can have.
+ - `time_iota_ms`: This parameter has no value anymore in CometBFT.
+
+- `evidence`
+ - `max_age_num_blocks`: After this preset amount of blocks has passed, a single piece of evidence is considered invalid.
+ - `max_age_duration`: After this preset amount of time has passed a single piece of evidence is considered invalid.
+ - `max_bytes`: The max amount of bytes of all evidence included in a block.
+
+> Note: For evidence to be considered invalid, evidence must be older than both `max_age_num_blocks` and `max_age_duration`
+
+- `validator`
+ - `pub_key_types`: Defines which curves are to be accepted as a valid validator consensus key. CometBFT supports ed25519, sr25519 and secp256k1.
+
+- `version`
+ - `app_version`: The version of the application. This is set by the application and is used to identify which version of the app a user should be using in order to operate a node.
+
+- `validators`
+ - This is an array of validators. This validator set is used as the starting validator set of the chain. This field can be empty, if the application sets the validator set in `InitChain`.
+
+- `app_hash`: The applications state root hash. This field does not need to be populated at the start of the chain, the application may provide the needed information via `Initchain`.
+
+- `app_state`: This section is filled in by the application and is unknown to CometBFT.
diff --git a/cometbft/v0.38/spec/core/state.mdx b/cometbft/v0.38/spec/core/state.mdx
new file mode 100644
index 00000000..9e350a78
--- /dev/null
+++ b/cometbft/v0.38/spec/core/state.mdx
@@ -0,0 +1,131 @@
+---
+order: 4
+---
+
+# State
+
+The state contains information whose cryptographic digest is included in block headers, and thus is
+necessary for validating new blocks. For instance, the validators set and the results of
+transactions are never included in blocks, but their Merkle roots are:
+the state keeps track of them.
+
+The `State` object itself is an implementation detail, since it is never
+included in a block or gossiped over the network, and we never compute
+its hash. The persistence or query interface of the `State` object
+is an implementation detail and not included in the specification.
+However, the types in the `State` object are part of the specification, since
+the Merkle roots of the `State` objects are included in blocks and values are used during
+validation.
+
+```go
+type State struct {
+ ChainID string
+ InitialHeight int64
+
+ LastBlockHeight int64
+ LastBlockID types.BlockID
+ LastBlockTime time.Time
+
+ Version Version
+ LastResults []Result
+ AppHash []byte
+
+ LastValidators ValidatorSet
+ Validators ValidatorSet
+ NextValidators ValidatorSet
+
+ ConsensusParams ConsensusParams
+}
+```
+
+The chain ID and initial height are taken from the genesis file, and not changed again. The
+initial height will be `1` in the typical case, `0` is an invalid value.
+
+Note there is a hard-coded limit of 10000 validators. This is inherited from the
+limit on the number of votes in a commit.
+
+Further information on [`Validator`'s](/cometbft/v0.38/spec/core/Data_structures#validator),
+[`ValidatorSet`'s](/cometbft/v0.38/spec/core/Data_structures#validatorset) and
+[`ConsensusParams`'s](/cometbft/v0.38/spec/core/Data_structures#consensusparams) can
+be found in [data structures](/cometbft/v0.38/spec/core/Data_structures)
+
+## Execution
+
+State gets updated at the end of executing a block. Of specific interest is `ResponseEndBlock` and
+`ResponseCommit`
+
+```go
+type ResponseEndBlock struct {
+ ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"`
+ ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"`
+ Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"`
+}
+```
+
+where
+
+```go
+type ValidatorUpdate struct {
+ PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"`
+ Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"`
+}
+```
+
+and
+
+```go
+type ResponseCommit struct {
+ // reserve 1
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"`
+}
+```
+
+`ValidatorUpdates` are used to add and remove validators to the current set as well as update
+validator power. Setting validator power to 0 in `ValidatorUpdate` will cause the validator to be
+removed. `ConsensusParams` are safely copied across (i.e. if a field is nil it gets ignored) and the
+`Data` from the `ResponseCommit` is used as the `AppHash`
+
+## Version
+
+```go
+type Version struct {
+ consensus Consensus
+ software string
+}
+```
+
+[`Consensus`](/cometbft/v0.38/spec/core/Data_structures#version) contains the protocol version for the blockchain and the
+application.
+
+## Block
+
+The total size of a block is limited in bytes by the `ConsensusParams.Block.MaxBytes`.
+Proposed blocks must be less than this size, and will be considered invalid
+otherwise.
+
+The Application may set `ConsensusParams.Block.MaxBytes` to -1.
+In that case, the actual block limit is set to 100 MB,
+and CometBFT will provide all transactions in the mempool as part of `PrepareProposal`.
+The application has to be careful to return a list of transactions in `ResponsePrepareProposal`
+whose size is less than or equal to `RequestPrepareProposal.MaxTxBytes`.
+
+Blocks should additionally be limited by the amount of "gas" consumed by the
+transactions in the block, though this is not yet implemented.
+
+## Evidence
+
+For evidence in a block to be valid, it must satisfy:
+
+```go
+block.Header.Time-evidence.Time < ConsensusParams.Evidence.MaxAgeDuration &&
+ block.Header.Height-evidence.Height < ConsensusParams.Evidence.MaxAgeNumBlocks
+```
+
+A block must not contain more than `ConsensusParams.Evidence.MaxBytes` of evidence. This is
+implemented to mitigate spam attacks.
+
+## Validator
+
+Validators from genesis file and `ResponseEndBlock` must have pubkeys of type ∈
+`ConsensusParams.Validator.PubKeyTypes`.
diff --git a/cometbft/v0.38/spec/ivy-proofs/Dockerfile b/cometbft/v0.38/spec/ivy-proofs/Dockerfile
new file mode 100644
index 00000000..be60151f
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/Dockerfile
@@ -0,0 +1,37 @@
+# we need python2 support, which was dropped after buster:
+FROM debian:buster
+
+RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+RUN apt-get update
+RUN apt-get install -y apt-utils
+
+# Install and configure locale `en_US.UTF-8`
+RUN apt-get install -y locales && \
+ sed -i -e "s/# $en_US.*/en_US.UTF-8 UTF-8/" /etc/locale.gen && \
+ dpkg-reconfigure --frontend=noninteractive locales && \
+ update-locale LANG=en_US.UTF-8
+ENV LANG=en_US.UTF-8
+
+RUN apt-get update
+RUN apt-get install -y git python2 python-pip g++ cmake python-ply python-tk tix pkg-config libssl-dev python-setuptools
+
+# create a user:
+RUN useradd -ms /bin/bash user
+USER user
+WORKDIR /home/user
+
+RUN git clone --recurse-submodules https://github.com/kenmcmil/ivy.git
+WORKDIR /home/user/ivy/
+RUN git checkout 271ee38980699115508eb90a0dd01deeb750a94b
+
+RUN python2.7 build_submodules.py
+RUN mkdir -p "/home/user/python/lib/python2.7/site-packages"
+ENV PYTHONPATH="/home/user/python/lib/python2.7/site-packages"
+# need to install pyparsing manually because otherwise wrong version found
+RUN pip install pyparsing
+RUN python2.7 setup.py install --prefix="/home/user/python/"
+ENV PATH=$PATH:"/home/user/python/bin/"
+WORKDIR /home/user/tendermint-proof/
+
+ENTRYPOINT ["/home/user/tendermint-proof/check_proofs.sh"]
+
diff --git a/cometbft/v0.38/spec/ivy-proofs/README.md b/cometbft/v0.38/spec/ivy-proofs/README.md
new file mode 100644
index 00000000..00a4bed2
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/README.md
@@ -0,0 +1,33 @@
+# Ivy Proofs
+
+```copyright
+Copyright (c) 2020 Galois, Inc.
+SPDX-License-Identifier: Apache-2.0
+```
+
+## Contents
+
+This folder contains:
+
+* `tendermint.ivy`, a specification of the Tendermint algorithm as described in *The latest gossip on BFT consensus* by E. Buchman, J. Kwon, Z. Milosevic.
+* `abstract_tendermint.ivy`, a more abstract specification of Tendermint that is more verification-friendly.
+* `classic_safety.ivy`, a proof that Tendermint satisfies the classic safety property of BFT consensus: if every two quorums have a well-behaved node in common, then no two well-behaved nodes ever disagree.
+* `accountable_safety_1.ivy`, a proof that, assuming every quorum contains at least one well-behaved node, if two well-behaved nodes disagree, then there is evidence demonstrating at least f+1 nodes misbehaved.
+* `accountable_safety_2.ivy`, a proof that, regardless of any assumption about quorums, well-behaved nodes cannot be framed by malicious nodes. In other words, malicious nodes can never construct evidence that incriminates a well-behaved node.
+* `network_shim.ivy`, the network model and a convenience `shim` object to interface with the Tendermint specification.
+* `domain_model.ivy`, a specification of the domain model underlying the Tendermint specification, i.e. rounds, value, quorums, etc.
+
+All specifications and proofs are written in [Ivy](https://github.com/kenmcmil/ivy).
+
+The license above applies to all files in this folder.
+
+
+## Building and running
+
+The easiest way to check the proofs is to use [Docker](https://www.docker.com/).
+
+1. Install [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/).
+2. Build a Docker image: `docker-compose build`
+3. Run the proofs inside the Docker container: `docker-compose run
+tendermint-proof`. This will check all the proofs with the `ivy_check`
+command and write the output of `ivy_check` to a subdirectory of `./output/`.
diff --git a/cometbft/v0.38/spec/ivy-proofs/abstract_tendermint.ivy b/cometbft/v0.38/spec/ivy-proofs/abstract_tendermint.ivy
new file mode 100644
index 00000000..4a160be2
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/abstract_tendermint.ivy
@@ -0,0 +1,178 @@
+#lang ivy1.7
+# ---
+# layout: page
+# title: Abstract specification of Tendermint in Ivy
+# ---
+
+# Here we define an abstract version of the Tendermint specification. We use
+# two main forms of abstraction: a) We abstract over how information is
+# transmitted (there is no network). b) We abstract functions using relations.
+# For example, we abstract over a node's current round, instead only tracking
+# with a relation which rounds the node has left. We do something similar for
+# the `lockedRound` variable. This is in order to avoid using a function from
+# node to round, and it allows us to emit verification conditions that are
+# efficiently solvable by Z3.
+
+# This specification also defines the observations that are used to adjudicate
+# misbehavior. Well-behaved nodes faithfully observe every message that they
+# use to take a step, while Byzantine nodes can fake observations about
+# themselves (including withholding observations). Misbehavior is defined using
+# the collection of all observations made (in reality, those observations must
+# be collected first, but we do not model this process).
+
+include domain_model
+
+module abstract_tendermint = {
+
+# Protocol state
+# ##############
+
+ relation left_round(N:node, R:round)
+ relation prevoted(N:node, R:round, V:value)
+ relation precommitted(N:node, R:round, V:value)
+ relation decided(N:node, R:round, V:value)
+ relation locked(N:node, R:round, V:value)
+
+# Accountability relations
+# ########################
+
+ relation observed_prevoted(N:node, R:round, V:value)
+ relation observed_precommitted(N:node, R:round, V:value)
+
+# relations that are defined in terms of the previous two:
+ relation observed_equivocation(N:node)
+ relation observed_unlawful_prevote(N:node)
+ relation agreement
+ relation accountability_violation
+
+ object defs = { # we hide those definitions and use them only when needed
+ private {
+ definition [observed_equivocation_def] observed_equivocation(N) = exists V1,V2,R .
+ V1 ~= V2 & (observed_precommitted(N,R,V1) & observed_precommitted(N,R,V2) | observed_prevoted(N,R,V1) & observed_prevoted(N,R,V2))
+
+ definition [observed_unlawful_prevote_def] observed_unlawful_prevote(N) = exists V1,V2,R1,R2 .
+ V1 ~= value.nil & V2 ~= value.nil & V1 ~= V2 & R1 < R2 & observed_precommitted(N,R1,V1) & observed_prevoted(N,R2,V2)
+ & forall Q,R . R1 <= R & R < R2 & nset.is_quorum(Q) -> exists N2 . nset.member(N2,Q) & ~observed_prevoted(N2,R,V2)
+
+ definition [agreement_def] agreement = forall N1,N2,R1,R2,V1,V2 . well_behaved(N1) & well_behaved(N2) & decided(N1,R1,V1) & decided(N2,R2,V2) -> V1 = V2
+
+ definition [accountability_violation_def] accountability_violation = exists Q1,Q2 . nset.is_quorum(Q1) & nset.is_quorum(Q2) & (forall N . nset.member(N,Q1) & nset.member(N,Q2) -> observed_equivocation(N) | observed_unlawful_prevote(N))
+ }
+ }
+
+# Protocol transitions
+# ####################
+
+ after init {
+ left_round(N,R) := R < 0;
+ prevoted(N,R,V) := false;
+ precommitted(N,R,V) := false;
+ decided(N,R,V) := false;
+ locked(N,R,V) := false;
+
+ observed_prevoted(N,R,V) := false;
+ observed_precommitted(N,R,V) := false;
+ }
+
+# Actions are named after the corresponding line numbers in the Tendermint
+# arXiv paper.
+
+ action l_11(n:node, r:round) = { # start round r
+ require ~left_round(n,r);
+ left_round(n,R) := R < r;
+ }
+
+ action l_22(n:node, rp:round, v:value) = {
+ require ~left_round(n,rp);
+ require ~prevoted(n,rp,V) & ~precommitted(n,rp,V);
+ require (forall R,V . locked(n,R,V) -> V = v) | v = value.nil;
+ prevoted(n, rp, v) := true;
+ left_round(n, R) := R < rp; # leave all lower rounds.
+
+ observed_prevoted(n, rp, v) := observed_prevoted(n, rp, v) | well_behaved(n); # the node observes itself
+ }
+
+ action l_28(n:node, rp:round, v:value, vr:round, q:nset) = {
+ require ~left_round(n,rp) & ~prevoted(n,rp,V);
+ require ~prevoted(n,rp,V) & ~precommitted(n,rp,V);
+ require vr < rp;
+ require nset.is_quorum(q) & (forall N . nset.member(N,q) -> (prevoted(N,vr,v) | ~well_behaved(N)));
+ var proposal:value;
+ if value.valid(v) & ((forall R0,V0 . locked(n,R0,V0) -> R0 <= vr) | (forall R,V . locked(n,R,V) -> V = v)) {
+ proposal := v;
+ }
+ else {
+ proposal := value.nil;
+ };
+ prevoted(n, rp, proposal) := true;
+ left_round(n, R) := R < rp; # leave all lower rounds
+
+ observed_prevoted(N, vr, v) := observed_prevoted(N, vr, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q
+ observed_prevoted(n, rp, proposal) := observed_prevoted(n, rp, proposal) | well_behaved(n); # the node observes itself
+ }
+
+ action l_36(n:node, rp:round, v:value, q:nset) = {
+ require v ~= value.nil;
+ require ~left_round(n,rp);
+ require exists V . prevoted(n,rp,V);
+ require ~precommitted(n,rp,V);
+ require nset.is_quorum(q) & (forall N . nset.member(N,q) -> (prevoted(N,rp,v) | ~well_behaved(N)));
+ precommitted(n, rp, v) := true;
+ left_round(n, R) := R < rp; # leave all lower rounds
+ locked(n,R,V) := R <= rp & V = v;
+
+ observed_prevoted(N, rp, v) := observed_prevoted(N, rp, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q
+ observed_precommitted(n, rp, v) := observed_precommitted(n, rp, v) | well_behaved(n); # the node observes itself
+ }
+
+ action l_44(n:node, rp:round, q:nset) = {
+ require ~left_round(n,rp);
+ require ~precommitted(n,rp,V);
+ require nset.is_quorum(q) & (forall N .nset.member(N,q) -> (prevoted(N,rp,value.nil) | ~well_behaved(N)));
+ precommitted(n, rp, value.nil) := true;
+ left_round(n, R) := R < rp; # leave all lower rounds
+
+ observed_prevoted(N, rp, value.nil) := observed_prevoted(N, rp, value.nil) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q
+ observed_precommitted(n, rp, value.nil) := observed_precommitted(n, rp, value.nil) | well_behaved(n); # the node observes itself
+ }
+
+ action l_57(n:node, rp:round) = {
+ require ~left_round(n,rp);
+ require ~prevoted(n,rp,V);
+ prevoted(n, rp, value.nil) := true;
+ left_round(n, R) := R < rp; # leave all lower rounds
+
+ observed_prevoted(n, rp, value.nil) := observed_prevoted(n, rp, value.nil) | well_behaved(n); # the node observes itself
+ }
+
+ action l_61(n:node, rp:round) = {
+ require ~left_round(n,rp);
+ require ~precommitted(n,rp,V);
+ precommitted(n, rp, value.nil) := true;
+ left_round(n, R) := R < rp; # leave all lower rounds
+
+ observed_precommitted(n, rp, value.nil) := observed_precommitted(n, rp, value.nil) | well_behaved(n); # the node observes itself
+ }
+
+ action decide(n:node, r:round, v:value, q:nset) = {
+ require v ~= value.nil;
+ require nset.is_quorum(q) & (forall N . nset.member(N, q) -> (precommitted(N, r, v) | ~well_behaved(N)));
+ decided(n, r, v) := true;
+
+ observed_precommitted(N, r, v) := observed_precommitted(N, r, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the precommits of quorum q
+
+ }
+
+ action misbehave = {
+# Byzantine nodes can claim they observed whatever they want about themselves,
+# but they cannot remove observations. Note that we use assume because we don't
+# want those to be checked; we just want them to be true (that's the model of
+# Byzantine behavior).
+ observed_prevoted(N,R,V) := *;
+ assume (old observed_prevoted(N,R,V)) -> observed_prevoted(N,R,V);
+ assume well_behaved(N) -> old observed_prevoted(N,R,V) = observed_prevoted(N,R,V);
+ observed_precommitted(N,R,V) := *;
+ assume (old observed_precommitted(N,R,V)) -> observed_precommitted(N,R,V);
+ assume well_behaved(N) -> old observed_precommitted(N,R,V) = observed_precommitted(N,R,V);
+ }
+}
diff --git a/cometbft/v0.38/spec/ivy-proofs/accountable_safety_1.ivy b/cometbft/v0.38/spec/ivy-proofs/accountable_safety_1.ivy
new file mode 100644
index 00000000..02bdf1ad
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/accountable_safety_1.ivy
@@ -0,0 +1,143 @@
+#lang ivy1.7
+# ---
+# layout: page
+# title: Proof of Classic Safety
+# ---
+
+include tendermint
+include abstract_tendermint
+
+# Here we prove the first accountability property: if two well-behaved nodes
+# disagree, then there are two quorums Q1 and Q2 such that all members of the
+# intersection of Q1 and Q2 have violated the accountability properties.
+
+# The proof is done in two steps: first we prove the abstract specification
+# satisfies the property, and then we show by refinement that this property
+# also holds in the concrete specification.
+
+# To see what is checked in the refinement proof, use `ivy_show isolate=accountable_safety_1 accountable_safety_1.ivy`
+# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_accountable_safety accountable_safety_1.ivy`
+# To check the whole proof, use `ivy_check accountable_safety_1.ivy`.
+
+
+# Proof of the accountability property in the abstract specification
+# ==================================================================
+
+# We prove with tactics (see `lemma_1` and `lemma_2`) that, if some basic
+# invariants hold (see `invs` below), then the accountability property holds.
+
+isolate abstract_accountable_safety = {
+
+ instantiate abstract_tendermint
+
+# The main property
+# -----------------
+
+# If there is disagreement, then there is evidence that a third of the nodes
+# have violated the protocol:
+ invariant [accountability] agreement | accountability_violation
+ proof {
+ apply lemma_1.thm # this reduces to goal to three subgoals: p1, p2, and p3 (see their definition below)
+ proof [p1] {
+ assume invs.inv1
+ }
+ proof [p2] {
+ assume invs.inv2
+ }
+ proof [p3] {
+ assume invs.inv3
+ }
+ }
+
+# The invariants
+# --------------
+
+ isolate invs = {
+
+ # well-behaved nodes observe their own actions faithfully:
+ invariant [inv1] well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V))
+ # if a value is precommitted by a well-behaved node, then a quorum is observed to prevote it:
+ invariant [inv2] (exists N . well_behaved(N) & precommitted(N,R,V)) & V ~= value.nil -> exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V)
+ # if a value is decided by a well-behaved node, then a quorum is observed to precommit it:
+ invariant [inv3] (exists N . well_behaved(N) & decided(N,R,V)) -> 0 <= R & V ~= value.nil & exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_precommitted(N2,R,V)
+ private {
+ invariant (precommitted(N,R,V) | prevoted(N,R,V)) -> 0 <= R
+ invariant R < 0 -> left_round(N,R)
+ }
+
+ } with this, nset, round, accountable_bft.max_2f_byzantine
+
+# The theorems proved with tactics
+# --------------------------------
+
+# Using complete induction on rounds, we prove that, assuming that the
+# invariants inv1, inv2, and inv3 hold, the accountability property holds.
+
+# For technical reasons, we separate the proof in two steps
+ isolate lemma_1 = {
+
+ specification {
+ theorem [thm] {
+ property [p1] forall N,R,V . well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V))
+ property [p2] forall R,V . (exists N . well_behaved(N) & precommitted(N,R,V)) & V ~= value.nil -> exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V)
+ property [p3] forall R,V. (exists N . well_behaved(N) & decided(N,R,V)) -> 0 <= R & V ~= value.nil & exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_precommitted(N2,R,V)
+ #-------------------------------------------------------------------------------------------------------------------------------------------
+ property agreement | accountability_violation
+ }
+ proof {
+ assume inductive_property # the theorem follows from what we prove by induction below
+ }
+ }
+
+ implementation {
+ # complete induction is not built-in, so we introduce it with an axiom. Note that this only holds for a type where 0 is the smallest element
+ axiom [complete_induction] {
+ relation p(X:round)
+ { # base case
+ property p(0)
+ }
+ { # inductive step: show that if the property is true for all X lower or equal to x and y=x+1, then the property is true of y
+ individual a:round
+ individual b:round
+ property (forall X. 0 <= X & X <= a -> p(X)) & round.succ(a,b) -> p(b)
+ }
+ #--------------------------
+ property forall X . 0 <= X -> p(X)
+ }
+
+ # The main lemma: if inv1 and inv2 below hold and a quorum is observed to
+ # precommit V1 at R1 and another quorum is observed to precommit V2~=V1 at
+ # R2>=R1, then the intersection of two quorums (i.e. f+1 nodes) is observed to
+ # violate the protocol. We prove this by complete induction on R2.
+ theorem [inductive_property] {
+ property [p1] forall N,R,V . well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V))
+ property [p2] forall R,V . (exists N . well_behaved(N) & precommitted(N,R,V)) -> V = value.nil | exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V)
+ #-----------------------------------------------------------------------------------------------------------------------
+ property forall R2. 0 <= R2 -> ((exists V2,Q1,R1,V1,Q1 . V1 ~= value.nil & V2 ~= value.nil & V1 ~= V2 & 0 <= R1 & R1 <= R2 & nset.is_quorum(Q1) & (forall N . nset.member(N,Q1) -> observed_precommitted(N,R1,V1)) & (exists Q2 . nset.is_quorum(Q2) & forall N . nset.member(N,Q2) -> observed_prevoted(N,R2,V2))) -> accountability_violation)
+ }
+ proof {
+ apply complete_induction # the two subgoals (base case and inductive case) are then discharged automatically
+ # NOTE: this can take a long time depending on the SMT random seed (to try a different seed, use `ivy_check seed=$RANDOM`
+ }
+ }
+ } with this, round, nset, accountable_bft.max_2f_byzantine, defs.observed_equivocation_def, defs.observed_unlawful_prevote_def, defs.accountability_violation_def, defs.agreement_def
+
+} with round
+
+# The final proof
+# ===============
+
+isolate accountable_safety_1 = {
+
+# First we instantiate the concrete protocol:
+ instantiate tendermint(abstract_accountable_safety)
+
+# We then define what we mean by agreement
+ relation agreement
+ definition [agreement_def] agreement = forall N1,N2. well_behaved(N1) & well_behaved(N2) & server.decision(N1) ~= value.nil & server.decision(N2) ~= value.nil -> server.decision(N1) = server.decision(N2)
+
+ invariant abstract_accountable_safety.agreement -> agreement
+
+ invariant [accountability] agreement | abstract_accountable_safety.accountability_violation
+
+} with value, round, proposers, shim, abstract_accountable_safety, abstract_accountable_safety.defs.agreement_def, accountable_safety_1.agreement_def
diff --git a/cometbft/v0.38/spec/ivy-proofs/accountable_safety_2.ivy b/cometbft/v0.38/spec/ivy-proofs/accountable_safety_2.ivy
new file mode 100644
index 00000000..7fb92890
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/accountable_safety_2.ivy
@@ -0,0 +1,52 @@
+#lang ivy1.7
+
+include tendermint
+include abstract_tendermint
+
+# Here we prove the second accountability property: no well-behaved node is
+# ever observed to violate the accountability properties.
+
+# The proof is done in two steps: first we prove that the abstract specification
+# satisfies the property, and then we show by refinement that this property
+# also holds in the concrete specification.
+
+# To see what is checked in the refinement proof, use `ivy_show isolate=accountable_safety_2 accountable_safety_2.ivy`
+# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_accountable_safety_2 accountable_safety_2.ivy`
+# To check the whole proof, use `ivy_check complete=fo accountable_safety_2.ivy`.
+
+# Proof that the property holds in the abstract specification
+# ============================================================
+
+isolate abstract_accountable_safety_2 = {
+
+ instantiate abstract_tendermint
+
+# the main property:
+ invariant [wb_never_punished] well_behaved(N) -> ~(observed_equivocation(N) | observed_unlawful_prevote(N))
+
+# the main invariant for proving wb_not_punished:
+ invariant well_behaved(N) & precommitted(N,R,V) & ~locked(N,R,V) & V ~= value.nil -> exists R2,V2 . V2 ~= value.nil & R < R2 & precommitted(N,R2,V2) & locked(N,R2,V2)
+
+ invariant (exists N . well_behaved(N) & precommitted(N,R,V) & V ~= value.nil) -> exists Q . nset.is_quorum(Q) & forall N . nset.member(N,Q) -> observed_prevoted(N,R,V)
+
+ invariant well_behaved(N) -> (observed_prevoted(N,R,V) <-> prevoted(N,R,V))
+ invariant well_behaved(N) -> (observed_precommitted(N,R,V) <-> precommitted(N,R,V))
+
+# nodes stop prevoting or precommitting in lower rounds when doing so in a higher round:
+ invariant well_behaved(N) & prevoted(N,R2,V2) & R1 < R2 -> left_round(N,R1)
+ invariant well_behaved(N) & locked(N,R2,V2) & R1 < R2 -> left_round(N,R1)
+
+ invariant [precommit_unique_per_round] well_behaved(N) & precommitted(N,R,V1) & precommitted(N,R,V2) -> V1 = V2
+
+} with nset, round, abstract_accountable_safety_2.defs.observed_equivocation_def, abstract_accountable_safety_2.defs.observed_unlawful_prevote_def
+
+# Proof that the property holds in the concrete specification
+# ===========================================================
+
+isolate accountable_safety_2 = {
+
+ instantiate tendermint(abstract_accountable_safety_2)
+
+ invariant well_behaved(N) -> ~(abstract_accountable_safety_2.observed_equivocation(N) | abstract_accountable_safety_2.observed_unlawful_prevote(N))
+
+} with round, value, shim, abstract_accountable_safety_2, abstract_accountable_safety_2.defs.observed_equivocation_def, abstract_accountable_safety_2.defs.observed_unlawful_prevote_def
diff --git a/cometbft/v0.38/spec/ivy-proofs/check_proofs.sh b/cometbft/v0.38/spec/ivy-proofs/check_proofs.sh
new file mode 100755
index 00000000..6afd1a96
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/check_proofs.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# returns non-zero error code if any proof fails
+
+success=0
+log_dir=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 6)
+cmd="ivy_check seed=$RANDOM"
+mkdir -p output/$log_dir
+
+echo "Checking classic safety:"
+res=$($cmd classic_safety.ivy | tee "output/$log_dir/classic_safety.txt" | tail -n 1)
+if [ "$res" = "OK" ]; then
+ echo "OK"
+else
+ echo "FAILED"
+ success=1
+fi
+
+echo "Checking accountable safety 1:"
+res=$($cmd accountable_safety_1.ivy | tee "output/$log_dir/accountable_safety_1.txt" | tail -n 1)
+if [ "$res" = "OK" ]; then
+ echo "OK"
+else
+ echo "FAILED"
+ success=1
+fi
+
+echo "Checking accountable safety 2:"
+res=$($cmd complete=fo accountable_safety_2.ivy | tee "output/$log_dir/accountable_safety_2.txt" | tail -n 1)
+if [ "$res" = "OK" ]; then
+ echo "OK"
+else
+ echo "FAILED"
+ success=1
+fi
+
+echo
+echo "See ivy_check output in the output/ folder"
+exit $success
diff --git a/cometbft/v0.38/spec/ivy-proofs/classic_safety.ivy b/cometbft/v0.38/spec/ivy-proofs/classic_safety.ivy
new file mode 100644
index 00000000..b422a2c1
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/classic_safety.ivy
@@ -0,0 +1,85 @@
+#lang ivy1.7
+# ---
+# layout: page
+# title: Proof of Classic Safety
+# ---
+
+include tendermint
+include abstract_tendermint
+
+# Here we prove the classic safety property: assuming that every two quorums
+# have a well-behaved node in common, no two well-behaved nodes ever disagree.
+
+# The proof is done in two steps: first we prove that the abstract specification
+# satisfies the property, and then we show by refinement that this property
+# also holds in the concrete specification.
+
+# To see what is checked in the refinement proof, use `ivy_show isolate=classic_safety classic_safety.ivy`
+# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_classic_safety classic_safety.ivy`
+
+# To check the whole proof, use `ivy_check classic_safety.ivy`.
+
+# Note that all the verification conditions sent to Z3 for this proof are in
+# EPR.
+
+# Classic safety in the abstract model
+# ====================================
+
+# We start by proving that classic safety holds in the abstract model.
+
+isolate abstract_classic_safety = {
+
+ instantiate abstract_tendermint
+
+ invariant [classic_safety] classic_bft.quorum_intersection & decided(N1,R1,V1) & decided(N2,R2,V2) -> V1 = V2
+
+# The notion of choosable value
+# -----------------------------
+
+ relation choosable(R:round, V:value)
+ definition choosable(R,V) = exists Q . nset.is_quorum(Q) & forall N . well_behaved(N) & nset.member(N,Q) -> ~left_round(N,R) | precommitted(N,R,V)
+
+# Main invariants
+# ---------------
+
+# `classic_safety` is inductive relative to those invariants
+
+ invariant [decision_is_quorum_precommit] (exists N1 . decided(N1,R,V)) -> exists Q. nset.is_quorum(Q) & forall N2. well_behaved(N2) & nset.member(N2, Q) -> precommitted(N2,R,V)
+
+ invariant [precommitted_is_quorum_prevote] V ~= value.nil & (exists N1 . precommitted(N1,R,V)) -> exists Q. nset.is_quorum(Q) & forall N2. well_behaved(N2) & nset.member(N2, Q) -> prevoted(N2,R,V)
+
+ invariant [prevote_unique_per_round] prevoted(N,R,V1) & prevoted(N,R,V2) -> V1 = V2
+
+# This is the core invariant: as long as a precommitted value is still choosable, it remains protected by a lock and prevents any new value from being prevoted:
+ invariant [locks] classic_bft.quorum_intersection & V ~= value.nil & precommitted(N,R,V) & choosable(R,V) -> locked(N,R,V) & forall R2,V2 . R < R2 & prevoted(N,R2,V2) -> V2 = V | V2 = value.nil
+
+# Supporting invariants
+# ---------------------
+
+# The main invariants are inductive relative to those
+
+ invariant decided(N,R,V) -> V ~= value.nil
+
+ invariant left_round(N,R2) & R1 < R2 -> left_round(N,R1) # if a node left round R2>R1, then it also left R1:
+
+ invariant prevoted(N,R2,V2) & R1 < R2 -> left_round(N,R1)
+ invariant precommitted(N,R2,V2) & R1 < R2 -> left_round(N,R1)
+
+} with round, nset, classic_bft.quorum_intersection_def
+
+# The refinement proof
+# ====================
+
+# Now, thanks to the refinement relation that we establish in
+# `concrete_tendermint.ivy`, we prove that classic safety transfers to the
+# concrete specification:
+isolate classic_safety = {
+
+ # We instantiate the `tendermint` module providing `abstract_classic_safety` as abstract model.
+ instantiate tendermint(abstract_classic_safety)
+
+ # We prove that if every two quorums have a well-behaved node in common,
+ # then well-behaved nodes never disagree:
+ invariant [classic_safety] classic_bft.quorum_intersection & server.decision(N1) ~= value.nil & server.decision(N2) ~= value.nil -> server.decision(N1) = server.decision(N2)
+
+} with value, round, proposers, shim, abstract_classic_safety # here we list all the specifications that we rely on for this proof
diff --git a/cometbft/v0.38/spec/ivy-proofs/count_lines.sh b/cometbft/v0.38/spec/ivy-proofs/count_lines.sh
new file mode 100755
index 00000000..b2c457e2
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/count_lines.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+r='^\s*$\|^\s*\#\|^\s*\}\s*$\|^\s*{\s*$' # removes comments and blank lines and lines that contain only { or }
+N1=`cat tendermint.ivy domain_model.ivy network_shim.ivy | grep -v $r'\|.*invariant.*' | wc -l`
+N2=`cat abstract_tendermint.ivy | grep "observed_" | wc -l` # the observed_* variables specify the observations of the nodes
+SPEC_LINES=`expr $N1 + $N2`
+echo "spec lines: $SPEC_LINES"
+N3=`cat abstract_tendermint.ivy | grep -v $r'\|.*observed_.*' | wc -l`
+N4=`cat accountable_safety_1.ivy | grep -v $r | wc -l`
+PROOF_LINES=`expr $N3 + $N4`
+echo "proof lines: $PROOF_LINES"
+RATIO=`bc <<< "scale=2;$PROOF_LINES / $SPEC_LINES"`
+echo "proof-to-code ratio for the accountable-safety property: $RATIO"
diff --git a/cometbft/v0.38/spec/ivy-proofs/docker-compose.yml b/cometbft/v0.38/spec/ivy-proofs/docker-compose.yml
new file mode 100644
index 00000000..e0612d4b
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/docker-compose.yml
@@ -0,0 +1,7 @@
+version: '3'
+services:
+ tendermint-proof:
+ build: .
+ volumes:
+ - ./:/home/user/tendermint-proof:ro
+ - ./output:/home/user/tendermint-proof/output:rw
diff --git a/cometbft/v0.38/spec/ivy-proofs/domain_model.ivy b/cometbft/v0.38/spec/ivy-proofs/domain_model.ivy
new file mode 100644
index 00000000..1fd3cc99
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/domain_model.ivy
@@ -0,0 +1,144 @@
+#lang ivy1.7
+
+include order # this is a file from the standard library (`ivy/ivy/include/1.7/order.ivy`)
+
+isolate round = {
+ type this
+ individual minus_one:this
+ relation succ(R1:round, R2:round)
+ action incr(i:this) returns (j:this)
+ specification {
+# to simplify verification, we treat rounds as an abstract totally ordered set with a successor relation.
+ instantiate totally_ordered(this)
+ property minus_one < 0
+ property succ(X,Z) -> (X < Z & ~(X < Y & Y < Z))
+ after incr {
+ ensure succ(i,j)
+ }
+ }
+ implementation {
+# here we prove that the abstraction is sound.
+ interpret this -> int # rounds are integers in the Tendermint specification.
+ definition minus_one = 0-1
+ definition succ(R1,R2) = R2 = R1 + 1
+ implement incr {
+ j := i+1;
+ }
+ }
+}
+
+instance node : iterable # nodes are a set with an order, that can be iterated over (see order.ivy in the standard library)
+
+relation well_behaved(N:node) # whether a node is well-behaved or not. NOTE: Used only in the proof and the Byzantine model; nodes do not know who is well-behaved and who is not.
+
+isolate proposers = {
+ # each round has a unique proposer in Tendermint. In order to avoid a
+ # function from round to node (which makes verification more difficult), we
+ # abstract over this function using a relation.
+ relation is_proposer(N:node, R:round)
+ export action get_proposer(r:round) returns (n:node)
+ specification {
+ property is_proposer(N1,R) & is_proposer(N2,R) -> N1 = N2
+ after get_proposer {
+ ensure is_proposer(n,r);
+ }
+ }
+ implementation {
+ function f(R:round):node
+ definition f(r:round) = <<>>
+ definition is_proposer(N,R) = N = f(R)
+ implement get_proposer {
+ n := f(r);
+ }
+ }
+}
+
+isolate value = { # the type of values
+ type this
+ relation valid(V:value)
+ individual nil:value
+ specification {
+ property ~valid(nil)
+ }
+ implementation {
+ interpret value -> bv[2]
+ definition nil = <<< -1 >>> # let's say nil is -1
+ definition valid(V) = V ~= nil
+ }
+}
+
+object nset = { # the type of node sets
+ type this # a set of N=3f+i nodes for 0 < i <= 3
+ #include
+ namespace hash_space {
+ template
+ class hash > {
+ public:
+ size_t operator()(const std::set &s) const {
+ hash h;
+ size_t res = 0;
+ for (const T &e : s)
+ res += h(e);
+ return res;
+ }
+ };
+ }
+ >>>
+ interpret nset -> <<< std::set<`node`> >>>
+ definition member(n:node, s:nset) = <<< `s`.find(`n`) != `s`.end() >>>
+ definition is_quorum(s:nset) = <<< 3*`s`.size() > 2*`node.size` >>>
+ definition is_blocking(s:nset) = <<< 3*`s`.size() > `node.size` >>>
+ implement empty {
+ <<<
+ >>>
+ }
+ implement insert {
+ <<<
+ `t` = `s`;
+ `t`.insert(`n`);
+ >>>
+ }
+ <<< encode `nset`
+
+ std::ostream &operator <<(std::ostream &s, const `nset` &a) {
+ s << "{";
+ for (auto iter = a.begin(); iter != a.end(); iter++) {
+ if (iter != a.begin()) s << ", ";
+ s << *iter;
+ }
+ s << "}";
+ return s;
+ }
+
+ template <>
+ `nset` _arg<`nset`>(std::vector &args, unsigned idx, long long bound) {
+ throw std::invalid_argument("Not implemented"); // no syntax for nset values in the REPL
+ }
+
+ >>>
+ }
+}
+
+object classic_bft = {
+ relation quorum_intersection
+ private {
+ definition [quorum_intersection_def] quorum_intersection = forall Q1,Q2. nset.is_quorum(Q1) & nset.is_quorum(Q2)
+ -> exists N. well_behaved(N) & nset.member(N, Q1) & nset.member(N, Q2) # every two quorums have a well-behaved node in common
+ }
+}
+
+trusted isolate accountable_bft = {
+ # this is our baseline assumption about quorums:
+ private {
+ property [max_2f_byzantine] nset.is_quorum(Q) -> exists N . well_behaved(N) & nset.member(N,Q) # every quorum has a well-behaved member
+ }
+}
diff --git a/cometbft/v0.38/spec/ivy-proofs/network_shim.ivy b/cometbft/v0.38/spec/ivy-proofs/network_shim.ivy
new file mode 100644
index 00000000..ebc3a04f
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/network_shim.ivy
@@ -0,0 +1,133 @@
+#lang ivy1.7
+# ---
+# layout: page
+# title: Network model and network shim
+# ---
+
+# Here we define a network module, which is our model of the network, and a
+# shim module that sits on top of the network and which, upon receiving a
+# message, calls the appropriate protocol handler.
+
+include domain_model
+
+# Here we define an enumeration type for identifying the 3 different types of
+# messages that nodes send.
+object msg_kind = { # TODO: merge with step_t
+ # Tag distinguishing the three Tendermint message types; used by the shim
+ # to dispatch incoming messages to the right handler.
+ type this = {proposal, prevote, precommit}
+}
+
+# Here we define the type of messages `msg`. Its members are structs with the fields described below.
+object msg = {
+ type this = struct {
+ m_kind : msg_kind, # proposal, prevote, or precommit
+ m_src : node, # sender; byzantine_send forbids forging well-behaved senders
+ m_round : round, # round the message pertains to
+ m_value : value, # carried value (value.nil for nil votes)
+ m_vround : round # valid round; only set by proposal senders, ignored by vote handlers
+ }
+}
+
+# This is our model of the network:
+isolate net = {
+
+ export action recv(dst:node,v:msg)
+ action send(src:node,dst:node,v:msg)
+ # Note that the `recv` action is exported, meaning that it can be called
+ # non-deterministically by the environment any time it is enabled. In other
+ # words, a packet that is in flight can be received at any time. In this
+ # sense, the network is fully asynchronous. Moreover, there is no
+ # requirement that a given message will be received at all.
+
+ # The state of the network consists of all the packets that have been
+ # sent so far, along with their destination.
+ # `sent` is never cleared, so a message can be delivered any number of
+ # times: the model allows duplication as well as loss.
+ relation sent(V:msg, N:node)
+
+ after init {
+ sent(V, N) := false
+ }
+
+ before send {
+ sent(v,dst) := true
+ }
+
+ before recv {
+ require sent(v,dst) # only sent messages can be received.
+ }
+}
+
+# The network shim sits on top of the network and, upon receiving a message,
+# calls the appropriate protocol handler. It also exposes a `broadcast` action
+# that sends to all nodes.
+
+isolate shim = {
+
+ # In order not to repeat the same code for each handler, we use a handler
+ # module parameterized by the type of message it will handle. Below we
+ # instantiate this module for the 3 types of messages of Tendermint
+ module handler(p_kind) = {
+ action handle(dst:node,m:msg)
+ object spec = {
+ before handle {
+ # a handler may only be invoked on a message that was really sent
+ # to `dst` and whose kind matches this handler's kind
+ assert sent(m,dst) & m.m_kind = p_kind
+ }
+ }
+ }
+
+ instance proposal_handler : handler(msg_kind.proposal)
+ instance prevote_handler : handler(msg_kind.prevote)
+ instance precommit_handler : handler(msg_kind.precommit)
+
+ # shim-level ghost history of what has been sent (mirrors net.sent)
+ relation sent(M:msg,N:node)
+
+ action broadcast(src:node,m:msg)
+ action send(src:node,dst:node,m:msg)
+
+ specification {
+ after init {
+ sent(M,D) := false;
+ }
+ before broadcast {
+ # broadcast marks m as sent to every destination D
+ sent(m,D) := true
+ }
+ before send {
+ sent(m,dst) := true
+ }
+ }
+
+ # Here we give an implementation of it that satisfies its specification:
+ implementation {
+
+ # dispatch a received message to the handler matching its kind
+ implement net.recv(dst:node,m:msg) {
+
+ if m.m_kind = msg_kind.proposal {
+ call proposal_handler.handle(dst,m)
+ }
+ else if m.m_kind = msg_kind.prevote {
+ call prevote_handler.handle(dst,m)
+ }
+ else if m.m_kind = msg_kind.precommit {
+ call precommit_handler.handle(dst,m)
+ }
+ }
+
+ implement broadcast { # broadcast sends to all nodes, including the sender.
+ var iter := node.iter.create(0);
+ while ~iter.is_end
+ invariant net.sent(M,D) -> sent(M,D)
+ {
+ var n := iter.val;
+ call net.send(src,n,m);
+ iter := iter.next;
+ }
+ }
+
+ implement send {
+ call net.send(src,dst,m)
+ }
+
+ private {
+ # refinement: every message the network saw is recorded by the shim
+ invariant net.sent(M,D) -> sent(M,D)
+ }
+ }
+
+} with net, node # to prove that the shim implementation satisfies the shim specification, we rely on the specification of net and node.
diff --git a/cometbft/v0.38/spec/ivy-proofs/output/.gitignore b/cometbft/v0.38/spec/ivy-proofs/output/.gitignore
new file mode 100644
index 00000000..5e7d2734
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/output/.gitignore
@@ -0,0 +1,4 @@
+# Ignore everything in this directory
+*
+# Except this file
+!.gitignore
diff --git a/cometbft/v0.38/spec/ivy-proofs/tendermint.ivy b/cometbft/v0.38/spec/ivy-proofs/tendermint.ivy
new file mode 100644
index 00000000..b7678bef
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/tendermint.ivy
@@ -0,0 +1,420 @@
+#lang ivy1.7
+# ---
+# layout: page
+# title: Specification of Tendermint in Ivy
+# ---
+
+# This specification closely follows the pseudo-code given in "The latest
+# gossip on BFT consensus" by E. Buchman, J. Kwon, Z. Milosevic
+#
+
+include domain_model
+include network_shim
+
+# We model the Tendermint protocol as an Ivy object. Like in Object-Oriented
+# Programming, the basic structuring unit in Ivy is the object. Objects have
+# internal state and actions (i.e. methods in OO parlance) that modify their
+# state. We model Tendermint as an object whose actions represent steps taken
+# by individual nodes in the protocol. Actions in Ivy can have preconditions,
+# and a valid execution is a sequence of actions whose preconditions are all
+# satisfied in the state in which they are called.
+
+# For technical reasons, we define below a `tendermint` module instead of an
+# object. Ivy modules are a little bit like classes in OO programs, and like
+# classes they can be instantiated to obtain objects. To instantiate the
+# `tendermint` module, we must provide an abstract-protocol object. This allows
+# us to use different abstract-protocol objects for different parts of the
+# proof, and to do so without too much notational burden (we could have used
+# Ivy monitors, but then we would need to prefix every variable name by the
+# name of the object containing it, which clutters things a bit compared to the
+# approach we took).
+
+# The abstract-protocol object is called by the resulting tendermint object so
+# as to run the abstract protocol alongside the concrete protocol. This allows
+# us to transfer properties proved of the abstract protocol to the concrete
+# protocol, as follows. First, we prove that running the abstract protocol in
+# this way results in a valid execution of the abstract protocol. This is done
+# by checking that all preconditions of the abstract actions are satisfied at
+# their call sites. Second, we establish a relation between abstract state and
+# concrete state (in the form of invariants of the resulting, two-object
+# transition system) that allow us to transfer properties proved in the
+# abstract protocol to the concrete protocol (for example, we prove that any
+# decision made in the Tendermint protocol is also made in the abstract
+# protocol; if the abstract protocol satisfies the agreement property, this
+# allows us to conclude that the Tendermint protocol also does).
+
+# The abstract protocol object that we will use is always the same, and only
+# the abstract properties that we prove about it change in the different
+# instantiations of the `tendermint` module. Thus we provide common invariants
+# that a) allow to prove that the abstract preconditions are met, and b)
+# provide a refinement relation (see end of the module) relating the state of
+# Tendermint to the state of the abstract protocol.
+
+# In the model, Byzantine nodes can send whatever messages they want, except
+# that they cannot forge sender identities. This reflects the fact that, in
+# practice, nodes use public key cryptography to sign their messages.
+
+# Finally, note that the observations that serve to adjudicate misbehavior are
+# defined only in the abstract protocol (they happen in the abstract actions).
+
+module tendermint(abstract_protocol) = {
+
+ # the initial value of a node:
+ # (left uninterpreted here; a concrete definition is supplied when the
+ # module is instantiated, e.g. in tendermint_test.ivy)
+ function init_val(N:node): value
+
+ # the three step types of the protocol state machine
+ object step_t = {
+ type this = {propose, prevote, precommit}
+ } # refer to those e.g. as step_t.propose
+
+ object server(n:node) = {
+
+ # the current round of a node
+ individual round_p: round
+
+ individual step: step_t # current step within the round
+
+ individual decision: value # decided value; value.nil while undecided
+
+ individual lockedValue: value # locked value (pseudo-code line 38)
+ individual lockedRound: round # round of the lock; minus_one when unlocked
+
+ individual validValue: value # last valid value seen (line 42)
+ individual validRound: round # its round; minus_one if none (line 43)
+
+
+ # one-shot guards: each pseudo-code rule below fires at most once per
+ # round (resp. per round/value); see the `require ~done_*` preconditions
+ relation done_l34(R:round)
+ relation done_l36(R:round, V:value)
+ relation done_l47(R:round)
+
+ # variables for scheduling request
+ # (set when the corresponding timeout is armed; consumed by onTimeout*)
+ relation propose_timer_scheduled(R:round)
+ relation prevote_timer_scheduled(R:round)
+ relation precommit_timer_scheduled(R:round)
+
+ # messages this node has received, recorded by the shim handlers below
+ relation _recved_proposal(Sender:node, R:round, V:value, VR:round)
+ relation _recved_prevote(Sender:node, R:round, V:value)
+ relation _recved_precommit(Sender:node, R:round, V:value)
+
+ relation _has_started # becomes true once `start` has run (line 10)
+
+ after init {
+ round_p := 0;
+ step := step_t.propose;
+ decision := value.nil;
+
+ lockedValue := value.nil;
+ lockedRound := round.minus_one;
+
+ validValue := value.nil;
+ validRound := round.minus_one;
+
+ done_l34(R) := false;
+ done_l36(R, V) := false;
+ done_l47(R) := false;
+
+ propose_timer_scheduled(R) := false;
+ prevote_timer_scheduled(R) := false;
+ precommit_timer_scheduled(R) := false;
+
+ _recved_proposal(Sender, R, V, VR) := false;
+ _recved_prevote(Sender, R, V) := false;
+ _recved_precommit(Sender, R, V) := false;
+
+ _has_started := false;
+ }
+
+ # the node's proposal value (pseudo-code getValue(), line 18); here it is
+ # simply the node's fixed initial value
+ action getValue returns (v:value) = {
+ v := init_val(n)
+ }
+
+ # entry point: a node may start exactly once (line 10)
+ export action start = {
+ require ~_has_started;
+ _has_started := true;
+ # line 10
+ call startRound(0);
+ }
+
+ # line 11-21
+ action startRound(r:round) = {
+ # line 12
+ round_p := r;
+
+ # line 13
+ step := step_t.propose;
+
+ var proposal : value;
+
+ # line 14
+ if (proposers.get_proposer(r) = n) {
+ if validValue ~= value.nil { # line 15
+ proposal := validValue; # line 16
+ } else {
+ proposal := getValue(); # line 18
+ };
+ call broadcast_proposal(r, proposal, validRound); # line 19
+ } else {
+ propose_timer_scheduled(r) := true; # line 21
+ };
+
+ # run the abstract protocol in lockstep for the refinement proof
+ call abstract_protocol.l_11(n, r);
+ }
+
+ # Internal helper (not exported, so callable only from fixed call sites):
+ # build a PROPOSAL message for round `r` carrying value `v` and valid
+ # round `vr`, then broadcast it to all nodes via the shim.
+ action broadcast_proposal(r:round, v:value, vr:round) = {
+ var prop: msg;
+ prop.m_src := n;
+ prop.m_kind := msg_kind.proposal;
+ prop.m_round := r;
+ prop.m_vround := vr;
+ prop.m_value := v;
+ call shim.broadcast(n,prop);
+ }
+
+ # record incoming proposals; protocol rules read _recved_proposal only
+ implement shim.proposal_handler.handle(msg:msg) {
+ _recved_proposal(msg.m_src, msg.m_round, msg.m_value, msg.m_vround) := true;
+ }
+
+ # line 22-27
+ # prevote on a fresh proposal (valid round = minus_one) from the current
+ # round's proposer, while still in the propose step
+ export action l_22(v:value) = {
+ require _has_started;
+ require _recved_proposal(proposers.get_proposer(round_p), round_p, v, round.minus_one);
+ require step = step_t.propose;
+
+ # line 23: prevote v only if it is valid and we are not locked on a
+ # different value; otherwise prevote nil
+ if (value.valid(v) & (lockedRound = round.minus_one | lockedValue = v)) {
+ call broadcast_prevote(round_p, v); # line 24
+ call abstract_protocol.l_22(n, round_p, v);
+ } else {
+ call broadcast_prevote(round_p, value.nil); # line 26
+ call abstract_protocol.l_22(n, round_p, value.nil);
+ };
+
+ # line 27
+ step := step_t.prevote;
+ }
+
+ # line 28-33
+ # prevote on a re-proposal justified by a quorum of prevotes in an
+ # earlier valid round vr
+ export action l_28(r:round, v:value, vr:round, q:nset) = {
+ require _has_started;
+ require r = round_p;
+ require _recved_proposal(proposers.get_proposer(r), r, v, vr);
+ require nset.is_quorum(q);
+ require nset.member(N,q) -> _recved_prevote(N,vr,v);
+ require step = step_t.propose;
+ require vr >= 0 & vr < r;
+
+ # line 29: accept if valid and our lock does not forbid it
+ if (value.valid(v) & (lockedRound <= vr | lockedValue = v)) {
+ call broadcast_prevote(r, v);
+ } else {
+ call broadcast_prevote(r, value.nil);
+ };
+
+ call abstract_protocol.l_28(n,r,v,vr,q);
+ step := step_t.prevote;
+ }
+
+ # Internal helper: broadcast a PREVOTE for value `v` (possibly value.nil)
+ # in round `r`. The m_vround field is irrelevant for prevotes and is left
+ # unassigned, exactly as callers expect.
+ action broadcast_prevote(r:round, v:value) = {
+ var vote: msg;
+ vote.m_src := n;
+ vote.m_round := r;
+ vote.m_kind := msg_kind.prevote;
+ vote.m_value := v;
+ call shim.broadcast(n,vote);
+ }
+
+ # record incoming prevotes
+ implement shim.prevote_handler.handle(msg:msg) {
+ _recved_prevote(msg.m_src, msg.m_round, msg.m_value) := true;
+ }
+
+ # line 34-35
+ # on a quorum of prevotes in the current round, arm the prevote timeout
+ export action l_34(r:round, q:nset) = {
+ require _has_started;
+ require round_p = r;
+ require nset.is_quorum(q);
+ # NOTE(review): here `exists V` scopes over the implication, whereas the
+ # analogous premise in l_47 is written `... -> exists V . ...`. Confirm
+ # the intended quantifier scoping matches pseudo-code line 34 (2f+1
+ # prevotes with *any* values, not necessarily the same value).
+ require exists V . nset.member(N,q) -> _recved_prevote(N,r,V);
+ require step = step_t.prevote;
+ require ~done_l34(r); # fire at most once per round
+ done_l34(r) := true;
+
+ prevote_timer_scheduled(r) := true;
+ }
+
+
+ # line 36-43
+ # on a proposal plus a quorum of prevotes for a valid value v in the
+ # current round: lock and precommit (if still in prevote step), and in
+ # any case remember v as the valid value
+ export action l_36(r:round, v:value, q:nset) = {
+ require _has_started;
+ require r = round_p;
+ require exists VR . round.minus_one <= VR & VR < r & _recved_proposal(proposers.get_proposer(r), r, v, VR);
+ require nset.is_quorum(q);
+ require nset.member(N,q) -> _recved_prevote(N,r,v);
+ require value.valid(v);
+ require step = step_t.prevote | step = step_t.precommit;
+
+ require ~done_l36(r,v); # fire at most once per round and value
+ done_l36(r, v) := true;
+
+ if step = step_t.prevote {
+ lockedValue := v; # line 38
+ lockedRound := r; # line 39
+ call broadcast_precommit(r, v); # line 40
+ step := step_t.precommit; # line 41
+ call abstract_protocol.l_36(n, r, v, q);
+ };
+
+ validValue := v; # line 42
+ validRound := r; # line 43
+ }
+
+ # line 44-46
+ # on a quorum of nil prevotes in the current round, precommit nil
+ export action l_44(r:round, q:nset) = {
+ require _has_started;
+ require r = round_p;
+ require nset.is_quorum(q);
+ require nset.member(N,q) -> _recved_prevote(N,r,value.nil);
+ require step = step_t.prevote;
+
+ call broadcast_precommit(r, value.nil); # line 45
+ step := step_t.precommit; # line 46
+
+ call abstract_protocol.l_44(n, r, q);
+ }
+
+ # Internal helper: broadcast a PRECOMMIT for value `v` (possibly
+ # value.nil) in round `r`; m_vround is irrelevant and left unassigned.
+ action broadcast_precommit(r:round, v:value) = {
+ var pc: msg;
+ pc.m_src := n;
+ pc.m_round := r;
+ pc.m_kind := msg_kind.precommit;
+ pc.m_value := v;
+ call shim.broadcast(n,pc);
+ }
+
+ # record incoming precommits
+ implement shim.precommit_handler.handle(msg:msg) {
+ _recved_precommit(msg.m_src, msg.m_round, msg.m_value) := true;
+ }
+
+
+ # line 47-48
+ # on a quorum of precommits (for any values) in the current round, arm
+ # the precommit timeout
+ export action l_47(r:round, q:nset) = {
+ require _has_started;
+ require round_p = r;
+ require nset.is_quorum(q);
+ require nset.member(N,q) -> exists V . _recved_precommit(N,r,V);
+ require ~done_l47(r); # fire at most once per round
+ done_l47(r) := true;
+
+ precommit_timer_scheduled(r) := true;
+ }
+
+
+ # line 49-54
+ # decide v once a proposal and a quorum of precommits for v exist in some
+ # round r (any r, not only round_p, per the pseudo-code)
+ export action l_49_decide(r:round, v:value, q:nset) = {
+ require _has_started;
+ require exists VR . round.minus_one <= VR & VR < r & _recved_proposal(proposers.get_proposer(r), r, v, VR);
+ require nset.is_quorum(q);
+ require nset.member(N,q) -> _recved_precommit(N,r,v);
+ require decision = value.nil; # decide at most once
+
+ if value.valid(v) {
+ decision := v;
+ # MORE for next height
+ call abstract_protocol.decide(n, r, v, q);
+ }
+ }
+
+ # line 55-56
+ # on f+1 messages from a later round r, skip ahead to round r
+ export action l_55(r:round, b:nset) = {
+ require _has_started;
+ require nset.is_blocking(b);
+ # NOTE(review): check operator precedence here — as written, the VR
+ # bounds may bind only to the proposal disjunct; confirm this matches
+ # pseudo-code line 55 ("f+1 messages with round r > round_p").
+ require nset.member(N,b) -> exists VR . round.minus_one <= VR & VR < r & exists V . _recved_proposal(N,r,V,VR) | _recved_prevote(N,r,V) | _recved_precommit(N,r,V);
+ require r > round_p;
+ call startRound(r); # line 56
+ }
+
+ # line 57-60
+ # propose timeout: no acceptable proposal arrived in time; prevote nil
+ export action onTimeoutPropose(r:round) = {
+ require _has_started;
+ require propose_timer_scheduled(r); # armed in startRound (line 21)
+ require r = round_p;
+ require step = step_t.propose;
+ call broadcast_prevote(r,value.nil);
+ step := step_t.prevote;
+
+ call abstract_protocol.l_57(n,r);
+
+ propose_timer_scheduled(r) := false;
+ }
+
+ # line 61-64
+ # prevote timeout: no quorum on a single value; precommit nil
+ export action onTimeoutPrevote(r:round) = {
+ require _has_started;
+ require prevote_timer_scheduled(r); # armed in l_34
+ require r = round_p;
+ require step = step_t.prevote;
+ call broadcast_precommit(r,value.nil);
+ step := step_t.precommit;
+
+ call abstract_protocol.l_61(n,r);
+
+ prevote_timer_scheduled(r) := false;
+ }
+
+ # line 65-67
+ # precommit timeout: no decision reached; move to the next round
+ export action onTimeoutPrecommit(r:round) = {
+ require _has_started;
+ require precommit_timer_scheduled(r); # armed in l_47
+ require r = round_p;
+ call startRound(round.incr(r));
+
+ precommit_timer_scheduled(r) := false;
+ }
+
+# The Byzantine actions
+# ---------------------
+
+# Byzantine nodes can send whatever they want, but they cannot send
+# messages on behalf of well-behaved nodes. In practice this is implemented
+# using cryptography (e.g. public-key cryptography).
+
+ # a Byzantine node may send any message to any node, as long as it does
+ # not claim a well-behaved node as the sender
+ export action byzantine_send(m:msg, dst:node) = {
+ require ~well_behaved(n);
+ require ~well_behaved(m.m_src); # cannot forge the identity of well-behaved nodes
+ call shim.send(n,dst,m);
+ }
+
+# Byzantine nodes can also report fake observations, as defined in the abstract protocol.
+ export action fake_observations = {
+ call abstract_protocol.misbehave
+ }
+
+# Invariants
+# ----------
+
+# We provide common invariants that a) allow to prove that the abstract
+# preconditions are met, and b) provide a refinement relation.
+
+
+ specification {
+
+ # round bookkeeping matches the abstract protocol's left_round relation
+ invariant 0 <= round_p
+ invariant abstract_protocol.left_round(n,R) <-> R < round_p
+
+ # concrete lock variables refine the abstract locked relation
+ invariant lockedRound ~= round.minus_one -> forall R,V . abstract_protocol.locked(n,R,V) <-> R <= lockedRound & lockedValue = V
+ invariant lockedRound = round.minus_one -> forall R,V . ~abstract_protocol.locked(n,R,V)
+
+ # every vote sent or received from a well-behaved node is mirrored by
+ # the corresponding abstract prevoted/precommitted fact
+ invariant forall M:msg . well_behaved(M.m_src) & M.m_kind = msg_kind.prevote & shim.sent(M,N) -> abstract_protocol.prevoted(M.m_src,M.m_round,M.m_value)
+ invariant well_behaved(N) & _recved_prevote(N,R,V) -> abstract_protocol.prevoted(N,R,V)
+ invariant forall M:msg . well_behaved(M.m_src) & M.m_kind = msg_kind.precommit & shim.sent(M,N) -> abstract_protocol.precommitted(M.m_src,M.m_round,M.m_value)
+ invariant well_behaved(N) & _recved_precommit(N,R,V) -> abstract_protocol.precommitted(N,R,V)
+
+ # the step variable constrains what this node can have voted so far
+ invariant (step = step_t.prevote | step = step_t.propose) -> ~abstract_protocol.precommitted(n,round_p,V)
+ invariant step = step_t.propose -> ~abstract_protocol.prevoted(n,round_p,V)
+ invariant step = step_t.prevote -> exists V . abstract_protocol.prevoted(n,round_p,V)
+
+ # no votes in future rounds, and a clean state before start
+ invariant round_p < R -> ~(abstract_protocol.prevoted(n,R,V) | abstract_protocol.precommitted(n,R,V))
+ invariant ~_has_started -> step = step_t.propose & ~(abstract_protocol.prevoted(n,R,V) | abstract_protocol.precommitted(n,R,V)) & round_p = 0
+
+ # any concrete decision is backed by an abstract decision
+ invariant decision ~= value.nil -> exists R . abstract_protocol.decided(n,R,decision)
+ }
+ }
+}
diff --git a/cometbft/v0.38/spec/ivy-proofs/tendermint_test.ivy b/cometbft/v0.38/spec/ivy-proofs/tendermint_test.ivy
new file mode 100644
index 00000000..1299fc08
--- /dev/null
+++ b/cometbft/v0.38/spec/ivy-proofs/tendermint_test.ivy
@@ -0,0 +1,127 @@
+#lang ivy1.7
+
+include tendermint
+include abstract_tendermint
+
+# the abstract protocol instance used as the refinement ghost
+isolate ghost_ = {
+ instantiate abstract_tendermint
+}
+
+isolate protocol = {
+ instantiate tendermint(ghost_) # here we instantiate the parameter of the tendermint module with `ghost_`; however note that we don't extract any code for `ghost_` (it's not in the list of object in the extract, and it's thus sliced away).
+ implementation {
+ # concrete initial values for testing: node i proposes i mod 2
+ definition init_val(n:node) = <<< `n`%2 >>>
+ }
+ # attribute test = impl
+} with ghost_, shim, value, round, proposers
+
+# Here we run a simple scenario that exhibits an execution in which nodes make
+# a decision. We do this to rule out trivial modeling errors.
+
+# One option to check that this scenario is valid is to run it in Ivy's REPL.
+# For this, first compile the scenario:
+#```ivyc target=repl isolate=code trace=true tendermint_test.ivy
+# Then, run the produced binary (e.g. for 4 nodes):
+#``` ./tendermint_test 4
+# Finally, call the action:
+#``` scenarios.scenario_1
+# Note that Ivy will check at runtime that all action preconditions are
+# satisfied. For example, runing the scenario twice will cause a violation of
+# the precondition of the `start` action, because a node cannot start twice
+# (see `require ~_has_started` in action `start`).
+
+# Another possibility would be to run `ivy_check` on the scenario, but that
+# does not seem to work at the moment.
+
+isolate scenarios = {
+ individual all:nset # will be used as parameter to actions requiring a quorum
+
+ # build `all` as the set of every node, so it trivially forms a quorum
+ after init {
+ var iter := node.iter.create(0);
+ while ~iter.is_end
+ {
+ all := all.insert(iter.val);
+ iter := iter.next;
+ };
+ assert nset.is_quorum(all); # we can also use asserts to make sure we are getting what we expect
+ }
+
+ # happy path: round 0, proposer is node 0, every node receives every
+ # message and decides on value 0
+ export action scenario_1 = {
+ # all nodes start:
+ var iter := node.iter.create(0);
+ while ~iter.is_end
+ {
+ call protocol.server.start(iter.val);
+ iter := iter.next;
+ };
+ # all nodes receive the leader's proposal:
+ var m:msg;
+ m.m_kind := msg_kind.proposal;
+ m.m_src := 0;
+ m.m_round := 0;
+ m.m_value := 0;
+ m.m_vround := round.minus_one;
+ iter := node.iter.create(0);
+ while ~iter.is_end
+ {
+ call net.recv(iter.val,m);
+ iter := iter.next;
+ };
+ # all nodes prevote:
+ iter := node.iter.create(0);
+ while ~iter.is_end
+ {
+ call protocol.server.l_22(iter.val,0);
+ iter := iter.next;
+ };
+ # all nodes receive each other's prevote messages;
+ m.m_kind := msg_kind.prevote;
+ m.m_vround := 0;
+ iter := node.iter.create(0);
+ while ~iter.is_end
+ {
+ var iter2 := node.iter.create(0); # the sender
+ while ~iter2.is_end
+ {
+ m.m_src := iter2.val;
+ call net.recv(iter.val,m);
+ iter2 := iter2.next;
+ };
+ iter := iter.next;
+ };
+ # all nodes precommit:
+ iter := node.iter.create(0);
+ while ~iter.is_end
+ {
+ call protocol.server.l_36(iter.val,0,0,all);
+ iter := iter.next;
+ };
+ # all nodes receive each other's pre-commits
+ m.m_kind := msg_kind.precommit;
+ iter := node.iter.create(0);
+ while ~iter.is_end
+ {
+ var iter2 := node.iter.create(0); # the sender
+ while ~iter2.is_end
+ {
+ m.m_src := iter2.val;
+ call net.recv(iter.val,m);
+ iter2 := iter2.next;
+ };
+ iter := iter.next;
+ };
+ # now all nodes can decide:
+ iter := node.iter.create(0);
+ while ~iter.is_end
+ {
+ call protocol.server.l_49_decide(iter.val,0,0,all);
+ iter := iter.next;
+ };
+ }
+
+ # TODO: add more scenarios
+
+} with round, node, proposers, value, nset, protocol, shim, net
+
+# extract code = protocol, shim, round, node
+extract code = round, node, proposers, value, nset, protocol, shim, net, scenarios
diff --git a/cometbft/v0.38/spec/light-client/Accountability.mdx b/cometbft/v0.38/spec/light-client/Accountability.mdx
new file mode 100644
index 00000000..07179dd6
--- /dev/null
+++ b/cometbft/v0.38/spec/light-client/Accountability.mdx
@@ -0,0 +1,305 @@
+---
+order: 1
+parent:
+ title: Accountability
+ order: 4
+---
+
+# Fork accountability
+
+## Problem Statement
+
+Tendermint consensus algorithm guarantees the following specifications for all heights:
+
+* agreement -- no two correct full nodes decide differently.
+* validity -- the decided block satisfies the predefined predicate *valid()*.
+* termination -- all correct full nodes eventually decide,
+
+if the faulty validators have less than 1/3 of the voting power in the current validator set. In the case where this assumption
+does not hold, each of the specifications may be violated.
+
+The agreement property says that for a given height, any two correct validators that decide on a block for that height decide on the same block. That the block was indeed generated by the blockchain, can be verified starting from a trusted (genesis) block, and checking that all subsequent blocks are properly signed.
+
+However, faulty nodes may forge blocks and try to convince users (light clients) that the blocks had been correctly generated. In addition, Tendermint agreement might be violated in the case where 1/3 or more of the voting power belongs to faulty validators: Two correct validators decide on different blocks. The latter case motivates the term "fork": as Tendermint consensus also agrees on the next validator set, correct validators may have decided on disjoint next validator sets, and the chain branches into two or more partitions (possibly having faulty validators in common) and each branch continues to generate blocks independently of the other.
+
+We say that a fork is a case in which there are two commits for different blocks at the same height of the blockchain. The problem is to ensure that in those cases we are able to detect faulty validators (and not mistakenly accuse correct validators), and incentivize therefore validators to behave according to the protocol specification.
+
+**Conceptual Limit.** In order to prove misbehavior of a node, we have to show that the behavior deviates from correct behavior with respect to a given algorithm. Thus, an algorithm that detects misbehavior of nodes executing some algorithm *A* must be defined with respect to algorithm *A*. In our case, *A* is Tendermint consensus (+ other protocols in the infrastructure; e.g., Cosmos full nodes and the Light Client). If the consensus algorithm is changed/updated/optimized in the future, we have to check whether changes to the accountability algorithm are also required. All the discussions in this document are thus inherently specific to Tendermint consensus and the Light Client specification.
+
+**Q:** Should we distinguish between agreement for validators and agreement for full nodes? The case where all correct validators agree on a block, but a correct full node decides on a different block seems to be slightly less severe than the case where two correct validators decide on different blocks. Still, if a contaminated full node becomes a validator that may be problematic later on. Also it is not clear how gossiping is impaired if a contaminated full node is on a different branch.
+
+*Remark.* In the case 1/3 or more of the voting power belongs to faulty validators, also validity and termination can be broken. Termination can be broken if faulty processes just do not send the messages that are needed to make progress. Due to asynchrony, this is not punishable, because faulty validators can always claim they never received the messages that would have forced them to send messages.
+
+## The Misbehavior of Faulty Validators
+
+Forks are the result of faulty validators deviating from the protocol. In principle several such deviations can be detected without a fork actually occurring:
+
+1. double proposal: A faulty proposer proposes two different values (blocks) for the same height and the same round in Tendermint consensus.
+
+2. double signing: Tendermint consensus forces correct validators to prevote and precommit for at most one value per round. In case a faulty validator sends multiple prevote and/or precommit messages for different values for the same height/round, this is a misbehavior.
+
+3. lunatic validator: Tendermint consensus forces correct validators to prevote and precommit only for values *v* that satisfy *valid(v)*. If faulty validators prevote and precommit for *v* although *valid(v)=false* this is misbehavior.
+
+*Remark.* In isolation, Point 3 is an attack on validity (rather than agreement). However, the prevotes and precommits can then also be used to forge blocks.
+
+4. amnesia: Tendermint consensus has a locking mechanism. If a validator has some value v locked, then it can only prevote/precommit for v or nil. Sending a prevote/precommit message for a different value v' (that is not nil) while holding a lock on value v is misbehavior.
+
+5. spurious messages: In Tendermint consensus most of the message send instructions are guarded by threshold guards, e.g., one needs to receive *2f + 1* prevote messages to send precommit. Faulty validators may send precommit without having received the prevote messages.
+
+Independently of a fork happening, punishing this behavior might be important to prevent forks altogether. This should keep attackers from misbehaving: if less than 1/3 of the voting power is faulty, this misbehavior is detectable but will not lead to a safety violation. Thus, unless they have 1/3 or more (or in some cases more than 2/3) of the voting power attackers have the incentive to not misbehave. If attackers control too much voting power, we have to deal with forks, as discussed in this document.
+
+## Two types of forks
+
+* Fork-Full. Two correct validators decide on different blocks for the same height. Since also the next validator sets are decided upon, the correct validators may be partitioned to participate in two distinct branches of the forked chain.
+
+As in this case we have two different blocks (both having the same right/no right to exist), a central system invariant (one block per height decided by correct validators) is violated. As full nodes are contaminated in this case, the contamination can spread also to light clients. However, even without breaking this system invariant, light clients can be subject to a fork:
+
+* Fork-Light. All correct validators decide on the same block for height *h*, but faulty processes (validators or not), forge a different block for that height, in order to fool users (who use the light client).
+
+# Attack scenarios
+
+## On-chain attacks
+
+### Equivocation (one round)
+
+There are several scenarios in which forks might happen. The first is double signing within a round.
+
+* F1. Equivocation: faulty validators sign multiple vote messages (prevote and/or precommit) for different values *during the same round r* at a given height h.
+
+### Flip-flopping
+
+Tendermint consensus implements a locking mechanism: If a correct validator *p* receives proposal for value v and *2f + 1* prevotes for a value *id(v)* in round *r*, it locks *v* and remembers *r*. In this case, *p* also sends a precommit message for *id(v)*, which later may serve as proof that *p* locked *v*.
+In subsequent rounds, *p* only sends prevote messages for a value it had previously locked. However, it is possible to change the locked value if in a future round *r' > r*, if the process receives proposal and *2f + 1* prevotes for a different value *v'*. In this case, *p* could send a prevote/precommit for *id(v')*. This algorithmic feature can be exploited in two ways:
+
+* F2. Faulty Flip-flopping (Amnesia): faulty validators precommit some value *id(v)* in round *r* (value *v* is locked in round *r*) and then prevote for different value *id(v')* in higher round *r' > r* without previously correctly unlocking value *v*. In this case faulty processes "forget" that they have locked value *v* and prevote some other value in the following rounds.
+Some correct validators might have decided on *v* in *r*, and other correct validators decide on *v'* in *r'*. Here we can have branching on the main chain (Fork-Full).
+
+* F3. Correct Flip-flopping (Back to the past): There are some precommit messages signed by (correct) validators for value *id(v)* in round *r*. Still, *v* is not decided upon, and all processes move on to the next round. Then correct validators (correctly) lock and decide a different value *v'* in some round *r' > r*. And the correct validators continue; there is no branching on the main chain.
+However, faulty validators may use the correct precommit messages from round *r* together with a posteriori generated faulty precommit messages for round *r* to forge a block for a value that was not decided on the main chain (Fork-Light).
+
+## Off-chain attacks
+
+F1-F3 may contaminate the state of full nodes (and even validators). Contaminated (but otherwise correct) full nodes may thus communicate faulty blocks to light clients.
+Similarly, without actually interfering with the main chain, we can have the following:
+
+* F4. Phantom validators: faulty validators vote (sign prevote and precommit messages) in heights in which they are not part of the validator sets (at the main chain).
+
+* F5. Lunatic validator: faulty validators that sign vote messages to support (arbitrary) application state that is different from the application state that resulted from valid state transitions.
+
+## Types of victims
+
+We consider three types of potential attack victims:
+
+* FN: full node
+* LCS: light client with sequential header verification
+* LCB: light client with bisection based header verification
+
+F1 and F2 can be used by faulty validators to actually create multiple branches on the blockchain. That means that correctly operating full nodes decide on different blocks for the same height. Until a fork is detected locally by a full node (by receiving evidence from others or by some other local check that fails), the full node can spread corrupted blocks to light clients.
+
+*Remark.* If full nodes take a branch different from the one taken by the validators, it may be that the liveness of the gossip protocol may be affected. We should eventually look at this more closely. However, as it does not influence safety it is not a primary concern.
+
+F3 is similar to F1, except that no two correct validators decide on different blocks. It may still be the case that full nodes become affected.
+
+In addition, without creating a fork on the main chain, light clients can be contaminated by more than a third of validators that are faulty and sign a forged header.
+F4 cannot fool correct full nodes as they know the current validator set. Similarly, LCS know who the validators are. Hence, F4 is an attack against LCB that do not necessarily know the complete prefix of headers (Fork-Light), as they trust a header that is signed by at least one correct validator (trusting period method).
+
+The following table gives an overview of how the different attacks may affect different nodes. F1-F3 are *on-chain* attacks so they can corrupt the state of full nodes. Then if a light client (LCS or LCB) contacts a full node to obtain headers (or blocks), the corrupted state may propagate to the light client.
+
+F4 and F5 are *off-chain*, that is, these attacks cannot be used to corrupt the state of full nodes (which have sufficient knowledge on the state of the chain to not be fooled).
+
+| Attack | FN | LCS | LCB |
+|:------:|:------:|:------:|:------:|
+| F1 | direct | FN | FN |
+| F2 | direct | FN | FN |
+| F3 | direct | FN | FN |
+| F4 | | | direct |
+| F5 | | | direct |
+
+**Q:** Light clients are more vulnerable than full nodes, because the former do only verify headers but do not execute transactions. What kind of certainty is gained by a full node that executes a transaction?
+
+As a full node verifies all transactions, it can only be
+contaminated by an attack if the blockchain itself violates its invariant (one block per height), that is, in case of a fork that leads to branching.
+
+## Detailed Attack Scenarios
+
+### Equivocation based attacks
+
+In case of equivocation based attacks, faulty validators sign multiple votes (prevote and/or precommit) in the same
+round of some height. This attack can be executed on both full nodes and light clients. It requires 1/3 or more of voting power to be executed.
+
+#### Scenario 1: Equivocation on the main chain
+
+Validators:
+
+* CA - a set of correct validators with less than 1/3 of the voting power
+* CB - a set of correct validators with less than 1/3 of the voting power
+* CA and CB are disjoint
+* F - a set of faulty validators with 1/3 or more voting power
+
+Observe that this setting violates the Cosmos failure model.
+
+Execution:
+
+* A faulty proposer proposes block A to CA
+* A faulty proposer proposes block B to CB
+* Validators from the set CA and CB prevote for A and B, respectively.
+* Faulty validators from the set F prevote both for A and B.
+* The faulty prevote messages
+ * for A arrive at CA long before the B messages
+ * for B arrive at CB long before the A messages
+* Therefore correct validators from set CA and CB will observe
+more than 2/3 of prevotes for A and B and precommit for A and B, respectively.
+* Faulty validators from the set F precommit both values A and B.
+* Thus, we have more than 2/3 commits for both A and B.
+
+Consequences:
+
+* Creating evidence of misbehavior is simple in this case as we have multiple messages signed by the same faulty processes for different values in the same round.
+
+* We have to ensure that these different messages reach a correct process (full node, monitor?), which can submit evidence.
+
+* This is an attack on the full node level (Fork-Full).
+* It extends also to the light clients,
+* For both we need a detection and recovery mechanism.
+
+#### Scenario 2: Equivocation to a light client (LCS)
+
+Validators:
+
+* a set F of faulty validators with more than 2/3 of the voting power.
+
+Execution:
+
+* for the main chain F behaves nicely
+* F coordinates to sign a block B that is different from the one on the main chain.
+* the light client obtains B and trusts it as it is signed by more than 2/3 of the voting power.
+
+Consequences:
+
+Once equivocation is used to attack a light client it opens space
+for different kinds of attacks, as the application state can be diverged in any direction. For example, an attacker can modify the validator set such that it contains only validators that do not have any stake bonded. Note that after a light client is fooled by a fork, an attacker can change the application state and validator set arbitrarily.
+
+In order to detect such (equivocation-based attack), the light client would need to cross check its state with some correct validator (or to obtain a hash of the state from the main chain using out of band channels).
+
+*Remark.* The light client would be able to create evidence of misbehavior, but this would require to pull potentially a lot of data from correct full nodes. Maybe we need to figure out different architecture where a light client that is attacked will push all its data for the current unbonding period to a correct node that will inspect this data and submit corresponding evidence. There are also architectures that assumes a special role (sometimes called fisherman) whose goal is to collect as much as possible useful data from the network, to do analysis and create evidence transactions. That functionality is outside the scope of this document.
+
+*Remark.* The difference between LCS and LCB might only be in the amount of voting power needed to convince light client about arbitrary state. In case of LCB where security threshold is at minimum, an attacker can arbitrarily modify application state with 1/3 or more of voting power, while in case of LCS it requires more than 2/3 of the voting power.
+
+### Flip-flopping: Amnesia based attacks
+
+In case of amnesia, faulty validators lock some value *v* in some round *r*, and then vote for different value *v'* in higher rounds without correctly unlocking value *v*. This attack can be used both on full nodes and light clients.
+
+#### Scenario 3: At most 2/3 of faults
+
+Validators:
+
+* a set F of faulty validators with 1/3 or more but at most 2/3 of the voting power
+* a set C of correct validators
+
+Execution:
+
+* Faulty validators commit (without exposing it on the main chain) a block A in round *r* by collecting more than 2/3 of the
+ voting power (containing correct and faulty validators).
+* All validators (correct and faulty) reach a round *r' > r*.
+* Some correct validators in C do not lock any value before round *r'*.
+* The faulty validators in F deviate from Tendermint consensus by ignoring that they locked A in *r*, and propose a different block B in *r'*.
+* As the validators in C that have not locked any value find B acceptable, they accept the proposal for B and commit a block B.
+
+*Remark.* In this case, the more than 1/3 of faulty validators do not need to commit an equivocation (F1) as they only vote once per round in the execution.
+
+If a light client is attacked using this attack with 1/3 or more of voting power (and less than 2/3), the attacker cannot change the application state arbitrarily. Rather, the attacker is limited to a state a correct validator finds acceptable: In the execution above, correct validators still find the value acceptable, however, the block the light client trusts deviates from the one on the main chain.
+
+#### Scenario 4: More than 2/3 of faults
+
+In case there is an attack with more than 2/3 of the voting power, an attacker can arbitrarily change application state.
+
+Validators:
+
+* a set F1 of faulty validators with 1/3 or more of the voting power
+* a set F2 of faulty validators with less than 1/3 of the voting power
+
+Execution
+
+* Similar to Scenario 3 (however, messages by correct validators are not needed)
+* The faulty validators in F1 lock value A in round *r*
+* They sign a different value in follow-up rounds
+* F2 does not lock A in round *r*
+
+Consequences:
+
+* The validators in F1 will be detectable by the fork accountability mechanisms.
+* The validators in F2 cannot be detected using this mechanism.
+Only in case they signed something which conflicts with the application this can be used against them. Otherwise, they do not do anything incorrect.
+
+**Q:** do we need to define a special kind of attack for the case where a validator signs an arbitrary state? It seems that detecting such an attack requires a different mechanism that would require as evidence a sequence of blocks that led to that state. This might be very tricky to implement.
+
+### Back to the past
+
+In this kind of attack, faulty validators take advantage of the fact that they did not sign messages in some of the past rounds. Due to the asynchronous network in which Tendermint operates, we cannot easily differentiate between such an attack and a delayed message. This kind of attack can be used against both full nodes and light clients.
+
+#### Scenario 5
+
+Validators:
+
+* C1 - a set of correct validators with over 1/3 of the voting power
+* C2 - a set of correct validators with 1/3 of the voting power
+* C1 and C2 are disjoint
+* F - a set of faulty validators with less than 1/3 voting power
+* one additional faulty process *q*
+* F and *q* violate the Cosmos failure model.
+
+Execution:
+
+* in a round *r* of height *h* we have C1 precommitting a value A,
+* C2 precommits nil,
+* F does not send any message
+* *q* precommits nil.
+* In some round *r' > r*, F and *q* and C2 commit some other value B different from A.
+* F and *q* "go back to the past" and sign precommit messages for value A in round *r*.
+* Together with the precommit messages of C1 this is sufficient for a commit for value A.
+
+Consequences:
+
+* Only a single faulty validator that previously precommitted nil committed equivocation, while the other 1/3 of faulty validators actually executed an attack that has exactly the same sequence of messages as part of an amnesia attack. Detecting this kind of attack boils down to mechanisms for equivocation and amnesia.
+
+**Q:** should we keep this as a separate kind of attack? It seems that equivocation, amnesia and phantom validators are the only kind of attack we need to support and this gives us security also in other cases. This would not be surprising as equivocation and amnesia are attacks that followed from the protocol and phantom attack is not really an attack to Tendermint but more to the Cosmos Proof of Stake module.
+
+### Phantom validators
+
+In case of phantom validators, processes that are not part of the current validator set but are still bonded (as the attack happens during their unbonding period) can be part of the attack by signing vote messages. This attack can be executed against both full nodes and light clients.
+
+#### Scenario 6
+
+Validators:
+
+* F -- a set of faulty validators that are not part of the validator set on the main chain at height *h + k*
+
+Execution:
+
+* There is a fork, and there exist two different headers for height *h + k*, with different validator sets:
+ * VS2 on the main chain
+ * forged header VS2', signed by F (and others)
+
+* a light client has a trust in a header for height *h* (and the corresponding validator set VS1).
+* As part of bisection header verification, it verifies the header at height *h + k* with new validator set VS2'.
+
+Consequences:
+
+* To detect this, a node needs to see both, the forged header and the canonical header from the chain.
+* If this is the case, detecting these kind of attacks is easy as it just requires verifying if processes are signing messages in heights in which they are not part of the validator set.
+
+**Remark.** We can have phantom-validator-based attacks as a follow up of equivocation or amnesia based attack where forked state contains validators that are not part of the validator set at the main chain. In this case, they keep signing messages contributed to a forked chain (the wrong branch) although they are not part of the validator set on the main chain. This attack can also be used to attack full node during a period of time it is eclipsed.
+
+**Remark.** Phantom validator evidence has been removed from implementation as it was deemed, although possibly a plausible form of evidence, not relevant. Any attack on
+the light client involving a phantom validator will have needed to be initiated by 1/3+ lunatic
+validators that can forge a new validator set that includes the phantom validator. Only in
+that case will the light client accept the phantom validators vote. We need only worry about
+punishing the 1/3+ lunatic cabal, that is the root cause of the attack.
+
+### Lunatic validator
+
+A lunatic validator agrees to sign commit messages for an arbitrary application state. It is used to attack light clients.
+Note that detecting this behavior requires application knowledge. Detecting this behavior can probably be done by
+referring to the block before the one in which the attack happened.
+
+**Q:** can we say that in this case a validator declines to check if a proposed value is valid before voting for it?
diff --git a/cometbft/v0.38/spec/light-client/Fork-Detection.mdx b/cometbft/v0.38/spec/light-client/Fork-Detection.mdx
new file mode 100644
index 00000000..4bcd78cc
--- /dev/null
+++ b/cometbft/v0.38/spec/light-client/Fork-Detection.mdx
@@ -0,0 +1,75 @@
+---
+order: 1
+parent:
+ title: Fork Detection
+ order: 2
+---
+
+# Cosmos fork detection and IBC fork detection
+
+## Status
+
+This is a work in progress.
+This directory captures the ongoing work and discussion on fork
+detection both in the context of a Cosmos light node and in the
+context of IBC. It contains the following files
+
+### [detection.md](./detection_003_reviewed.md)
+
+a draft of the light node fork detection including "proof of fork"
+ definition, that is, the data structure to submit evidence to full
+ nodes.
+
+### [discussions.md](./discussions.md)
+
+A collection of ideas and intuitions from recent discussions
+
+- the outcome of recent discussion
+- a sketch of the light client supervisor to provide the context in
+ which fork detection happens
+- a discussion about lightstore semantics
+
+### [req-ibc-detection.md](./req-ibc-detection.md)
+
+- a collection of requirements for fork detection in the IBC
+ context. In particular it contains a section "Required Changes in
+ ICS 007" with necessary updates to ICS 007 to support Cosmos
+ fork detection
+
+### [draft-functions.md](./draft-functions.md)
+
+In order to address the collected requirements, we started to sketch
+some functions that we will need in the future when we specify in more
+detail the
+
+- fork detections
+- proof of fork generation
+- proof of fork verification
+
+on the following components.
+
+- IBC on-chain components
+- Relayer
+
+### TODOs
+
+We decided to merge the files while there are still open points to
+address to record the current state and move forward. In particular,
+the following points need to be addressed:
+
+- [https://github.com/informalsystems/tendermint-rs/pull/479#discussion_r466504876](https://github.com/informalsystems/tendermint-rs/pull/479#discussion_r466504876)
+
+- [https://github.com/informalsystems/tendermint-rs/pull/479#discussion_r466493900](https://github.com/informalsystems/tendermint-rs/pull/479#discussion_r466493900)
+
+- [https://github.com/informalsystems/tendermint-rs/pull/479#discussion_r466489045](https://github.com/informalsystems/tendermint-rs/pull/479#discussion_r466489045)
+
+- [https://github.com/informalsystems/tendermint-rs/pull/479#discussion_r466491471](https://github.com/informalsystems/tendermint-rs/pull/479#discussion_r466491471)
+
+Most likely we will write a specification on the light client
+supervisor along the outcomes of
+
+- [https://github.com/informalsystems/tendermint-rs/pull/509](https://github.com/informalsystems/tendermint-rs/pull/509)
+
+that also addresses initialization
+
+- [https://github.com/tendermint/spec/issues/131](https://github.com/tendermint/spec/issues/131)
diff --git a/cometbft/v0.38/spec/light-client/Light-Client-Specification.mdx b/cometbft/v0.38/spec/light-client/Light-Client-Specification.mdx
new file mode 100644
index 00000000..19285d79
--- /dev/null
+++ b/cometbft/v0.38/spec/light-client/Light-Client-Specification.mdx
@@ -0,0 +1,205 @@
+---
+order: 1
+parent:
+ title: Light Client
+ order: 5
+---
+
+# Light Client Specification
+
+This directory contains work-in-progress English and TLA+ specifications for the Light Client
+protocol. Implementations of the light client can be found in
+[Rust](https://github.com/informalsystems/tendermint-rs/tree/master/light-client) and
+[Go](https://github.com/cometbft/cometbft/tree/v0.38.x/light).
+
+Light clients are assumed to be initialized once from a trusted source
+with a trusted header and validator set. The light client
+protocol allows a client to then securely update its trusted state by requesting and
+verifying a minimal set of data from a network of full nodes (at least one of which is correct).
+
+The light client is decomposed into two main components:
+
+- [Commit Verification](#commit-verification) - verify signed headers and associated validator
+ set changes from a single full node, called primary
+- [Attack Detection](#attack-detection) - verify commits across multiple full nodes (called secondaries) and detect conflicts (ie. the existence of a lightclient attack)
+
+In case a lightclient attack is detected, the lightclient submits evidence to a full node which is responsible for "accountability", that is, punishing attackers:
+
+- [Accountability](#accountability) - given evidence for an attack, compute a set of validators that are responsible for it.
+
+## Commit Verification
+
+The [English specification](verification/verification_001_published.md) describes the light client
+commit verification problem in terms of the temporal properties
+[LCV-DIST-SAFE.1](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-safe1) and
+[LCV-DIST-LIVE.1](https://github.com/cometbft/cometbft/blob/v0.38.x/spec/light-client/verification/verification_001_published.md#lcv-dist-live1).
+Commit verification is assumed to operate within the Cosmos Failure Model, where +2/3 of validators are correct for some time period and
+validator sets can change arbitrarily at each height.
+
+A light client protocol is also provided, including all checks that
+need to be performed on headers, commits, and validator sets
+to satisfy the temporal properties - so a light client can continuously
+synchronize with a blockchain. Clients can skip possibly
+many intermediate headers by exploiting overlap in trusted and untrusted validator sets.
+When there is not enough overlap, a bisection routine can be used to find a
+minimal set of headers that do provide the required overlap.
+
+The [TLA+ specification ver. 001](verification/Lightclient_A_1.tla)
+is a formal description of the
+commit verification protocol executed by a client, including the safety and
+termination, which can be model checked with Apalache.
+
+A more detailed TLA+ specification of
+[Light client verification ver. 003](verification/Lightclient_003_draft.tla)
+is currently under peer review.
+
+The `MC*.tla` files contain concrete parameters for the
+[TLA+ specification](verification/Lightclient_A_1.tla), in order to do model checking.
+For instance, [MC4_3_faulty.tla](verification/MC4_3_faulty.tla) contains the following parameters
+for the nodes, heights, the trusting period, the clock drifts,
+correctness of the primary node, and the ratio of the faulty processes:
+
+```tla
+AllNodes == {"n1", "n2", "n3", "n4"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 3
+TRUSTING_PERIOD == 1400 \* the trusting period in some time units
+CLOCK_DRIFT = 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT = 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+```
+
+To run a complete set of experiments, clone [apalache](https://github.com/informalsystems/apalache) and [apalache-tests](https://github.com/informalsystems/apalache-tests) into a directory `$DIR` and run the following commands:
+
+```sh
+$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 002bmc-apalache-ok.csv $DIR/apalache . out
+./out/run-all.sh
+```
+
+After the experiments have finished, you can collect the logs by executing the following command:
+
+```sh
+cd ./out
+$DIR/apalache-tests/scripts/parse-logs.py --human .
+```
+
+All lines in `results.csv` should report `Deadlock`, which means that the algorithm
+has terminated and no invariant violation was found.
+
+Similar to [002bmc-apalache-ok.csv](verification/002bmc-apalache-ok.csv),
+file [003bmc-apalache-error.csv](verification/003bmc-apalache-error.csv) specifies
+the set of experiments that should result in counterexamples:
+
+```sh
+$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 003bmc-apalache-error.csv $DIR/apalache . out
+./out/run-all.sh
+```
+
+All lines in `results.csv` should report `Error`.
+
+The following table summarizes the experimental results for Light client verification
+version 001. The TLA+ properties can be found in the
+[TLA+ specification](verification/Lightclient_A_1.tla).
+ The experiments were run in an AWS instance equipped with 32GB
+RAM and a 4-core Intel® Xeon® CPU E5-2686 v4 @ 2.30GHz CPU.
+We write “`✗=k`” when a bug is reported at depth k, and “`✓<=k`” when
+no bug is reported up to depth k.
+
+
+
+The experimental results for version 003 are to be added.
+
+## Attack Detection
+
+The [English specification](detection/detection_003_reviewed.md)
+defines light client attacks (and how they differ from blockchain
+forks), and describes the problem of a light client detecting
+these attacks by communicating with a network of full nodes,
+where at least one is correct.
+
+The specification also contains a detection protocol that checks
+whether the header obtained from the primary via the verification
+protocol matches corresponding headers provided by the secondaries.
+If this is not the case, the protocol analyses the verification traces
+of the involved full nodes
+and generates
+[evidence](detection/detection_003_reviewed.md#cmbc-lc-evidence-data1)
+of misbehavior that can be submitted to a full node so that
+the faulty validators can be punished.
+
+The [TLA+ specification](detection/LCDetector_003_draft.tla)
+is a formal description of the
+detection protocol for two peers, including the safety and
+termination, which can be model checked with Apalache.
+
+The `LCD_MC*.tla` files contain concrete parameters for the
+[TLA+ specification](detection/LCDetector_003_draft.tla),
+in order to run the model checker.
+For instance, [LCD_MC4_4_faulty.tla](./verification/MC4_4_faulty.tla)
+contains the following parameters
+for the nodes, heights, the trusting period, the clock drifts,
+correctness of the nodes, and the ratio of the faulty processes:
+
+```tla
+AllNodes == {"n1", "n2", "n3", "n4"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 3
+TRUSTING_PERIOD == 1400 \* the trusting period in some time units
+CLOCK_DRIFT = 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT = 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+IS_SECONDARY_CORRECT == FALSE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+```
+
+To run a complete set of experiments, clone [apalache](https://github.com/informalsystems/apalache) and [apalache-tests](https://github.com/informalsystems/apalache-tests) into a directory `$DIR` and run the following commands:
+
+```sh
+$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 004bmc-apalache-ok.csv $DIR/apalache . out
+./out/run-all.sh
+```
+
+After the experiments have finished, you can collect the logs by executing the following command:
+
+```sh
+cd ./out
+$DIR/apalache-tests/scripts/parse-logs.py --human .
+```
+
+All lines in `results.csv` should report `Deadlock`, which means that the algorithm
+has terminated and no invariant violation was found.
+
+Similar to [004bmc-apalache-ok.csv](verification/004bmc-apalache-ok.csv),
+file [005bmc-apalache-error.csv](verification/005bmc-apalache-error.csv) specifies
+the set of experiments that should result in counterexamples:
+
+```sh
+$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 005bmc-apalache-error.csv $DIR/apalache . out
+./out/run-all.sh
+```
+
+All lines in `results.csv` should report `Error`.
+
+The detailed experimental results are to be added soon.
+
+## Accountability
+
+The [English specification](attacks/isolate-attackers_002_reviewed.md)
+defines the protocol that is executed on a full node upon receiving attack [evidence](detection/detection_003_reviewed.md#cmbc-lc-evidence-data1) from a lightclient. In particular, the protocol handles three types of attacks
+
+- lunatic
+- equivocation
+- amnesia
+
+We discussed in the [last part](attacks/isolate-attackers_002_reviewed.md#Part-III---Completeness) of the English specification
+that the non-lunatic cases are defined by having the same validator set in the conflicting blocks. For these cases,
+computer-aided analysis of [Tendermint Consensus in TLA+](./accountability/README.md) shows that equivocation and amnesia capture all non-lunatic attacks.
+
+The [TLA+ specification](attacks/Isolation_001_draft.tla)
+is a formal description of the
+protocol, including the safety property, which can be model checked with Apalache.
+
+Similar to the other specifications, [MC_5_3.tla](attacks/MC_5_3.tla) contains concrete parameters to run the model checker. The specification can be checked within seconds.
+
+[tendermint-accountability](./accountability/README.md)
diff --git a/cometbft/v0.38/spec/light-client/assets/light-node-image.png b/cometbft/v0.38/spec/light-client/assets/light-node-image.png
new file mode 100644
index 00000000..f0b93c6e
Binary files /dev/null and b/cometbft/v0.38/spec/light-client/assets/light-node-image.png differ
diff --git a/cometbft/v0.38/spec/light-client/experiments.png b/cometbft/v0.38/spec/light-client/experiments.png
new file mode 100644
index 00000000..94166ffa
Binary files /dev/null and b/cometbft/v0.38/spec/light-client/experiments.png differ
diff --git a/cometbft/v0.38/spec/light-client/verification.mdx b/cometbft/v0.38/spec/light-client/verification.mdx
new file mode 100644
index 00000000..7a243fbb
--- /dev/null
+++ b/cometbft/v0.38/spec/light-client/verification.mdx
@@ -0,0 +1,584 @@
+---
+order: 1
+parent:
+ title: Verification
+ order: 2
+---
+# Core Verification
+
+## Problem statement
+
+We assume that the light client knows a (base) header `inithead` it trusts (by social consensus or because
+the light client has decided to trust the header before). The goal is to check whether another header
+`newhead` can be trusted based on the data in `inithead`.
+
+The correctness of the protocol is based on the assumption that `inithead` was generated by an instance of
+Tendermint consensus.
+
+### Failure Model
+
+For the purpose of the following definitions we assume that there exists a function
+`validators` that returns the corresponding validator set for the given hash.
+
+The light client protocol is defined with respect to the following failure model:
+
+Given a known bound `TRUSTED_PERIOD`, and a block `b` with header `h` generated at time `Time`
+(i.e. `h.Time = Time`), a set of validators that hold more than 2/3 of the voting power
+in `validators(b.Header.NextValidatorsHash)` is correct until time `b.Header.Time + TRUSTED_PERIOD`.
+
+*Assumption*: "correct" is defined w.r.t. realtime (some Newtonian global notion of time, i.e., wall time),
+while `Header.Time` corresponds to the [BFT time](../../consensus/bft-time.md). In this note, we assume that clocks of correct processes
+are synchronized (for example using NTP), and therefore there is bounded clock drift (`CLOCK_DRIFT`) between local clocks and
+BFT time. More precisely, for every correct light client process and every `header.Time` (i.e. BFT Time, for a header correctly
+generated by the Tendermint consensus), the following inequality holds: `Header.Time < now + CLOCK_DRIFT`,
+where `now` corresponds to the system clock at the light client process.
+
+Furthermore, we assume that `TRUSTED_PERIOD` is (several) orders of magnitude bigger than `CLOCK_DRIFT` (`TRUSTED_PERIOD >> CLOCK_DRIFT`),
+as `CLOCK_DRIFT` (using NTP) is in the order of milliseconds and `TRUSTED_PERIOD` is in the order of weeks.
+
+We expect a light client process defined in this document to be used in the context in which there is some
+larger period during which misbehaving validators can be detected and punished (we normally refer to it as `UNBONDING_PERIOD`
+due to the "bonding" mechanism in modern proof of stake systems). Furthermore, we assume that
+`TRUSTED_PERIOD < UNBONDING_PERIOD` and that they are normally of the same order of magnitude, for example
+`TRUSTED_PERIOD = UNBONDING_PERIOD / 2`.
+
+The specification in this document considers an implementation of the light client under the Failure Model defined above.
+Mechanisms like `fork accountability` and `evidence submission` are defined in the context of `UNBONDING_PERIOD` and
+they incentivize validators to follow the protocol specification defined in this document. If they don't,
+and we have 1/3 (or more) faulty validators, safety may be violated. Our approach then is
+to *detect* these cases (after the fact), and take suitable repair actions (automatic and social).
+This is discussed in document on [Fork accountability](../../consensus/light-client/accountability.md).
+
+The term "trusted" above indicates that the correctness of the protocol depends on
+this assumption. It is in the responsibility of the user that runs the light client to make sure that the risk
+of trusting a corrupted/forged `inithead` is negligible.
+
+*Remark*: This failure model might change to a hybrid version that takes heights into account in the future.
+
+### High Level Solution
+
+Upon initialization, the light client is given a header `inithead` it trusts (by
+social consensus). When a light client sees a new signed header `snh`, it has to decide whether to trust the new
+header. Trust can be obtained by (possibly) the combination of three methods.
+
+1. **Uninterrupted sequence of headers.** Given a trusted header `h` and an untrusted header `h1`,
+the light client trusts a header `h1` if it trusts all headers in between `h` and `h1`.
+
+2. **Trusted period.** Given a trusted header `h`, an untrusted header `h1 > h` and `TRUSTED_PERIOD` during which
+the failure model holds, we can check whether at least one validator, that has been continuously correct
+from `h.Time` until now, has signed `h1`. If this is the case, we can trust `h1`.
+
+3. **Bisection.** If a check according to 2. (trusted period) fails, the light client can try to
+obtain a header `hp` whose height lies between `h` and `h1` in order to check whether `h` can be used to
+get trust for `hp`, and `hp` can be used to get trust for `h1`. If this is the case we can trust `h1`;
+if not, we continue recursively until either we find a set of headers that can build (transitively) a trust relation
+between `h` and `h1`, or we fail as two consecutive headers don't verify against each other.
+
+## Definitions
+
+### Data structures
+
+In the following, only the details of the data structures needed for this specification are given.
+
+ ```go
+ type Header struct {
+ Height int64
+ Time Time // the chain time when the header (block) was generated
+
+ LastBlockID BlockID // prev block info
+ ValidatorsHash []byte // hash of the validators for the current block
+ NextValidatorsHash []byte // hash of the validators for the next block
+ }
+
+ type SignedHeader struct {
+ Header Header
+ Commit Commit // commit for the given header
+ }
+
+ type ValidatorSet struct {
+ Validators []Validator
+ TotalVotingPower int64
+ }
+
+ type Validator struct {
+ Address Address // validator address (we assume validator's addresses are unique)
+ VotingPower int64 // validator's voting power
+ }
+
+ type TrustedState {
+ SignedHeader SignedHeader
+ ValidatorSet ValidatorSet
+ }
+ ```
+
+### Functions
+
+For the purpose of this light client specification, we assume that the Cosmos Full Node
+exposes the following functions over RPC:
+
+```go
+ // returns signed header: Header with Commit, for the given height
+ func Commit(height int64) (SignedHeader, error)
+
+ // returns validator set for the given height
+ func Validators(height int64) (ValidatorSet, error)
+```
+
+Furthermore, we assume the following auxiliary functions:
+
+```go
+ // returns true if the commit is for the header, ie. if it contains
+ // the correct hash of the header; otherwise false
+ func matchingCommit(header Header, commit Commit) bool
+
+ // returns the set of validators from the given validator set that
+ // committed the block (that correctly signed the block)
+ // it assumes signature verification so it can be computationally expensive
+ func signers(commit Commit, validatorSet ValidatorSet) []Validator
+
+ // returns the voting power the validators in v1 have according to their voting power in set v2
+ // it does not assume signature verification
+ func votingPowerIn(v1 []Validator, v2 ValidatorSet) int64
+
+ // returns hash of the given validator set
+ func hash(v2 ValidatorSet) []byte
+```
+
+In the functions below we will be using `trustThreshold` as a parameter. For simplicity
+we assume that `trustThreshold` is a float between `1/3` and `2/3` and we will not be checking it
+in the pseudo-code.
+
+**VerifySingle.** The function `VerifySingle` attempts to validate given untrusted header and the corresponding validator sets
+based on a given trusted state. It ensures that the trusted state is still within its trusted period,
+and that the untrusted header is within assumed `clockDrift` bound of the passed time `now`.
+Note that this function is not making external (RPC) calls to the full node; the whole logic is
+based on the local (given) state. This function is supposed to be used by the IBC handlers.
+
+```go
+func VerifySingle(untrustedSh SignedHeader,
+ untrustedVs ValidatorSet,
+ untrustedNextVs ValidatorSet,
+ trustedState TrustedState,
+ trustThreshold float,
+ trustingPeriod Duration,
+ clockDrift Duration,
+ now Time) (TrustedState, error) {
+
+ if untrustedSh.Header.Time > now + clockDrift {
+ return (trustedState, ErrInvalidHeaderTime)
+ }
+
+ trustedHeader = trustedState.SignedHeader.Header
+ if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+ return (state, ErrHeaderNotWithinTrustedPeriod)
+ }
+
+ // we assume that the time it takes to execute the verifySingle function
+ // is several orders of magnitude smaller than trustingPeriod
+ error = verifySingle(
+ trustedState,
+ untrustedSh,
+ untrustedVs,
+ untrustedNextVs,
+ trustThreshold)
+
+ if error != nil return (state, error)
+
+ // the untrusted header is now trusted
+ newTrustedState = TrustedState(untrustedSh, untrustedNextVs)
+ return (newTrustedState, nil)
+}
+
+// return true if header is within its light client trusted period; otherwise returns false
+func isWithinTrustedPeriod(header Header,
+ trustingPeriod Duration,
+ now Time) bool {
+
+ return header.Time + trustedPeriod > now
+}
+```
+
+Note that in case `VerifySingle` returns without an error (untrusted header
+is successfully verified) then we have a guarantee that the transition of the trust
+from `trustedState` to `newTrustedState` happened during the trusted period of
+`trustedState.SignedHeader.Header`.
+
+TODO: Explain what happens in case `VerifySingle` returns with an error.
+
+**verifySingle.** The function `verifySingle` verifies a single untrusted header
+against a given trusted state. It includes all validations and signature verification.
+It is not publicly exposed since it does not check for header expiry (time constraints)
+and hence it's possible to use it incorrectly.
+
+```go
+func verifySingle(trustedState TrustedState,
+ untrustedSh SignedHeader,
+ untrustedVs ValidatorSet,
+ untrustedNextVs ValidatorSet,
+ trustThreshold float) error {
+
+ untrustedHeader = untrustedSh.Header
+ untrustedCommit = untrustedSh.Commit
+
+ trustedHeader = trustedState.SignedHeader.Header
+ trustedVs = trustedState.ValidatorSet
+
+ if trustedHeader.Height >= untrustedHeader.Height return ErrNonIncreasingHeight
+ if trustedHeader.Time >= untrustedHeader.Time return ErrNonIncreasingTime
+
+ // validate the untrusted header against its commit, vals, and next_vals
+ error = validateSignedHeaderAndVals(untrustedSh, untrustedVs, untrustedNextVs)
+ if error != nil return error
+
+ // check for adjacent headers
+ if untrustedHeader.Height == trustedHeader.Height + 1 {
+ if trustedHeader.NextValidatorsHash != untrustedHeader.ValidatorsHash {
+ return ErrInvalidAdjacentHeaders
+ }
+ } else {
+ error = verifyCommitTrusting(trustedVs, untrustedCommit, untrustedVs, trustThreshold)
+ if error != nil return error
+ }
+
+ // verify the untrusted commit
+ return verifyCommitFull(untrustedVs, untrustedCommit)
+}
+
+// returns nil if header and validator sets are consistent; otherwise returns error
+func validateSignedHeaderAndVals(signedHeader SignedHeader, vs ValidatorSet, nextVs ValidatorSet) error {
+ header = signedHeader.Header
+ if hash(vs) != header.ValidatorsHash return ErrInvalidValidatorSet
+ if hash(nextVs) != header.NextValidatorsHash return ErrInvalidNextValidatorSet
+ if !matchingCommit(header, signedHeader.Commit) return ErrInvalidCommitValue
+ return nil
+}
+
+// returns nil if at least a single correct signer signed the commit; otherwise returns error
+func verifyCommitTrusting(trustedVs ValidatorSet,
+ commit Commit,
+ untrustedVs ValidatorSet,
+ trustLevel float) error {
+
+ totalPower := trustedVs.TotalVotingPower
+ signedPower := votingPowerIn(signers(commit, untrustedVs), trustedVs)
+
+ // check that the signers account for more than max(1/3, trustLevel) of the voting power
+ // this ensures that there is at least single correct validator in the set of signers
+ if signedPower < max(1/3, trustLevel) * totalPower return ErrInsufficientVotingPower
+ return nil
+}
+
+// returns nil if commit is signed by more than 2/3 of voting power of the given validator set
+// return error otherwise
+func verifyCommitFull(vs ValidatorSet, commit Commit) error {
+ totalPower := vs.TotalVotingPower;
+ signedPower := votingPowerIn(signers(commit, vs), vs)
+
+ // check the signers account for +2/3 of the voting power
+ if signedPower * 3 <= totalPower * 2 return ErrInvalidCommit
+ return nil
+}
+```
+
+**VerifyHeaderAtHeight.** The function `VerifyHeaderAtHeight` captures high level
+logic, i.e., application call to the light client module to download and verify header
+for some height.
+
+```go
+func VerifyHeaderAtHeight(untrustedHeight int64,
+ trustedState TrustedState,
+ trustThreshold float,
+ trustingPeriod Duration,
+ clockDrift Duration) (TrustedState, error)) {
+
+ trustedHeader := trustedState.SignedHeader.Header
+
+ now := System.Time()
+ if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+ return (trustedState, ErrHeaderNotWithinTrustedPeriod)
+ }
+
+ newTrustedState, err := VerifyBisection(untrustedHeight,
+ trustedState,
+ trustThreshold,
+ trustingPeriod,
+ clockDrift,
+ now)
+
+ if err != nil return (trustedState, err)
+
+ now = System.Time()
+ if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+ return (trustedState, ErrHeaderNotWithinTrustedPeriod)
+ }
+
+ return (newTrustedState, err)
+}
+```
+
+Note that in case `VerifyHeaderAtHeight` returns without an error (untrusted header
+is successfully verified) then we have a guarantee that the transition of the trust
+from `trustedState` to `newTrustedState` happened during the trusted period of
+`trustedState.SignedHeader.Header`.
+
+In case `VerifyHeaderAtHeight` returns with an error, then either (i) the full node we are talking to is faulty
+or (ii) the trusted header has expired (it is outside its trusted period). In case (i) the full node is faulty so
+light client should disconnect and reinitialize with new peer. In the case (ii) as the trusted header has expired,
+we need to reinitialize light client with a new trusted header (that is within its trusted period),
+but we don't necessarily need to disconnect from the full node we are talking to (as we haven't observed full node misbehavior in this case).
+
+**VerifyBisection.** The function `VerifyBisection` implements
+recursive logic for checking whether it is possible to build a trust
+relationship between `trustedState` and the untrusted header at the given height over
+a finite set of (downloaded and verified) headers.
+
+```go
+func VerifyBisection(untrustedHeight int64,
+ trustedState TrustedState,
+ trustThreshold float,
+ trustingPeriod Duration,
+ clockDrift Duration,
+ now Time) (TrustedState, error) {
+
+ untrustedSh, error := Commit(untrustedHeight)
+ if error != nil return (trustedState, ErrRequestFailed)
+
+ untrustedHeader = untrustedSh.Header
+
+ // note that we pass now during the recursive calls. This is fine as
+ // all other untrusted headers we download during recursion will be
+ // for smaller heights, and therefore should happen before.
+ if untrustedHeader.Time > now + clockDrift {
+ return (trustedState, ErrInvalidHeaderTime)
+ }
+
+ untrustedVs, error := Validators(untrustedHeight)
+ if error != nil return (trustedState, ErrRequestFailed)
+
+ untrustedNextVs, error := Validators(untrustedHeight + 1)
+ if error != nil return (trustedState, ErrRequestFailed)
+
+ error = verifySingle(
+ trustedState,
+ untrustedSh,
+ untrustedVs,
+ untrustedNextVs,
+ trustThreshold)
+
+ if fatalError(error) return (trustedState, error)
+
+ if error == nil {
+ // the untrusted header is now trusted.
+ newTrustedState = TrustedState(untrustedSh, untrustedNextVs)
+ return (newTrustedState, nil)
+ }
+
+ // at this point in time we need to do bisection
+ pivotHeight := ceil((trustedHeader.Height + untrustedHeight) / 2)
+
+ error, newTrustedState = VerifyBisection(pivotHeight,
+ trustedState,
+ trustThreshold,
+ trustingPeriod,
+ clockDrift,
+ now)
+ if error != nil return (newTrustedState, error)
+
+ return VerifyBisection(untrustedHeight,
+ newTrustedState,
+ trustThreshold,
+ trustingPeriod,
+ clockDrift,
+ now)
+}
+
+func fatalError(err) bool {
+ return err == ErrHeaderNotWithinTrustedPeriod OR
+ err == ErrInvalidAdjacentHeaders OR
+ err == ErrNonIncreasingHeight OR
+ err == ErrNonIncreasingTime OR
+ err == ErrInvalidValidatorSet OR
+ err == ErrInvalidNextValidatorSet OR
+ err == ErrInvalidCommitValue OR
+ err == ErrInvalidCommit
+}
+```
+
+### The case `untrustedHeader.Height < trustedHeader.Height`
+
+In the use case where someone tells the light client that application data that is relevant for it
+can be read in the block of height `k` and the light client trusts a more recent header, we can use the
+hashes to verify headers "down the chain." That is, we iterate down the heights and check the hashes in each step.
+
+*Remark.* For the case where the light client trusts two headers `i` and `j` with `i < k < j`, we should
+discuss/experiment whether the forward or the backward method is more effective.
+
+```go
+func VerifyHeaderBackwards(trustedHeader Header,
+ untrustedHeader Header,
+ trustingPeriod Duration,
+ clockDrift Duration) error {
+
+ if untrustedHeader.Height >= trustedHeader.Height return ErrErrNonDecreasingHeight
+ if untrustedHeader.Time >= trustedHeader.Time return ErrNonDecreasingTime
+
+ now := System.Time()
+ if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+ return ErrHeaderNotWithinTrustedPeriod
+ }
+
+ old := trustedHeader
+ for i := trustedHeader.Height - 1; i > untrustedHeader.Height; i-- {
+ untrustedSh, error := Commit(i)
+ if error != nil return ErrRequestFailed
+
+ if (hash(untrustedSh.Header) != old.LastBlockID.Hash) {
+ return ErrInvalidAdjacentHeaders
+ }
+
+ old := untrustedSh.Header
+ }
+
+ if hash(untrustedHeader) != old.LastBlockID.Hash {
+ return ErrInvalidAdjacentHeaders
+ }
+
+ now := System.Time()
+ if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+ return ErrHeaderNotWithinTrustedPeriod
+ }
+
+ return nil
+ }
+```
+
+*Assumption*: In the following, we assume that *untrusted_h.Header.height > trusted_h.Header.height*. We will quickly discuss the other case in the next section.
+
+We consider the following set-up:
+
+- the light client communicates with one full node
+- the light client locally stores all the headers that have passed basic verification and that are within the light client trust period. In the pseudo code below we
+write *Store.Add(header)* for this. If a header failed to verify, then
+the full node we are talking to is faulty and we should disconnect from it and reinitialize with new peer.
+- If `CanTrust` returns *error*, then the light client has seen a forged header or the trusted header has expired (it is outside its trusted period).
+ - In case of forged header, the full node is faulty so light client should disconnect and reinitialize with new peer. If the trusted header has expired,
+ we need to reinitialize light client with new trusted header (that is within its trusted period), but we don't necessarily need to disconnect from the full node
+ we are talking to (as we haven't observed full node misbehavior in this case).
+
+## Correctness of the Light Client Protocols
+
+### Definitions
+
+- `TRUSTED_PERIOD`: trusted period
+- for realtime `t`, the predicate `correct(v,t)` is true if the validator `v`
+ follows the protocol until time `t` (we will see about recovery later).
+- Validator fields. We will write a validator as a tuple `(v,p)` such that
+ - `v` is the identifier (i.e., validator address; we assume identifiers are unique in each validator set)
+ - `p` is its voting power
+- For each header `h`, we write `trust(h) = true` if the light client trusts `h`.
+
+### Failure Model
+
+If a block `b` with a header `h` is generated at time `Time` (i.e. `h.Time = Time`), then a set of validators that
+hold more than `2/3` of the voting power in `validators(h.NextValidatorsHash)` is correct until time
+`h.Time + TRUSTED_PERIOD`.
+
+Formally,
+
+```latex
+\[
+\sum_{(v,p) \in validators(h.NextValidatorsHash) \wedge correct(v,h.Time + TRUSTED_PERIOD)} p >
+\frac{2}{3} \sum_{(v,p) \in validators(h.NextValidatorsHash)} p
+\]
+```
+
+
+The light client communicates with a full node and learns new headers. The goal is to locally decide whether to trust a header. Our implementation needs to ensure the following two properties:
+
+- *Light Client Completeness*: If a header `h` was correctly generated by an instance of Tendermint consensus (and its age is less than the trusted period),
+then the light client should eventually set `trust(h)` to `true`.
+
+- *Light Client Accuracy*: If a header `h` was *not generated* by an instance of Tendermint consensus, then the light client should never set `trust(h)` to true.
+
+*Remark*: If in the course of the computation, the light client obtains certainty that some headers were forged by adversaries
+(that is were not generated by an instance of Tendermint consensus), it may submit (a subset of) the headers it has seen as evidence of misbehavior.
+
+*Remark*: In Completeness we use "eventually", while in practice `trust(h)` should be set to true before `h.Time + TRUSTED_PERIOD`. If not, the header
+cannot be trusted because it is too old.
+
+*Remark*: If a header `h` is marked with `trust(h)`, but it is too old at some point in time we denote with `now` (`h.Time + TRUSTED_PERIOD < now`),
+then the light client should set `trust(h)` to `false` again at time `now`.
+
+*Assumption*: Initially, the light client has a header `inithead` that it trusts, that is, `inithead` was correctly generated by the Tendermint consensus.
+
+To reason about the correctness, we may prove the following invariant.
+
+*Verification Condition: Light Client Invariant.*
+ For each light client `l` and each header `h`:
+if `l` has set `trust(h) = true`,
+ then validators that are correct until time `h.Time + TRUSTED_PERIOD` have more than two thirds of the voting power in `validators(h.NextValidatorsHash)`.
+
+Formally,
+
+```latex
+\[
+\sum_{(v,p) \in validators(h.NextValidatorsHash) \wedge correct(v,h.Time + TRUSTED_PERIOD)} p >
+\frac{2}{3} \sum_{(v,p) \in validators(h.NextValidatorsHash)} p
+\]
+```
+
+*Remark.* To prove the invariant, we will have to prove that the light client only trusts headers that were correctly generated by Tendermint consensus.
+Then the formula above follows from the failure model.
+
+## Details
+
+**Observation 1.** If `h.Time + TRUSTED_PERIOD > now`, we trust the validator set `validators(h.NextValidatorsHash)`.
+
+When we say we trust `validators(h.NextValidatorsHash)` we do `not` trust that each individual validator in `validators(h.NextValidatorsHash)`
+is correct, but we only trust the fact that less than `1/3` of them are faulty (more precisely, the faulty ones have less than `1/3` of the total voting power).
+
+*`VerifySingle` correctness arguments*
+
+Light Client Accuracy:
+
+- Assume by contradiction that `untrustedHeader` was not generated correctly and the light client sets trust to true because `verifySingle` returns without error.
+- `trustedState` is trusted and sufficiently new
+- by the Failure Model, less than `1/3` of the voting power held by faulty validators => at least one correct validator `v` has signed `untrustedHeader`.
+- as `v` is correct up to now, it followed the Tendermint consensus protocol at least up to signing `untrustedHeader` => `untrustedHeader` was correctly generated.
+We arrive at the required contradiction.
+
+Light Client Completeness:
+
+- The check is successful if sufficiently many validators of `trustedState` are still validators in the height `untrustedHeader.Height` and signed `untrustedHeader`.
+- If `untrustedHeader.Height = trustedHeader.Height + 1`, and both headers were generated correctly, the test passes.
+
+*Verification Condition:* We may need an invariant stating that if `untrustedSignedHeader.Header.Height = trustedHeader.Height + 1` then
+`signers(untrustedSignedHeader.Commit) \subseteq validators(trustedHeader.NextValidatorsHash)`.
+
+*Remark*: The variable `trustThreshold` can be used if the user believes that relying on one correct validator is not sufficient.
+However, in case of (frequent) changes in the validator set, the higher the `trustThreshold` is chosen, the more unlikely it becomes that
+`verifySingle` returns with an error for non-adjacent headers.
+
+*`VerifyBisection` correctness arguments (sketch)*
+
+Light Client Accuracy:
+
+- Assume by contradiction that the header at `untrustedHeight` obtained from the full node was not generated correctly and
+the light client sets trust to true because `VerifyBisection` returns without an error.
+- `VerifyBisection` returns without error only if all calls to `verifySingle` in the recursion return without error (return `nil`).
+- Thus we have a sequence of headers that all satisfied the `verifySingle`
+- again a contradiction
+
+Light Client Completeness:
+
+This is only ensured if upon `Commit(pivot)` the light client is always provided with a correctly generated header.
+
+*Stalling*
+
+With `VerifyBisection`, a faulty full node could stall a light client by creating a long sequence of headers that are queried one-by-one by the light client and look OK,
+before the light client eventually detects a problem. There are several ways to address this:
+
+- Each call to `Commit` could be issued to a different full node
+- Instead of querying header by header, the light client tells a full node which header it trusts, and the height of the header it needs. The full node responds with
+the header along with a proof consisting of intermediate headers that the light client can use to verify. Roughly, `VerifyBisection` would then be executed at the full node.
+- We may set a timeout how long `VerifyBisection` may take.
diff --git a/cometbft/v0.38/spec/p2p/Implementation-of-the-p2p-layer.mdx b/cometbft/v0.38/spec/p2p/Implementation-of-the-p2p-layer.mdx
new file mode 100644
index 00000000..9011536b
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/Implementation-of-the-p2p-layer.mdx
@@ -0,0 +1,43 @@
+---
+order: 1
+title: Implementation
+---
+
+# Implementation of the p2p layer
+
+This section documents the implementation of the peer-to-peer (p2p)
+communication layer in CometBFT.
+
+The documentation was [produced](https://github.com/tendermint/tendermint/pull/9348)
+using the `v0.34.*` releases
+and the branch [`v0.34.x`](https://github.com/cometbft/cometbft/tree/v0.34.x)
+of this repository as reference.
+As there were no substantial changes in the p2p implementation, the
+documentation also applies to the releases `v0.37.*` and `v0.38.*` [^v35].
+
+[^v35]: The releases `v0.35.*` and `v0.36.*`, which included a major
+ refactoring of the p2p layer implementation, were [discontinued][v35postmorten].
+
+[v35postmorten]: https://interchain-io.medium.com/discontinuing-tendermint-v0-35-a-postmortem-on-the-new-networking-layer-3696c811dabc
+
+## Contents
+
+The documentation follows the organization of the
+[`p2p` package](https://github.com/cometbft/cometbft/tree/v0.34.x/p2p),
+which implements the following abstractions:
+
+- [Transport](./transport.md): establishes secure and authenticated
+ connections with peers;
+- [Switch](./switch.md): responsible for dialing peers and accepting
+ connections from peers, for managing established connections, and for
+ routing messages between the reactors and peers,
+ that is, between local and remote instances of the CometBFT protocols;
+- [PEX Reactor](./pex.md): due to the several roles of this component, the
+ documentation is split in several parts:
+ - [Peer Exchange protocol](./pex-protocol.md): enables nodes to exchange peer addresses, thus implementing a peer discovery service;
+ - [Address Book](./addressbook.md): stores discovered peer addresses and
+ quality metrics associated to peers with which the node has interacted;
+ - [Peer Manager](./peer_manager.md): defines when and to which peers a node
+ should dial, in order to establish outbound connections;
+- [Types](./types.md) and [Configuration](./configuration.md) provide a list of
+ existing types and configuration parameters used by the p2p package.
diff --git a/cometbft/v0.38/spec/p2p/Peer-to-Peer.mdx b/cometbft/v0.38/spec/p2p/Peer-to-Peer.mdx
new file mode 100644
index 00000000..29efd8ec
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/Peer-to-Peer.mdx
@@ -0,0 +1,46 @@
+---
+order: 1
+parent:
+ title: P2P
+ order: 6
+---
+
+# Peer-to-Peer Communication
+
+A CometBFT network is composed of multiple CometBFT instances, hereafter called
+`nodes`, that interact by exchanging messages.
+
+The CometBFT protocols are designed under the assumption of a partially-connected network model.
+This means that a node is not assumed to be directly connected to every other
+node in the network.
+Instead, each node is directly connected to only a subset of other nodes,
+hereafter called its `peers`.
+
+The peer-to-peer (p2p) communication layer is then the component of CometBFT that:
+
+1. establishes connections between nodes in a CometBFT network
+2. manages the communication between a node and the connected peers
+3. intermediates the exchange of messages between peers in CometBFT protocols
+
+The specification of the p2p layer is a work in progress,
+tracked by [issue #19](https://github.com/cometbft/cometbft/issues/19).
+The current content is organized as follows:
+
+- [`implementation`](./implementation/README.md): documents the current state
+ of the implementation of the p2p layer, covering the main components of the
+ `p2p` package. The documentation covers, in a fairly comprehensive way,
+ the items 1. and 2. from the list above.
+- [`reactor-api`](./reactor-api/README.md): specifies the API offered by the
+ p2p layer to the protocol layer, through the `Reactor` abstraction.
+ This is a high-level specification (i.e., it should not be implementation-specific)
+ of the p2p layer API, covering item 3. from the list above.
+- [`legacy-docs`](./legacy-docs/): We keep older documentation in
+ the `legacy-docs` directory, as overall, it contains useful information.
+ However, part of this content is redundant,
+ being more comprehensively covered in more recent documents,
+ and some implementation details might be outdated
+ (see [issue #981](https://github.com/cometbft/cometbft/issues/981)).
+
+In addition to this content, some unfinished, work in progress, and auxiliary
+material can be found in the
+[knowledge-base](https://github.com/cometbft/knowledge-base/tree/main/p2p) repository.
diff --git a/cometbft/v0.38/spec/p2p/images/p2p-reactors.png b/cometbft/v0.38/spec/p2p/images/p2p-reactors.png
new file mode 100644
index 00000000..5515976c
Binary files /dev/null and b/cometbft/v0.38/spec/p2p/images/p2p-reactors.png differ
diff --git a/cometbft/v0.38/spec/p2p/images/p2p_state.png b/cometbft/v0.38/spec/p2p/images/p2p_state.png
new file mode 100644
index 00000000..c86d0168
Binary files /dev/null and b/cometbft/v0.38/spec/p2p/images/p2p_state.png differ
diff --git a/cometbft/v0.38/spec/p2p/implementation/addressbook.md b/cometbft/v0.38/spec/p2p/implementation/addressbook.md
new file mode 100644
index 00000000..26b95042
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/implementation/addressbook.md
@@ -0,0 +1,368 @@
+# Address Book
+
+The address book tracks information about peers, i.e., about other nodes in the network.
+
+The primary information stored in the address book are peer addresses.
+A peer address is composed by a node ID and a network address; a network
+address is composed by an IP address or a DNS name plus a port number.
+The same node ID can be associated to multiple network addresses.
+
+There are two sources for the addresses stored in the address book.
+The [Peer Exchange protocol](./pex-protocol.md) stores in the address book
+the peer addresses it discovers, i.e., it learns from connected peers.
+And the [Switch](./switch.md) registers the addresses of peers with which it
+has interacted: to which it has dialed or from which it has accepted a
+connection.
+
+The address book also records additional information about peers with which the
+node has interacted, from which it is possible to rank peers.
+The Switch reports [connection attempts](#dial-attempts) to a peer address; too
+many failed attempts indicate that a peer address is invalid.
+Reactors, in their turn, report a peer as [good](#good-peers) when it behaves as
+expected, or as a [bad peer](#bad-peers), when it misbehaves.
+
+There are two entities that retrieve peer addresses from the address book.
+The [Peer Manager](./peer_manager.md) retrieves peer addresses to dial, so to
+establish outbound connections.
+This selection is random, but has a configurable bias towards peers that have
+been marked as good peers.
+The [Peer Exchange protocol](./pex-protocol.md) retrieves random samples of
+addresses to offer (send) to peers.
+This selection is also random but it includes, in particular for nodes that
+operate in seed mode, some bias toward peers marked as good ones.
+
+## Buckets
+
+Peer addresses are stored in buckets.
+There are buckets for new addresses and buckets for old addresses.
+The buckets for new addresses store addresses of peers about which the node
+does not have much information; the first address registered for a peer ID is
+always stored in a bucket for new addresses.
+The buckets for old addresses store addresses of peers with which the node has
+interacted and that were reported as [good peers](#good-peers) by a reactor.
+An old address therefore can be seen as an alias for a good address.
+
+> Note that new addresses do not mean bad addresses.
+> The addresses of peers marked as [bad peers](#bad-peers) are removed from the
+> buckets where they are stored, and temporarily kept in a table of banned peers.
+
+The number of buckets is fixed and there are more buckets for new addresses
+(`256`) than buckets for old addresses (`64`), a ratio of 4:1.
+Each bucket can store up to `64` addresses.
+When a bucket becomes full, the peer address with the lowest ranking is removed
+from the bucket.
+The first choice is to remove bad addresses, with multiple failed attempts
+associated.
+In the absence of those, the *oldest* address in the bucket is removed, i.e.,
+the address with the oldest last attempt to dial.
+
+When a bucket for old addresses becomes full, the lowest-ranked peer address in
+the bucket is moved to a bucket of new addresses.
+When a bucket for new addresses becomes full, the lowest-ranked peer address in
+the bucket is removed from the address book.
+In other words, exceeding old or good addresses are downgraded to new
+addresses, while exceeding new addresses are dropped.
+
+The bucket that stores an `address` is defined by the following two methods,
+for new and old addresses:
+
+- `calcNewBucket(address, source) = hash(key + groupKey(source) + hash(key + groupKey(address) + groupKey(source)) % newBucketsPerGroup) % newBucketCount`
+- `calcOldBucket(address) = hash(key + groupKey(address) + hash(key + address) % oldBucketsPerGroup) % oldBucketCount`
+
+The `key` is a fixed random 96-bit (12-byte) string.
+The `groupKey` for an address is a string representing its network group.
+The `source` of an address is the address of the peer from which we learn the
+address.
+The first (internal) hash is reduced to an integer up to `newBucketsPerGroup =
+32`, for new addresses, and `oldBucketsPerGroup = 4`, for old addresses.
+The second (external) hash is reduced to bucket indexes, in the interval from 0
+to the number of new (`newBucketCount = 256`) or old (`oldBucketCount = 64`) buckets.
+
+Notice that new addresses with sources from the same network group are more
+likely to end up in the same bucket, therefore to compete for it.
+For old addresses, instead, two addresses are more likely to end up in the same
+bucket when they belong to the same network group.
+
+## Adding addresses
+
+The `AddAddress` method adds the address of a peer to the address book.
+
+The added address is associated to a *source* address, which identifies the
+node from which the peer address was learned.
+
+Addresses are added to the address book in the following situations:
+
+1. When a peer address is learned via PEX protocol, having the sender
+ of the PEX message as its source
+2. When an inbound peer is added, in this case the peer itself is set as the
+ source of its own address
+3. When the switch is instructed to dial addresses via the `DialPeersAsync`
+ method, in this case the node itself is set as the source
+
+If the added address contains a node ID that is not registered in the address
+book, the address is added to a [bucket](#buckets) of new addresses.
+Otherwise, the additional address for an existing node ID is **not added** to
+the address book when:
+
+- The last address added with the same node ID is stored in an old bucket, so
+ it is considered a "good" address
+- There are addresses associated to the same node ID stored in
+ `maxNewBucketsPerAddress = 4` distinct buckets
+- Randomly, with a probability that increases exponentially with the number of
+ buckets in which there is an address with the same node ID.
+ So, a new address for a node ID which is already present in one bucket is
+ added with 50% of probability; if the node ID is present in two buckets, the
+ probability decreases to 25%; and if it is present in three buckets, the
+ probability is 12.5%.
+
+The new address is also added to the `addrLookup` table, which stores
+`knownAddress` entries indexed by their node IDs.
+If the new address is from an unknown peer, a new entry is added to the
+`addrLookup` table; otherwise, the existing entry is updated with the new
+address.
+Entries of this table contain, among other fields, the list of buckets where
+addresses of a peer are stored.
+The `addrLookup` table is used by most of the address book methods (e.g.,
+`HasAddress`, `IsGood`, `MarkGood`, `MarkAttempt`), as it provides fast access
+to addresses.
+
+### Errors
+
+- if the added address or the associated source address are nil
+- if the added address is invalid
+- if the added address is the local node's address
+- if the added address ID is of a [banned](#bad-peers) peer
+- if either the added address or the associated source address IDs are configured as private IDs
+- if `routabilityStrict` is set and the address is not routable
+- in case of failures computing the bucket for the new address (`calcNewBucket` method)
+- if the added address instance, which is a new address, is configured as an
+ old address (sanity check of `addToNewBucket` method)
+
+## Need for Addresses
+
+The `NeedMoreAddrs` method verifies whether the address book needs more addresses.
+
+It is invoked by the PEX reactor to define whether to request peer addresses
+from a new outbound peer or from a randomly selected connected peer.
+
+The address book needs more addresses when it has less than `1000` addresses
+registered, counting all buckets for new and old addresses.
+
+## Pick address
+
+The `PickAddress` method returns an address stored in the address book, chosen
+at random with a configurable bias towards new addresses.
+
+It is invoked by the Peer Manager to obtain a peer address to dial, as part of
+its `ensurePeers` routine.
+The bias starts from 10%, when the peer has no outbound peers, increasing by
+10% for each outbound peer the node has, up to 90%, when the node has at least
+8 outbound peers.
+
+The configured bias is a parameter that influences the probability of choosing
+an address from a bucket of new addresses or from a bucket of old addresses.
+A second parameter influencing this choice is the number of new and old
+addresses stored in the address book.
+In the absence of bias (i.e., if the configured bias is 50%), the probability
+of picking a new address is given by the square root of the number of new
+addresses divided by the sum of the square roots of the numbers of new and old
+addresses.
+By adding a bias toward new addresses (i.e., configured bias larger than 50%),
+the portion on the sample occupied by the square root of the number of new
+addresses increases, while the corresponding portion for old addresses decreases.
+As a result, it becomes more likely to pick a new address at random from this sample.
+
+> The use of the square roots softens the impact of disproportional numbers of
+> new and old addresses in the address book. This is actually the expected
+> scenario, as there are 4 times more buckets for new addresses than buckets
+> for old addresses.
+
+Once the type of address, new or old, is defined, a non-empty bucket of this
+type is selected at random.
+From the selected bucket, an address is chosen at random and returned.
+If all buckets of the selected type are empty, no address is returned.
+
+## Random selection
+
+The `GetSelection` method returns a selection of addresses stored in the
+address book, with no bias toward new or old addresses.
+
+It is invoked by the PEX protocol to obtain a list of peer addresses with two
+purposes:
+
+- To send to a peer in a PEX response, in the case of outbound peers or of
+ nodes not operating in seed mode
+- To crawl, in the case of nodes operating in seed mode, as part of every
+ interaction of the `crawlPeersRoutine`
+
+The selection is a random subset of the peer addresses stored in the
+`addrLookup` table, which stores the last address added for each peer ID.
+The target size of the selection is `23%` (`getSelectionPercent`) of the
+number of addresses stored in the address book, but it should not be lower than
+`32` (`minGetSelection`) --- if it is, all addresses in the book are returned
+--- nor greater than `250` (`maxGetSelection`).
+
+> The random selection is produced by:
+>
+> - Retrieving all entries of the `addrLookup` map, which by definition are
+> returned in random order.
+> - Randomly shuffling the retrieved list, using the Fisher-Yates algorithm
+
+## Random selection with bias
+
+The `GetSelectionWithBias` method returns a selection of addresses stored in
+the address book, with bias toward new addresses.
+
+It is invoked by the PEX protocol to obtain a list of peer addresses to be sent
+to a peer in a PEX response.
+This method is only invoked by seed nodes, when replying to a PEX request
+received from an inbound peer (i.e., a peer that dialed the seed node).
+The bias used in this scenario is hard-coded to 30%, meaning that 70% of
+the returned addresses are expected to be old addresses.
+
+The number of addresses that compose the selection is computed in the same way
+as for the non-biased random selection.
+The bias toward new addresses is implemented by requiring that the configured
+bias, interpreted as a percentage, of the selected addresses come from buckets of
+new addresses, while the remaining come from buckets of old addresses.
+Since the number of old addresses is typically lower than the number of new
+addresses, it is possible that the address book does not have enough old
+addresses to include in the selection.
+In this case, additional new addresses are included in the selection.
+Thus, the configured bias, in practice, is towards old addresses, not towards
+new addresses.
+
+To randomly select addresses of a type, the address book considers all
+addresses present in every bucket of that type.
+This list of all addresses of a type is randomly shuffled, and the requested
+number of addresses are retrieved from the tail of this list.
+The returned selection contains, at its beginning, a random selection of new
+addresses in random order, followed by a random selection of old addresses, in
+random order.
+
+## Dial Attempts
+
+The `MarkAttempt` method records a failed attempt to connect to an address.
+
+It is invoked by the Peer Manager when it fails dialing a peer, but the failure
+is not in the authentication step (`ErrSwitchAuthenticationFailure` error).
+In case of authentication errors, the peer is instead marked as a [bad peer](#bad-peers).
+
+The failed connection attempt is recorded in the address registered for the
+peer's ID in the `addrLookup` table, which is the last address added with that ID.
+The known address' counter of failed `Attempts` is increased and the failure
+time is registered in `LastAttempt`.
+
+The possible effect of recording multiple failed connect attempts to a peer is
+to turn its address into a *bad* address (do not confuse with banned addresses).
+A known address becomes bad if it is stored in buckets of new addresses, and
+when connection attempts:
+
+- Have not been made over a week, i.e., `LastAttempt` is older than a week
+- Have failed 3 times and never succeeded, i.e., `LastSuccess` field is unset
+- Have failed 10 times in the last week, i.e., `LastSuccess` is older than a week
+
+Addresses marked as *bad* are the first candidates to be removed from a bucket of
+new addresses when the bucket becomes full.
+
+> Note that failed connection attempts are reported for a peer address, but in
+> fact the address book records them for a peer.
+>
+> More precisely, failed connection attempts are recorded in the entry of the
+> `addrLookup` table with reported peer ID, which contains the last address
+> added for that node ID, which is not necessarily the reported peer address.
+
+## Good peers
+
+The `MarkGood` method marks a peer ID as good.
+
+It is invoked by the consensus reactor, via switch, when the number of useful
+messages received from a peer is a multiple of `10000`.
+Vote and block part messages are considered for this number, they must be valid
+and not be duplicated messages to be considered useful.
+
+> The `SwitchReporter` type of `behaviour` package also invokes the `MarkGood`
+> method when a "reason" associated with consensus votes and block parts is
+> reported.
+> No reactor, however, currently provides these "reasons" to the `SwitchReporter`.
+
+The effect of this action is that the address registered for the peer's ID in the
+`addrLookup` table, which is the last address added with that ID, is marked as
+good and moved to a bucket of old addresses.
+An address marked as good has its failed to connect counter and timestamp reset.
+If the destination bucket of old addresses is full, the oldest address in the
+bucket is moved (downgraded) to a bucket of new addresses.
+
+Moving the peer address to a bucket of old addresses has the effect of
+upgrading, or increasing the ranking of a peer in the address book.
+
+## Bad peers
+
+The `MarkBad` method marks a peer as bad and bans it for a period of time.
+
+This method is only invoked within the PEX reactor, with a banning time of 24
+hours, for the following reasons:
+
+- A peer misbehaves in the [PEX protocol](./pex-protocol.md#misbehavior)
+- When the `maxAttemptsToDial` limit (`16`) is reached for a peer
+- If an `ErrSwitchAuthenticationFailure` error is returned when dialing a peer
+
+The effect of this action is that the address registered for the peer's ID in the
+`addrLookup` table, which is the last address added with that ID, is banned for
+a period of time.
+The banned peer is removed from the `addrLookup` table and from all buckets
+where its addresses are stored.
+
+The information about banned peers, however, is not discarded.
+It is maintained in the `badPeers` map, indexed by peer ID.
+This allows, in particular, addresses of banned peers to be
+[reinstated](#reinstating-addresses), i.e., to be added
+back to the address book, when their ban period expires.
+
+## Reinstating addresses
+
+The `ReinstateBadPeers` method attempts to re-add banned addresses to the address book.
+
+It is invoked by the PEX reactor when dialing new peers.
+This action is taken before requesting additional addresses to peers,
+in the case that the node needs more peer addresses.
+
+The set of banned peer addresses is retrieved from the `badPeers` map.
+Addresses that are not any longer banned, i.e., whose banned period has expired,
+are added back to the address book as new addresses, while the corresponding
+node IDs are removed from the `badPeers` map.
+
+## Removing addresses
+
+The `RemoveAddress` method removes an address from the address book.
+
+It is invoked by the switch when it dials a peer or accepts a connection from a
+peer that ends up being the node itself (`IsSelf` error).
+In both cases, the address dialed or accepted is also added to the address book
+as a local address, via the `AddOurAddress` method.
+
+The same logic is also internally used by the address book for removing
+addresses of a peer that is [marked as a bad peer](#bad-peers).
+
+The entry registered with the peer ID of the address in the `addrLookup` table,
+which is the last address added with that ID, is removed from all buckets where
+it is stored and from the `addrLookup` table.
+
+> FIXME: is it possible that addresses with the same ID as the removed address,
+> but with distinct network addresses, are kept in buckets of the address book?
+> While they will not be accessible anymore, as there is no reference to them
+> in the `addrLookup`, they will still be there.
+
+## Persistence
+
+The `loadFromFile` method, called when the address book is started, reads
+address book entries from a file, passed to the address book constructor.
+The file, at this point, does not need to exist.
+
+The `saveRoutine` is started when the address book is started.
+It saves the address book to the configured file every `dumpAddressInterval`,
+hard-coded to 2 minutes.
+It is also possible to save the content of the address book using the `Save`
+method.
+Saving the address book content to a file acquires the address book lock, also
+employed by all other public methods.
diff --git a/cometbft/v0.38/spec/p2p/implementation/configuration.md b/cometbft/v0.38/spec/p2p/implementation/configuration.md
new file mode 100644
index 00000000..9f172c22
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/implementation/configuration.md
@@ -0,0 +1,49 @@
+# CometBFT p2p configuration
+
+This document contains configurable parameters a node operator can use to tune the p2p behaviour.
+
+| Parameter| Default| Description |
+| --- | --- | ---|
+| ListenAddress | "tcp://0.0.0.0:26656" | Address to listen for incoming connections (0.0.0.0:0 means any interface, any port) |
+| ExternalAddress | "" | Address to advertise to peers for them to dial |
+| [Seeds](./pex-protocol.md#seed-nodes) | empty | Comma separated list of seed nodes to connect to (ID@host:port )|
+| [Persistent peers](./peer_manager.md#persistent-peers) | empty | Comma separated list of nodes to keep persistent connections to (ID@host:port ) |
+| [AddrBook](./addressbook.md) | defaultAddrBookPath | Path to address book |
+| AddrBookStrict | true | Set true for strict address routability rules and false for private or local networks |
+| [MaxNumInboundPeers](./switch.md#accepting-peers) | 40 | Maximum number of inbound peers |
+| [MaxNumOutboundPeers](./peer_manager.md#ensure-peers) | 10 | Maximum number of outbound peers to connect to, excluding persistent peers |
+| [UnconditionalPeers](./switch.md#accepting-peers) | empty | These are IDs of the peers which are allowed to be (re)connected as both inbound or outbound regardless of whether the node reached `max_num_inbound_peers` or `max_num_outbound_peers` or not. |
+| PersistentPeersMaxDialPeriod| 0 * time.Second | Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) |
+| FlushThrottleTimeout |100 * time.Millisecond| Time to wait before flushing messages out on the connection |
+| MaxPacketMsgPayloadSize | 1024 | Maximum size of a message packet payload, in bytes |
+| SendRate | 5120000 (5 MB/s) | Rate at which packets can be sent, in bytes/second |
+| RecvRate | 5120000 (5 MB/s) | Rate at which packets can be received, in bytes/second|
+| [PexReactor](./pex.md) | true | Set true to enable the peer-exchange reactor |
+| SeedMode | false | Seed mode, in which the node constantly crawls the network and looks for peers. Does not work if the peer-exchange reactor is disabled. |
+| PrivatePeerIDs | empty | Comma separated list of peer IDs that we do not add to the address book or gossip to other peers. They stay private to us. |
+| AllowDuplicateIP | false | Toggle to disable guard against peers connecting from the same ip.|
+| [HandshakeTimeout](./transport.md#connection-upgrade) | 20 * time.Second | Timeout for handshake completion between peers |
+| [DialTimeout](./switch.md#dialing-peers) | 3 * time.Second | Timeout for dialing a peer |
+
+
+These parameters can be set using the `$CMTHOME/config/config.toml` file. A subset of them can also be changed via command line using the following command line flags:
+
+| Parameter | Flag | Example |
+| --- | --- | --- |
+| Listen address| `p2p.laddr` | "tcp://0.0.0.0:26656" |
+| Seed nodes | `p2p.seeds` | `--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”` |
+| Persistent peers | `p2p.persistent_peers` | `--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` |
+| Unconditional peers | `p2p.unconditional_peer_ids` | `--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` |
+| PexReactor | `p2p.pex` | `--p2p.pex` |
+| Seed mode | `p2p.seed_mode` | `--p2p.seed_mode` |
+| Private peer ids | `p2p.private_peer_ids` | `--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` |
+
+ **Note on persistent peers**
+
+ If `persistent_peers_max_dial_period` is set greater than zero, the
+pause between each dial to each persistent peer will not exceed `persistent_peers_max_dial_period`
+during exponential backoff and we keep trying again without giving up.
+
+If `seeds` and `persistent_peers` intersect,
+the user will be warned that seeds may auto-close connections
+and that the node may not be able to keep the connection persistent.
diff --git a/cometbft/v0.38/spec/p2p/implementation/peer_manager.md b/cometbft/v0.38/spec/p2p/implementation/peer_manager.md
new file mode 100644
index 00000000..5dfc14b2
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/implementation/peer_manager.md
@@ -0,0 +1,140 @@
+# Peer Manager
+
+The peer manager is responsible for establishing connections with peers.
+It defines when a node should dial peers and which peers it should dial.
+The peer manager is not an implementation abstraction of the p2p layer,
+but a role that is played by the [PEX reactor](./pex.md).
+
+## Outbound peers
+
+The `ensurePeersRoutine` is a persistent routine intended to ensure that a node
+is connected to `MaxNumOutboundPeers` outbound peers.
+This routine is continuously executed by regular nodes, i.e. nodes not
+operating in seed mode, as part of the PEX reactor implementation.
+
+The logic defining when the node should dial peers, for selecting peers to dial
+and for actually dialing them is implemented in the `ensurePeers` method.
+This method is periodically invoked -- every `ensurePeersPeriod`, with default
+value to 30 seconds -- by the `ensurePeersRoutine`.
+
+A node is expected to dial peers whenever the number of outbound peers is lower
+than the configured `MaxNumOutboundPeers` parameter.
+The current number of outbound peers is retrieved from the switch, using the
+`NumPeers` method, which also reports the number of nodes to which the switch
+is currently dialing.
+If the number of outbound peers plus the number of dialing routines equals
+`MaxNumOutboundPeers`, nothing is done.
+Otherwise, the `ensurePeers` method will attempt to dial node addresses in
+order to reach the target number of outbound peers.
+
+Once defined that the node needs additional outbound peers, the node queries
+the address book for candidate addresses.
+This is done using the [`PickAddress`](./addressbook.md#pick-address) method,
+which returns an address selected at random on the address book, with some bias
+towards new or old addresses.
+When the node has up to 3 outbound peers, the adopted bias is towards old
+addresses, i.e., addresses of peers that are believed to be "good".
+When the node has 5 or more outbound peers, the adopted bias is towards new
+addresses, i.e., addresses of peers about which the node has not yet collected
+much information.
+So, the more outbound peers a node has, the less conservative it will be when
+selecting new peers.
+
+The selected peer addresses are then dialed in parallel, by starting a dialing
+routine per peer address.
+Dialing a peer address can fail for multiple reasons.
+The node might have attempted to dial the peer too many times.
+In this case, the peer address is marked as bad and removed from the address book.
+The node might have attempted and failed to dial the peer recently
+and the exponential `backoffDuration` has not yet passed.
+Or the current connection attempt might fail, which is registered in the address book.
+None of these errors are explicitly handled by the `ensurePeers` method, which
+also does not wait until the connections are established.
+
+The third step of the `ensurePeers` method is to ensure that the address book
+has enough addresses.
+This is done, first, by [reinstating banned peers](./addressbook.md#Reinstating-addresses)
+whose ban period has expired.
+Then, the node randomly selects a connected peer, which can be either an
+inbound or outbound peer, to [requests addresses](./pex-protocol.md#Requesting-Addresses)
+using the PEX protocol.
+Last, and this action is only performed if the node could not retrieve any new
+address to dial from the address book, the node dials the configured seed nodes
+in order to establish a connection to at least one of them.
+
+### Fast dialing
+
+As above described, seed nodes are actually the last source of peer addresses
+for regular nodes.
+They are contacted by a node when, after an invocation of the `ensurePeers`
+method, no suitable peer address to dial is retrieved from the address book
+(e.g., because it is empty).
+
+Once a connection with a seed node is established, the node immediately
+[sends a PEX request](./pex-protocol.md#Requesting-Addresses) to it, as it is
+added as an outbound peer.
+When the corresponding PEX response is received, the addresses provided by the
+seed node are added to the address book.
+As a result, in the next invocation of the `ensurePeers` method, the node
+should be able to dial some of the peer addresses provided by the seed node.
+
+However, as observed in this [issue](https://github.com/tendermint/tendermint/issues/2093),
+it can take some time, up to `ensurePeersPeriod` or 30 seconds, from when the
+node receives new peer addresses and when it dials the received addresses.
+To avoid this delay, which can be particularly relevant when the node has no
+peers, a node immediately attempts to dial peer addresses when they are
+received from a peer that is locally configured as a seed node.
+
+> This was implemented in a rough way, leading to inconsistencies described in
+> this [issue](https://github.com/cometbft/cometbft/issues/486),
+> fixed by this [PR](https://github.com/cometbft/cometbft/pull/3360).
+
+### First round
+
+When the PEX reactor is started, the `ensurePeersRoutine` is created and it
+runs throughout the operation of a node, periodically invoking the `ensurePeers`
+method.
+However, if when the persistent routine is started the node already has some
+peers, either inbound or outbound peers, or is dialing some addresses, the
+first invocation of `ensurePeers` is delayed by a random amount of time from 0
+to `ensurePeersPeriod`.
+
+### Persistent peers
+
+The node configuration can contain a list of *persistent peers*.
+Those peers have preferential treatment compared to regular peers and the node
+is always trying to connect to them.
+Moreover, these peers are not removed from the address book in the case of
+multiple failed dial attempts.
+
+On startup, the node immediately tries to dial the configured persistent peers
+by calling the switch's [`DialPeersAsync`](./switch.md#manual-operation) method.
+This is not done in the p2p package, but it is part of the procedure to set up a node.
+
+> TODO: the handling of persistent peers should be described in more detail.
+
+### Life cycle
+
+The picture below is a first attempt of illustrating the life cycle of an outbound peer:
+
+
+
+A peer can be in the following states:
+
+- Candidate peers: peer addresses stored in the address book, that can be
+ retrieved via the [`PickAddress`](./addressbook.md#pick-address) method
+- [Dialing](./switch.md#dialing-peers): peer addresses that are currently being
+ dialed. This state exists to ensure that a single dialing routine exist per peer.
+- [Reconnecting](./switch.md#reconnect-to-peer): persistent peers to which a node
+ is currently reconnecting, as a previous connection attempt has failed.
+- Connected peers: peers that a node has successfully dialed, added as outbound peers.
+- [Bad peers](./addressbook.md#bad-peers): peers marked as bad in the address
+ book due to exhibited [misbehavior](./pex-protocol.md#misbehavior).
+ Peers can be reinstated after being marked as bad.
+
+## Pending of documentation
+
+The `dialSeeds` method of the PEX reactor.
+
+The `dialPeer` method of the PEX reactor.
+This includes `dialAttemptsInfo`, `maxBackoffDurationForPeer` methods.
diff --git a/cometbft/v0.38/spec/p2p/implementation/pex-protocol.md b/cometbft/v0.38/spec/p2p/implementation/pex-protocol.md
new file mode 100644
index 00000000..760a56bd
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/implementation/pex-protocol.md
@@ -0,0 +1,240 @@
+# Peer Exchange Protocol
+
+The Peer Exchange (PEX) protocol enables nodes to exchange peer addresses, thus
+implementing a peer discovery mechanism.
+
+The PEX protocol uses two messages:
+
+- `PexRequest`: sent by a node to [request](#requesting-addresses) peer
+ addresses to a peer
+- `PexAddrs`: a list of peer addresses [provided](#providing-addresses) to a
+ peer as response to a `PexRequest` message
+
+While all nodes, with few exceptions, participate on the PEX protocol,
+a subset of nodes, configured as [seed nodes](#seed-nodes) have a particular
+role in the protocol.
+They crawl the network, connecting to random peers, in order to learn as many
+peer addresses as possible to provide to other nodes.
+
+## Requesting Addresses
+
+A node requests peer addresses by sending a `PexRequest` message to a peer.
+
+For regular nodes, not operating in seed mode, a PEX request is sent when
+the node *needs* peer addresses, a condition checked:
+
+1. When an *outbound* peer is added, causing the node to request addresses from
+ the new peer
+2. Periodically, by the `ensurePeersRoutine`, causing the node to request peer
+ addresses to a randomly selected peer
+
+A node needs more peer addresses when its address book has
+[less than 1000 records](./addressbook.md#need-for-addresses).
+It is thus reasonable to assume that the common case is that a peer needs more
+peer addresses, so that PEX requests are sent whenever the above two situations happen.
+
+A PEX request is sent when a new *outbound* peer is added.
+The same does not happen with new inbound peers because the implementation
+considers outbound peers, that the node has chosen for dialing, more
+trustworthy than inbound peers, that the node has accepted.
+Moreover, when a node is short of peer addresses, it dials the configured seed nodes;
+since they are added as outbound peers, the node can immediately request peer addresses.
+
+The `ensurePeersRoutine` periodically checks, by default every 30 seconds (`ensurePeersPeriod`),
+whether the node has enough outbound peers.
+If it does not have, the node tries dialing some peer addresses stored in the address book.
+As part of this procedure, the node selects a peer at random,
+from the set of connected peers retrieved from the switch,
+and sends a PEX request to the selected peer.
+
+Sending a PEX request to a peer is implemented by the `RequestAddrs` method of
+the PEX reactor.
+
+### Responses
+
+After a PEX request is sent to a peer, the node expects to receive,
+as a response, a `PexAddrs` message from the peer.
+This message encodes a list of peer addresses that are
+[added to address book](./addressbook.md#adding-addresses),
+having the peer from which the PEX response was received as their source.
+
+Received PEX responses are handled by the `ReceiveAddrs` method of the PEX reactor.
+In the case of a PEX response received from a peer which is configured as
+a seed node, the PEX reactor attempts immediately to dial the provided peer
+addresses, as detailed [here](./peer_manager.md#fast-dialing).
+
+### Misbehavior
+
+Sending multiple PEX requests to a peer, before receiving a reply from it,
+is considered a misbehavior.
+To prevent it, the node maintains a `requestsSent` set of outstanding
+requests, indexed by destination peers.
+While a peer ID is present in the `requestsSent` set, the node does not send
+further PEX requests to that peer.
+A peer ID is removed from the `requestsSent` set when a PEX response is
+received from it.
+
+Sending a PEX response to a peer that has not requested peer addresses
+is also considered a misbehavior.
+So, if a PEX response is received from a peer that is not registered in
+the `requestsSent` set, a `ErrUnsolicitedList` error is produced.
+This leads the peer to be disconnected and [marked as a bad peer](./addressbook.md#bad-peers).
+
+## Providing Addresses
+
+When a node receives a `PexRequest` message from a peer,
+it replies with a `PexAddrs` message.
+
+This message encodes a [random selection of peer addresses](./addressbook.md#random-selection)
+retrieved from the address book.
+
+Sending a PEX response to a peer is implemented by the `SendAddrs` method of
+the PEX reactor.
+
+### Misbehavior
+
+Requesting peer addresses too often is considered a misbehavior.
+Since nodes are expected to send PEX requests every `ensurePeersPeriod`,
+the minimum accepted interval between requests from the same peer is set
+to `ensurePeersPeriod / 3`, 10 seconds by default.
+
+The `receiveRequest` method is responsible for verifying this condition.
+The node keeps a `lastReceivedRequests` map with the time of the last PEX
+request received from every peer.
+If the interval between successive requests is less than the minimum accepted
+one, the peer is disconnected and [marked as a bad peer](./addressbook.md#bad-peers).
+An exception is made for the first two PEX requests received from a peer.
+
+> The probable reason is that, when a new peer is added, the two conditions for
+> a node to request peer addresses can be triggered with an interval lower than
+> the minimum accepted interval.
+> Since this is a legit behavior, it should not be punished.
+
+## Seed nodes
+
+A seed node is a node configured to operate in `SeedMode`.
+
+### Crawling peers
+
+Seed nodes crawl the network, connecting to random peers and sending PEX
+requests to them, in order to learn as many peer addresses as possible.
+More specifically, a node operating in seed mode sends PEX requests in two cases:
+
+1. When an outbound peer is added, and the seed node needs more peer addresses,
+ it requests peer addresses to the new peer
+2. Periodically, the `crawlPeersRoutine` sends PEX requests to a random set of
+ peers, whose addresses are registered in the Address Book
+
+The first case also applies for nodes not operating in seed mode.
+The second case replaces, for seed nodes, the second case for regular nodes:
+seed nodes do not run the `ensurePeersRoutine`, as regular nodes do,
+but instead run the `crawlPeersRoutine`, which is not run by regular nodes.
+
+The `crawlPeersRoutine` periodically, every 30 seconds (`crawlPeerPeriod`),
+starts a new peer discovery round.
+First, the seed node retrieves a random selection of peer addresses from its
+Address Book.
+This selection is produced in the same way as in the random selection of peer
+addresses that are [provided](#providing-addresses) to a requesting peer.
+Peers that the seed node has crawled recently,
+less than 2 minutes ago (`minTimeBetweenCrawls`), are removed from this selection.
+The remaining peer addresses are registered in the `crawlPeerInfos` table.
+
+The seed node is not necessarily connected to the peer whose address is
+selected for each round of crawling.
+So, the seed node dials the selected peer addresses.
+This is performed in foreground, one peer at a time.
+As a result, a round of crawling can take a substantial amount of time.
+For each selected peer it succeeds dialing to, this includes already connected
+peers, the seed node sends a PEX request.
+
+Dialing a selected peer address can fail for multiple reasons.
+The seed node might have attempted to dial the peer too many times.
+In this case, the peer address is marked as [bad in the address book](./addressbook.md#bad-peers).
+The seed node might have attempted to dial the peer recently, without success,
+and the exponential `backoffDuration` has not yet passed.
+Or the current connection attempt might fail, which is registered in the address book.
+
+Failures to dial to a peer address produce an information that is important for
+a seed node.
+They indicate that a peer is unreachable, or is not operating correctly, and
+therefore its address should not be provided to other nodes.
+This occurs when, due to multiple failed connection attempts or authentication
+failures, the peer address ends up being removed from the address book.
+As a result, the periodic crawling of selected peers not only enables the
+discovery of new peers, but also allows the seed node to stop providing
+addresses of bad peers.
+
+### Offering addresses
+
+Nodes operating in seed mode handle PEX requests differently than regular
+nodes, whose operation is described [here](#providing-addresses).
+
+This distinction exists because nodes dial a seed node with the main, if not
+exclusive goal of retrieving peer addresses.
+In other words, nodes do not dial a seed node because they intend to have it as
+a peer in the multiple CometBFT protocols, but because they believe that a
+seed node is a good source of addresses of nodes to which they can establish
+connections and interact in the multiple CometBFT protocols.
+
+So, when a seed node receives a `PexRequest` message from an inbound peer,
+it sends a `PexAddrs` message, containing a selection of peer
+addresses, back to the peer and *disconnects* from it.
+Seed nodes therefore treat inbound connections from peers as short-term
+connections, exclusively intended to retrieve peer addresses.
+Once the requested peer addresses are sent, the connection with the peer is closed.
+
+Moreover, the selection of peer addresses provided to inbound peers by a seed
+node, although still essentially random, has a [bias toward old
+addresses](./addressbook.md#random-selection-with-bias).
+The selection bias is defined by `biasToSelectNewPeers`, hard-coded to `30%`,
+meaning that `70%` of the peer addresses provided by a seed node are expected
+to be old addresses.
+Although this nomenclature is not clear, *old* addresses are the addresses that
+survived the most in the address book, that is, are addresses that the seed
+node believes being from *good* peers (more details [here](./addressbook.md#good-peers)).
+
+Another distinction is on the handling of potential [misbehavior](#misbehavior-1)
+of peers requesting addresses.
+A seed node does not enforce, a priori, a minimal interval between PEX requests
+from inbound peers.
+Instead, it does not reply to more than one PEX request per peer inbound
+connection, and, as above mentioned, it disconnects from incoming peers after
+responding to them.
+If the same peer dials again to the seed node and requests peer addresses, the
+seed node will reply to this peer like it was the first time it has requested
+peer addresses.
+
+> This is more an implementation restriction than a desired behavior.
+> The `lastReceivedRequests` map stores the last time a PEX request was
+> received from a peer, and the entry relative to a peer is removed from this
+> map when the peer is disconnected.
+>
+> It is debatable whether this approach indeed prevents abuse against seed nodes.
+
+### Disconnecting from peers
+
+Seed nodes treat connections with peers as short-term connections, which are
+mainly, if not exclusively, intended to exchange peer addresses.
+
+In the case of inbound peers, that have dialed the seed node, the intent of the
+connection is achieved once a PEX response is sent to the peer.
+The seed node thus disconnects from an inbound peer after sending a `PexAddrs`
+message to it.
+
+In the case of outbound peers, which the seed node has dialed for crawling peer
+addresses, the intent of the connection is essentially achieved when a PEX
+response is received from the peer.
+The seed node, however, does not disconnect from a peer after receiving a
+selection of peer addresses from it.
+As a result, after some rounds of crawling, a seed node will have established
+connections to a substantial amount of peers.
+
+To cope with the existence of multiple connections with peers that have no
+longer purpose for the seed node, the `crawlPeersRoutine` also invokes, after
+each round of crawling, the `attemptDisconnects` method.
+This method retrieves the list of connected peers from the switch, and
+disconnects from peers that are not persistent peers, and with which a
+connection is established for more than `SeedDisconnectWaitPeriod`.
+This period is a configuration parameter, set to 28 hours when the PEX reactor
+is created by the default node constructor.
diff --git a/cometbft/v0.38/spec/p2p/implementation/pex.md b/cometbft/v0.38/spec/p2p/implementation/pex.md
new file mode 100644
index 00000000..8243eaa5
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/implementation/pex.md
@@ -0,0 +1,111 @@
+# PEX Reactor
+
+The PEX reactor is one of the reactors running in a CometBFT node.
+
+Its implementation is located in the `p2p/pex` package, and it is considered
+part of the implementation of the p2p layer.
+
+This document overviews the implementation of the PEX reactor, describing how
+the methods from the `Reactor` interface are implemented.
+
+The actual operation of the PEX reactor is presented in documents describing
+the roles played by the PEX reactor in the p2p layer:
+
+- [Address Book](./addressbook.md): stores known peer addresses and information
+ about peers to which the node is connected or has attempted to connect
+- [Peer Manager](./peer_manager.md): manages connections established with peers,
+ defining when a node should dial peers and which peers it should dial
+- [Peer Exchange protocol](./pex-protocol.md): enables nodes to exchange peer
+ addresses, thus implementing a peer discovery service
+
+## OnStart
+
+The `OnStart` method implements `BaseService` and starts the PEX reactor.
+
+The [address book](./addressbook.md), which is a `Service`, is started.
+This loads the address book content from disk,
+and starts a routine that periodically persists the address book content to disk.
+
+The PEX reactor is configured with the addresses of a number of seed nodes,
+the `Seeds` parameter of the `ReactorConfig`.
+The addresses of seed nodes are parsed into `NetAddress` instances and resolved
+into IP addresses, which is implemented by the `checkSeeds` method.
+Valid seed node addresses are stored in the `seedAddrs` field,
+and are used by the `dialSeeds` method to contact the configured seed nodes.
+
+The last action is to start one of the following persistent routines, based on
+the `SeedMode` configuration parameter:
+
+- Regular nodes run the `ensurePeersRoutine` to check whether the node has
+ enough outbound peers, dialing peers when necessary
+- Seed nodes run the `crawlPeersRoutine` to periodically start a new round
+ of [crawling](./pex-protocol.md#Crawling-peers) to discover as many peer
+ addresses as possible
+
+### Errors
+
+Errors encountered when loading the address book from disk are returned,
+and prevent the reactor from being started.
+An exception is made for the `service.ErrAlreadyStarted` error, which is ignored.
+
+Errors encountered when parsing the configured addresses of seed nodes
+are returned and cause the reactor startup to fail.
+An exception is made for DNS resolution `ErrNetAddressLookup` errors,
+which are not deemed fatal and are only logged as invalid addresses.
+
+If none of the configured seed node addresses is valid, and the loaded address
+book is empty, the reactor is not started and an error is returned.
+
+## OnStop
+
+The `OnStop` method implements `BaseService` and stops the PEX reactor.
+
+The address book routine that periodically saves its content to disk is stopped.
+
+## GetChannels
+
+The `GetChannels` method, from the `Reactor` interface, returns the descriptor
+of the channel used by the PEX protocol.
+
+The channel ID is `PexChannel` (0), with priority `1`, send queue capacity of
+`10`, and maximum message size of `64000` bytes.
+
+## AddPeer
+
+The `AddPeer` method, from the `Reactor` interface,
+adds a new peer to the PEX protocol.
+
+If the new peer is an **inbound peer**, i.e., if the peer has dialed the node,
+the peer's address is [added to the address book](./addressbook.md#adding-addresses).
+Since the peer was authenticated when establishing a secret connection with it,
+the source of the peer address is trusted, and its source is set by the peer itself.
+In the case of an outbound peer, the node should already have its address in
+the address book, as the switch has dialed the peer.
+
+If the peer is an **outbound peer**, i.e., if the node has dialed the peer,
+and the PEX protocol needs more addresses,
+the node [sends a PEX request](./pex-protocol.md#Requesting-Addresses) to the peer.
+The same is not done when inbound peers are added because they are deemed less
+trustworthy than outbound peers.
+
+## RemovePeer
+
+The `RemovePeer` method, from the `Reactor` interface,
+removes a peer from the PEX protocol.
+
+The peer's ID is removed from the tables tracking PEX requests
+[sent](./pex-protocol.md#misbehavior) but not yet replied
+and PEX requests [received](./pex-protocol.md#misbehavior-1).
+
+## Receive
+
+The `Receive` method, from the `Reactor` interface,
+handles a message received by the PEX protocol.
+
+A node receives two type of messages as part of the PEX protocol:
+
+- `PexRequest`: a request for addresses received from a peer, handled as
+ described [here](./pex-protocol.md#providing-addresses)
+- `PexAddrs`: a list of addresses received from a peer, as a response to a PEX
+ request sent by the node, as described [here](./pex-protocol.md#responses)
+
diff --git a/cometbft/v0.38/spec/p2p/implementation/switch.md b/cometbft/v0.38/spec/p2p/implementation/switch.md
new file mode 100644
index 00000000..4497fef9
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/implementation/switch.md
@@ -0,0 +1,238 @@
+# Switch
+
+The switch is a core component of the p2p layer.
+It manages the procedures for [dialing peers](#dialing-peers) and
+[accepting](#accepting-peers) connections from peers, which are actually
+implemented by the [transport](./transport.md).
+It also manages the reactors, i.e., protocols implemented by the node that
+interact with its peers.
+Once a connection with a peer is established, the peer is [added](#add-peer) to
+the switch and all registered reactors.
+Reactors may also instruct the switch to [stop a peer](#stop-peer), namely
+disconnect from it.
+The switch, in this case, makes sure that the peer is removed from all
+registered reactors.
+
+## Dialing peers
+
+Dialing a peer is implemented by the `DialPeerWithAddress` method.
+
+This method is invoked by the [peer manager](./peer_manager.md#ensure-peers)
+to dial a peer address and establish a connection with an outbound peer.
+
+The switch keeps a single dialing routine per peer ID.
+This is ensured by keeping a synchronized map `dialing` with the IDs of peers
+to which the node is dialing.
+A peer ID is added to `dialing` when the `DialPeerWithAddress` method is called
+for that peer, and it is removed when the method returns for whatever reason.
+The method returns immediately when invoked for a peer whose ID is already in
+the `dialing` structure.
+
+The actual dialing is implemented by the [`Dial`](./transport.md#dial) method
+of the transport configured for the switch, in the `addOutboundPeerWithConfig`
+method.
+If the transport succeeds establishing a connection, the returned `Peer` is
+added to the switch using the [`addPeer`](#add-peer) method.
+This operation can fail, returning an error. In this case, the switch invokes
+the transport's [`Cleanup`](./transport.md#cleanup) method to clean any resources
+associated with the peer.
+
+If the transport fails to establish a connection with the peer that is configured
+as a persistent peer, the switch spawns a routine to [reconnect to the peer](#reconnect-to-peer).
+If the peer is already in the `reconnecting` state, the spawned routine has no
+effect and returns immediately.
+This is in fact a likely scenario, as the `reconnectToPeer` routine relies on
+this same `DialPeerWithAddress` method for dialing peers.
+
+### Manual operation
+
+The `DialPeersAsync` method receives a list of peer addresses (strings)
+and dials all of them in parallel.
+It is invoked in two situations:
+
+- In the [setup](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L987)
+of a node, to establish connections with every configured persistent peer
+- In the RPC package, to implement two unsafe RPC commands, not used in production:
+ [`DialSeeds`](https://github.com/cometbft/cometbft/blob/v0.34.x/rpc/core/net.go#L47) and
+ [`DialPeers`](https://github.com/cometbft/cometbft/blob/v0.34.x/rpc/core/net.go#L87)
+
+The received list of peer addresses to dial is parsed into `NetAddress` instances.
+In case of parsing errors, the method returns. An exception is made for
+DNS resolution `ErrNetAddressLookup` errors, which do not interrupt the procedure.
+
+As the peer addresses provided to this method are typically not known by the node,
+contrary to the addresses dialed using the `DialPeerWithAddress` method,
+they are added to the node's address book, which is persisted to disk.
+
+The switch dials the provided peers in parallel.
+The list of peer addresses is randomly shuffled, and for each peer a routine is
+spawned.
+Each routine sleeps for a random interval, up to 3 seconds, then invokes the
+`DialPeerWithAddress` method that actually dials the peer.
+
+### Reconnect to peer
+
+The `reconnectToPeer` method is invoked when a connection attempt to a peer fails,
+and the peer is configured as a persistent peer.
+
+The `reconnecting` synchronized map keeps the peers in this state, identified
+by their IDs (string).
+This should ensure that a single instance of this method is running at any time.
+The peer is kept in this map while this method is running for it: it is set on
+the beginning, and removed when the method returns for whatever reason.
+If the peer is already in the `reconnecting` state, nothing is done.
+
+The remainder of the method performs multiple connection attempts to the peer,
+via `DialPeerWithAddress` method.
+If a connection attempt succeeds, the method returns and the routine finishes.
+The same applies when an `ErrCurrentlyDialingOrExistingAddress` error is
+returned by the dialing method, as it indicates that peer is already connected
+or that another routine is attempting to (re)connect to it.
+
+A first set of connection attempts is done at (about) regular intervals.
+More precisely, between two attempts, the switch waits for an interval of
+`reconnectInterval`, hard-coded to 5 seconds, plus a random jitter up to
+`dialRandomizerIntervalMilliseconds`, hard-coded to 3 seconds.
+At most `reconnectAttempts`, hard-coded to 20, are made using this
+regular-interval approach.
+
+A second set of connection attempts is done with exponentially increasing
+intervals.
+The base interval `reconnectBackOffBaseSeconds` is hard-coded to 3 seconds,
+which is also the increasing factor.
+The exponentially increasing dialing interval is adjusted as well by a random
+jitter up to `dialRandomizerIntervalMilliseconds`.
+At most `reconnectBackOffAttempts`, hard-coded to 10, are made using this approach.
+
+> Note: the first sleep interval, to which a random jitter is applied, is 1,
+> not `reconnectBackOffBaseSeconds`, as the first exponent is `0`...
+
+## Accepting peers
+
+The `acceptRoutine` method is a persistent routine that handles connections
+accepted by the transport configured for the switch.
+
+The [`Accept`](./transport.md#accept) method of the configured transport
+returns a `Peer` with which an inbound connection was established.
+The switch accepts a new peer if the maximum number of inbound peers was not
+reached, or if the peer was configured as an _unconditional peer_.
+The maximum number of inbound peers is determined by the `MaxNumInboundPeers`
+configuration parameter, whose default value is `40`.
+
+If accepted, the peer is added to the switch using the [`addPeer`](#add-peer) method.
+If the switch does not accept the established incoming connection, or if the
+`addPeer` method returns an error, the switch invokes the transport's
+[`Cleanup`](./transport.md#cleanup) method to clean any resources associated
+with the peer.
+
+The transport's `Accept` method can also return a number of errors.
+Errors of `ErrRejected` or `ErrFilterTimeout` types are ignored,
+an `ErrTransportClosed` causes the accepted routine to be interrupted,
+while other errors cause the routine to panic.
+
+> TODO: which errors can cause the routine to panic?
+
+## Add peer
+
+The `addPeer` method adds a peer to the switch,
+either after dialing (by `addOutboundPeerWithConfig`, called by `DialPeerWithAddress`)
+a peer and establishing an outbound connection,
+or after accepting (`acceptRoutine`) a peer and establishing an inbound connection.
+
+The first step is to invoke the `filterPeer` method.
+It checks whether the peer is already in the set of connected peers,
+and whether any of the configured `peerFilter` methods reject the peer.
+If the peer is already present or it is rejected by any filter, the `addPeer`
+method fails and returns an error.
+
+Then, the new peer is started, added to the set of connected peers, and added
+to all reactors.
+More precisely, the new peer's information is first provided to every
+reactor (`InitPeer` method).
+Next, the peer's sending and receiving routines are started, and the peer is
+added to set of connected peers.
+These two operations can fail, causing `addPeer` to return an error.
+Then, in the absence of previous errors, the peer is added to every reactor (`AddPeer` method).
+
+> Adding the peer to the peer set returns a `ErrSwitchDuplicatePeerID` error
+> when a peer with the same ID is already present.
+>
+> TODO: Starting a peer could be reduced as starting the MConn with that peer?
+
+## Stop peer
+
+There are two methods for stopping a peer, namely disconnecting from it, and
+removing it from the table of connected peers.
+
+The `StopPeerForError` method is invoked to stop a peer due to an external
+error, which is provided to method as a generic "reason".
+
+The `StopPeerGracefully` method stops a peer in the absence of errors or, more
+precisely, not providing to the switch any "reason" for that.
+
+In both cases the `Peer` instance is stopped, the peer is removed from all
+registered reactors, and finally from the list of connected peers.
+
+> Issue is mentioned in
+> the internal `stopAndRemovePeer` method explaining why removing the peer from
+> the list of connected peers is the last action taken.
+
+When there is a "reason" for stopping the peer (`StopPeerForError` method)
+and the peer is a persistent peer, the method creates a routine to attempt
+reconnecting to the peer address, using the `reconnectToPeer` method.
+If the peer is an outbound peer, the peer's address is known, since the switch
+has dialed the peer.
+Otherwise, the peer address is retrieved from the `NodeInfo` instance from the
+connection handshake.
+
+## Add reactor
+
+The `AddReactor` method registers a `Reactor` to the switch.
+
+The reactor is associated to the set of channel ids it employs.
+Two reactors (in the same node) cannot share the same channel id.
+
+There is a call back to the reactor, in which the switch passes itself to the
+reactor.
+
+## Remove reactor
+
+The `RemoveReactor` method unregisters a `Reactor` from the switch.
+
+The reactor is disassociated from the set of channel ids it employs.
+
+There is a call back to the reactor, in which the switch passes `nil` to the
+reactor.
+
+## OnStart
+
+This is a `BaseService` method.
+
+All registered reactors are started.
+
+The switch's `acceptRoutine` is started.
+
+## OnStop
+
+This is a `BaseService` method.
+
+All (connected) peers are stopped and removed from the peer's list using the
+`stopAndRemovePeer` method.
+
+All registered reactors are stopped.
+
+## Broadcast
+
+This method broadcasts a message on a channel, by sending the message in
+parallel to all connected peers.
+
+The method spawns a thread for each connected peer, invoking the `Send` method
+provided by each `Peer` instance with the provided message and channel ID.
+The return values (booleans) of these calls are redirected to a channel that is
+returned by the method.
+
+> TODO: detail where this method is invoked:
+>
+> - By the consensus protocol, in `broadcastNewRoundStepMessage`,
+> `broadcastNewValidBlockMessage`, and `broadcastHasVoteMessage`
+> - By the state sync protocol
diff --git a/cometbft/v0.38/spec/p2p/implementation/transport.md b/cometbft/v0.38/spec/p2p/implementation/transport.md
new file mode 100644
index 00000000..20d4db87
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/implementation/transport.md
@@ -0,0 +1,222 @@
+# Transport
+
+The transport establishes secure and authenticated connections with peers.
+
+The transport [`Dial`](#dial)s peer addresses to establish outbound connections,
+and [`Listen`](#listen)s in a configured network address
+to [`Accept`](#accept) inbound connections from peers.
+
+The transport establishes raw TCP connections with peers
+and [upgrades](#connection-upgrade) them into authenticated secret connections.
+The established secret connection is then wrapped into a `Peer` instance, which
+is returned to the caller, typically the [switch](./switch.md).
+
+## Dial
+
+The `Dial` method is used by the switch to establish an outbound connection with a peer.
+It is a synchronous method, which blocks until a connection is established or an error occurs.
+The method returns an outbound `Peer` instance wrapping the established connection.
+
+The transport first dials the provided peer's address to establish a raw TCP connection.
+The dialing maximum duration is determined by `dialTimeout`, hard-coded to 1 second.
+The established raw connection is then submitted to a set of [filters](#connection-filtering),
+which can reject it.
+If the connection is not rejected, it is recorded in the table of established connections.
+
+The established raw TCP connection is then [upgraded](#connection-upgrade) into
+an authenticated secret connection.
+This procedure should ensure, in particular, that the public key of the remote peer
+matches the ID of the dialed peer, which is part of the peer address provided to this method.
+In the absence of errors,
+the established secret connection (`conn.SecretConnection` type)
+and the information about the peer (`NodeInfo` record) retrieved and verified
+during the version handshake,
+are wrapped into an outbound `Peer` instance and returned to the switch.
+
+## Listen
+
+The `Listen` method produces a TCP listener instance for the provided network
+address, and spawns an `acceptPeers` routine to handle the raw connections
+accepted by the listener.
+The `NetAddress` method exports the listen address configured for the transport.
+
+The maximum number of simultaneous incoming connections accepted by the listener
+is bound to `MaxNumInboundPeer` plus the configured number of unconditional peers,
+using the `MultiplexTransportMaxIncomingConnections` option,
+in the node [initialization](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L563).
+
+This method is called when a node is [started](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L974).
+In case of errors, the `acceptPeers` routine is not started and the error is returned.
+
+## Accept
+
+The `Accept` method returns to the switch inbound connections established with a peer.
+It is a synchronous method, which blocks until a connection is accepted or an error occurs.
+The method returns an inbound `Peer` instance wrapping the established connection.
+
+The transport handles incoming connections in the `acceptPeers` persistent routine.
+This routine is started by the [`Listen`](#listen) method
+and accepts raw connections from a TCP listener.
+A new routine is spawned for each accepted connection.
+The raw connection is submitted to a set of [filters](#connection-filtering),
+which can reject it.
+If the connection is not rejected, it is recorded in the table of established connections.
+
+The established raw TCP connection is then [upgraded](#connection-upgrade) into
+an authenticated secret connection.
+The established secret connection (`conn.SecretConnection` type),
+the information about the peer (`NodeInfo` record) retrieved and verified
+during the version handshake,
+as well as any error returned in this process are added to a queue of accepted connections.
+This queue is consumed by the `Accept` method.
+
+> Handling accepted connections asynchronously was introduced due to this issue:
+>
+
+## Connection Filtering
+
+The `filterConn` method is invoked for every new raw connection established by the transport.
+Its main goal is to prevent the transport from maintaining duplicated connections with the same peer.
+It also runs a set of configured connection filters.
+
+The transport keeps a table `conns` of established connections.
+The table maps the remote address returned by a generic connection to a list of
+IP addresses, to which the connection remote address is resolved.
+If the remote address of the new connection is already present in the table,
+the connection is rejected.
+Otherwise, the connection's remote address is resolved into a list of IPs,
+which are recorded in the established connections table.
+
+The connection and the resolved IPs are then passed through a set of connection filters,
+configured via the `MultiplexTransportConnFilters` transport option.
+The maximum duration for the filters execution, which is performed in parallel,
+is determined by `filterTimeout`.
+Its default value is 5 seconds,
+which can be changed using the `MultiplexTransportFilterTimeout` transport option.
+
+If the connection and the resolved remote addresses are not filtered out,
+the transport registers them into the `conns` table and returns.
+
+In case of errors, the connection is removed from the table of established
+connections and closed.
+
+### Errors
+
+If the address of the new connection is already present in the `conns` table,
+an `ErrRejected` error with the `isDuplicate` reason is returned.
+
+If the IP resolution of the connection's remote address fails,
+an `AddrError` or `DNSError` error is returned.
+
+If any of the filters reject the connection,
+an `ErrRejected` error with the `isRejected` reason is returned.
+
+If the filters execution times out,
+an `ErrFilterTimeout` error is returned.
+
+## Connection Upgrade
+
+The `upgrade` method is invoked for every new raw connection established by the
+transport that was not [filtered out](#connection-filtering).
+It upgrades an established raw TCP connection into a secret authenticated
+connection, and validates the information provided by the peer.
+
+This is a complex procedure, that can be summarized by the following three
+message exchanges between the node and the new peer:
+
+1. Encryption: the nodes produce ephemeral key pairs and exchange ephemeral
+ public keys, from which are derived: (i) a pair of secret keys used to
+ encrypt the data exchanged between the nodes, and (ii) a challenge message.
+1. Authentication: the nodes exchange their persistent public keys and a
+ signature of the challenge message produced with their persistent
+ private keys. This allows validating the peer's persistent public key,
+ which plays the role of node ID.
+1. Version handshake: nodes exchange and validate each other's `NodeInfo` records.
+ These records contain, among other fields, their node IDs, the network/chain
+ ID they are part of, and the list of supported channel IDs.
+
+Steps (1) and (2) are implemented in the `conn` package.
+In case of success, they produce the secret connection that is actually used by
+the node to communicate with the peer.
+An overview of this procedure, which implements the station-to-station (STS)
+[protocol][sts-paper] ([PDF][sts-paper-pdf]), can be found [here][peer-sts].
+The maximum duration for establishing a secret connection with the peer is
+defined by `handshakeTimeout`, hard-coded to 3 seconds.
+
+The established secret connection stores the persistent public key of the peer,
+which has been validated via the challenge authentication of step (2).
+If the connection being upgraded is an outbound connection, i.e., if the node has
+dialed the peer, the dialed peer's ID is compared to the peer's persistent public key:
+if they do not match, the connection is rejected.
+This verification is not performed in the case of inbound (accepted) connections,
+as the node does not know a priori the remote node's ID.
+
+Step (3), the version handshake, is performed by the transport.
+Its maximum duration is also defined by `handshakeTimeout`, hard-coded to 3 seconds.
+The version handshake retrieves the `NodeInfo` record of the new peer,
+which can be rejected for multiple reasons, listed [here][peer-handshake].
+
+If the connection upgrade succeeds, the method returns the established secret
+connection, an instance of `conn.SecretConnection` type,
+and the `NodeInfo` record of the peer.
+
+In case of errors, the connection is removed from the table of established
+connections and closed.
+
+### Errors
+
+The timeouts for steps (1) and (2), and for step (3), are configured as the
+deadline for operations on the TCP connection that is being upgraded.
+If this deadline is reached, the connection produces an
+`os.ErrDeadlineExceeded` error, returned by the corresponding step.
+
+Any error produced when establishing a secret connection with the peer (steps 1 and 2) or
+during the version handshake (step 3), including timeouts,
+is encapsulated into an `ErrRejected` error with reason `isAuthFailure` and returned.
+
+If the upgraded connection is an outbound connection, and the peer ID learned in step (2)
+does not match the dialed peer's ID,
+an `ErrRejected` error with reason `isAuthFailure` is returned.
+
+If the peer's `NodeInfo` record, retrieved in step (3), is invalid,
+or if it reports a node ID that does not match the peer ID learned in step (2),
+an `ErrRejected` error with reason `isAuthFailure` is returned.
+If it reports a node ID equal to the local node ID,
+an `ErrRejected` error with reason `isSelf` is returned.
+If it is not compatible with the local `NodeInfo`,
+an `ErrRejected` error with reason `isIncompatible` is returned.
+
+## Close
+
+The `Close` method closes the TCP listener created by the `Listen` method,
+and sends a signal for interrupting the `acceptPeers` routine.
+
+This method is called when a node is [stopped](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L1023).
+
+## Cleanup
+
+The `Cleanup` method receives a `Peer` instance,
+and removes the connection established with a peer from the table of established connections.
+It also invokes the `Peer` interface method to close the connection associated with a peer.
+
+It is invoked when the connection with a peer is closed.
+
+## Supported channels
+
+The `AddChannel` method registers a channel in the transport.
+
+The channel ID is added to the list of supported channel IDs,
+stored in the local `NodeInfo` record.
+
+The `NodeInfo` record is exchanged with peers in the version handshake.
+For this reason, this method is not invoked with a started transport.
+
+> The only call to this method is performed in the `CustomReactors` constructor
+> option of a node, i.e., before the node is started.
+> Note that the default list of supported channel IDs, including the default reactors,
+> is provided to the transport as its original `NodeInfo` record.
+
+[peer-sts]: ../legacy-docs/peer.md#authenticated-encryption-handshake
+[peer-handshake]: ../legacy-docs/peer.md#cometbft-version-handshake
+[sts-paper]: https://link.springer.com/article/10.1007/BF00124891
+[sts-paper-pdf]: https://github.com/tendermint/tendermint/blob/0.1/docs/sts-final.pdf
diff --git a/cometbft/v0.38/spec/p2p/implementation/types.md b/cometbft/v0.38/spec/p2p/implementation/types.md
new file mode 100644
index 00000000..cef26329
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/implementation/types.md
@@ -0,0 +1,233 @@
+# Types adopted in the p2p implementation
+
+This document lists the packages and source files, excluding test units, that
+implement the p2p layer, and summarizes the main types they implement.
+Types play the role of classes in Go.
+
+The reference version for this documentation is the branch
+[`v0.34.x`](https://github.com/cometbft/cometbft/tree/v0.34.x/p2p).
+
+State of August 2022.
+
+## Package `p2p`
+
+Implementation of the p2p layer of CometBFT.
+
+### `base_reactor.go`
+
+`Reactor` interface.
+
+`BaseReactor` implements `Reactor`.
+
+**Not documented yet**.
+
+### `conn_set.go`
+
+`ConnSet` interface, a "lookup table for connections and their ips".
+
+Internal type `connSet` implements the `ConnSet` interface.
+
+Used by the [transport](#transportgo) to store connected peers.
+
+### `errors.go`
+
+Defines several error types.
+
+`ErrRejected` enumerates a number of reasons for which a peer was rejected.
+Mainly produced by the [transport](#transportgo),
+but also by the [switch](#switchgo).
+
+`ErrSwitchDuplicatePeerID` is produced by the `PeerSet` used by the [switch](#switchgo).
+
+`ErrSwitchConnectToSelf` is handled by the [switch](#switchgo),
+but currently is not produced outside tests.
+
+`ErrSwitchAuthenticationFailure` is handled by the [PEX reactor](#pex_reactorgo),
+but currently is not produced outside tests.
+
+`ErrTransportClosed` is produced by the [transport](#transportgo)
+and handled by the [switch](#switchgo).
+
+`ErrNetAddressNoID`, `ErrNetAddressInvalid`, and `ErrNetAddressLookup`
+are errors produced when parsing a string to create an instance of `NetAddress`.
+They can be returned in the setup of the [switch](#switchgo)
+and of the [PEX reactor](#pex_reactorgo),
+as well when the [transport](#transportgo) validates a `NodeInfo`, as part of
+the connection handshake.
+
+`ErrCurrentlyDialingOrExistingAddress` is produced by the [switch](#switchgo),
+and handled by the switch and the [PEX reactor](#pex_reactorgo).
+
+### `fuzz.go`
+
+For testing purposes.
+
+`FuzzedConnection` wraps a `net.Conn` and injects random delays.
+
+### `key.go`
+
+`NodeKey` is the persistent key of a node, namely its private key.
+
+The `ID` of a node is a string representing the node's public key.
+
+### `metrics.go`
+
+Prometheus `Metrics` exposed by the p2p layer.
+
+### `netaddress.go`
+
+Type `NetAddress` contains the `ID` and the network address (IP and port) of a node.
+
+The API of the [address book](#addrbookgo) receives and returns `NetAddress` instances.
+
+This source file was adapted from [`btcd`](https://github.com/btcsuite/btcd),
+a Go implementation of Bitcoin.
+
+### `node_info.go`
+
+Interface `NodeInfo` stores the basic information about a node exchanged with a
+peer during the handshake.
+
+It is implemented by `DefaultNodeInfo` type.
+
+The [switch](#switchgo) stores the local `NodeInfo`.
+
+The `NodeInfo` of connected peers is produced by the
+[transport](#transportgo) during the handshake, and stored in [`Peer`](#peergo) instances.
+
+### `peer.go`
+
+Interface `Peer` represents a connected peer.
+
+It is implemented by the internal `peer` type.
+
+The [transport](#transportgo) API methods return `Peer` instances,
+wrapping established secure connection with peers.
+
+The [switch](#switchgo) API methods receive `Peer` instances.
+The switch stores connected peers in a `PeerSet`.
+
+The [`Reactor`](#base_reactorgo) methods, invoked by the switch, receive `Peer` instances.
+
+### `peer_set.go`
+
+Interface `IPeerSet` offers methods to access a table of [`Peer`](#peergo) instances.
+
+Type `PeerSet` implements a thread-safe table of [`Peer`](#peergo) instances,
+used by the [switch](#switchgo).
+
+The switch provides limited access to this table by returning an `IPeerSet`
+instance, used by the [PEX reactor](#pex_reactorgo).
+
+### `switch.go`
+
+Documented in [switch](./switch.md).
+
+The `Switch` implements the [peer manager](./peer_manager.md) role for inbound peers.
+
+[`Reactor`](#base_reactorgo)s have access to the `Switch` and may invoke its methods.
+This includes the [PEX reactor](#pex_reactorgo).
+
+### `transport.go`
+
+Documented in [transport](./transport.md).
+
+The `Transport` interface is implemented by `MultiplexTransport`.
+
+The [switch](#switchgo) contains a `Transport` and uses it to establish
+connections with peers.
+
+### `types.go`
+
+Aliases for p2p's `conn` package types.
+
+## Package `p2p.conn`
+
+Implements the connection between CometBFT nodes,
+which is encrypted, authenticated, and multiplexed.
+
+### `connection.go`
+
+Implements the `MConnection` type and the `Channel` abstraction.
+
+A `MConnection` multiplexes a generic network connection (`net.Conn`) into
+multiple independent `Channel`s, used by different [`Reactor`](#base_reactorgo)s.
+
+A [`Peer`](#peergo) stores the `MConnection` instance used to interact with a
+peer, which multiplexes a [`SecretConnection`](#secret_connectiongo).
+
+### `conn_go110.go`
+
+Support for go 1.10.
+
+### `secret_connection.go`
+
+Implements the `SecretConnection` type, which is an encrypted authenticated
+connection built atop a raw network (TCP) connection.
+
+A [`Peer`](#peergo) stores the `SecretConnection` established by the transport,
+which is the underlying connection multiplexed by [`MConnection`](#connectiongo).
+
+As briefly documented in the [transport](./transport.md#Connection-Upgrade),
+a `SecretConnection` implements the Station-To-Station (STS) protocol.
+
+The `SecretConnection` type implements the `net.Conn` interface,
+which is a generic network connection.
+
+## Package `p2p.mock`
+
+Mock implementations of [`Peer`](#peergo) and [`Reactor`](#base_reactorgo) interfaces.
+
+## Package `p2p.mocks`
+
+Code generated by `mockery`.
+
+## Package `p2p.pex`
+
+Implementation of the [PEX reactor](./pex.md).
+
+### `addrbook.go`
+
+Documented in [address book](./addressbook.md).
+
+This source file was adapted from [`btcd`](https://github.com/btcsuite/btcd),
+a Go implementation of Bitcoin.
+
+### `errors.go`
+
+A number of errors produced and handled by the [address book](#addrbookgo).
+
+`ErrAddrBookNilAddr` is produced by the address book, but handled (logged) by
+the [PEX reactor](#pex_reactorgo).
+
+`ErrUnsolicitedList` is produced and handled by the [PEX protocol](#pex_reactorgo).
+
+### `file.go`
+
+Implements the [address book](#addrbookgo) persistence.
+
+### `known_address.go`
+
+Type `knownAddress` represents an address stored in the [address book](#addrbookgo).
+
+### `params.go`
+
+Constants used by the [address book](#addrbookgo).
+
+### `pex_reactor.go`
+
+Implementation of the [PEX reactor](./pex.md), which is a [`Reactor`](#base_reactorgo).
+
+This includes the implementation of the [PEX protocol](./pex-protocol.md)
+and of the [peer manager](./peer_manager.md) role for outbound peers.
+
+The PEX reactor also manages an [address book](#addrbookgo) instance.
+
+## Package `p2p.trust`
+
+Go documentation of `Metric` type:
+
+> // Metric - keeps track of peer reliability
+> // See cometbft/docs/architecture/adr-006-trust-metric.md for details
+
+Not imported by any other CometBFT source file.
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/Overview.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/Overview.mdx
new file mode 100644
index 00000000..e2895b5e
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/Overview.mdx
@@ -0,0 +1,16 @@
+---
+order: 1
+title: Legacy Docs
+---
+
+# Legacy Docs
+
+This section contains useful information. However, part of this content is redundant, being more comprehensively covered
+in more recent documents, and some implementation details might be outdated
+(see issue [#981](https://github.com/cometbft/cometbft/issues/981)).
+
+- [Messages](/cometbft/v0.38/spec/p2p/legacy-docs/messages/Overview)
+- [P2P Config](/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Config)
+- [P2P Multiplex Connection](/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Multiplex-Connection)
+- [Peer Discovery](/cometbft/v0.38/spec/p2p/legacy-docs/Peer-Discovery)
+- [Peers](/cometbft/v0.38/spec/p2p/legacy-docs/Peers)
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Config.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Config.mdx
new file mode 100644
index 00000000..34383e62
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Config.mdx
@@ -0,0 +1,53 @@
+---
+order: 1
+---
+
+# P2P Config
+
+Here we describe configuration options around the Peer Exchange.
+These can be set using flags or via the `$CMTHOME/config/config.toml` file.
+
+## Seed Mode
+
+`--p2p.seed_mode`
+
+The node operates in seed mode. In seed mode, a node continuously crawls the network for peers,
+and upon incoming connection shares some peers and disconnects.
+
+## Seeds
+
+`--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”`
+
+Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
+If we already have enough peers in the address book, we may never need to dial them.
+
+## Persistent Peers
+
+`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”`
+
+Dial these peers and auto-redial them if the connection fails.
+These are intended to be trusted persistent peers that can help
+anchor us in the p2p network. The auto-redial uses exponential
+backoff and will give up after a day of trying to connect.
+
+But if `persistent_peers_max_dial_period` is set to a value greater than zero,
+the pause between each dial to each persistent peer will not exceed `persistent_peers_max_dial_period`
+during exponential backoff, and we keep trying to connect without giving up.
+
+**Note:** If `seeds` and `persistent_peers` intersect,
+the user will be warned that seeds may auto-close connections
+and that the node may not be able to keep the connection persistent.
+
+## Private Peers
+
+`--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”`
+
+These are IDs of the peers that we do not add to the address book or gossip to
+other peers. They stay private to us.
+
+## Unconditional Peers
+
+`--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”`
+
+These are IDs of the peers which are allowed to be connected by both inbound or outbound regardless of
+`max_num_inbound_peers` or `max_num_outbound_peers` of user's node reached or not.
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Multiplex-Connection.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Multiplex-Connection.mdx
new file mode 100644
index 00000000..eb255a44
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Multiplex-Connection.mdx
@@ -0,0 +1,115 @@
+---
+order: 1
+---
+
+# P2P Multiplex Connection
+
+## MConnection
+
+`MConnection` is a multiplex connection that supports multiple independent streams
+with distinct quality of service guarantees atop a single TCP connection.
+Each stream is known as a `Channel` and each `Channel` has a globally unique _byte id_.
+Each `Channel` also has a relative priority that determines the quality of service
+of the `Channel` compared to other `Channel`s.
+The _byte id_ and the relative priorities of each `Channel` are configured upon
+initialization of the connection.
+
+The `MConnection` supports three packet types:
+
+- Ping
+- Pong
+- Msg
+
+### Ping and Pong
+
+The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively.
+
+When we haven't received any messages on an `MConnection` in time `pingTimeout`, we send a ping message.
+When a ping is received on the `MConnection`, a pong is sent in response only if there are no other messages
+to send and the peer has not sent us too many pings (TODO).
+
+If a pong or message is not received in sufficient time after a ping, the peer is disconnected from.
+
+### Msg
+
+Messages in channels are chopped into smaller `msgPacket`s for multiplexing.
+
+```go
+type msgPacket struct {
+ ChannelID byte
+ EOF byte // 1 means message ends here.
+ Bytes []byte
+}
+```
+
+The `msgPacket` is serialized using [Proto3](https://developers.google.com/protocol-buffers/docs/proto3).
+The received `Bytes` of a sequential set of packets are appended together
+until a packet with `EOF=1` is received, then the complete serialized message
+is returned for processing by the `onReceive` function of the corresponding channel.
+
+### Multiplexing
+
+Messages are sent from a single `sendRoutine`, which loops over a select statement and results in the sending
+of a ping, a pong, or a batch of data messages. The batch of data messages may include messages from multiple channels.
+Message bytes are queued for sending in their respective channel, with each channel holding one unsent message at a time.
+Messages are chosen for a batch one at a time from the channel with the lowest ratio of recently sent bytes to channel priority.
+
+## Sending Messages
+
+There are two methods for sending messages:
+
+```go
+func (m MConnection) Send(chID byte, msg interface{}) bool {}
+func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
+```
+
+`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
+for the channel with the given id byte `chID`. The message `msg` is serialized
+using protobuf marshalling.
+
+`TrySend(chID, msg)` is a nonblocking call that queues the message msg in the channel
+with the given id byte chID if the queue is not full; otherwise it returns false immediately.
+
+`Send()` and `TrySend()` are also exposed for each `Peer`.
+
+## Peer
+
+Each peer has one `MConnection` instance, and includes other information such as whether the connection
+was outbound, whether the connection should be recreated if it closes, various identity information about the node,
+and other higher level thread-safe data used by the reactors.
+
+## Switch/Reactor
+
+The `Switch` handles peer connections and exposes an API to receive incoming messages
+on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
+or more `Channels`. So while sending outgoing messages is typically performed on the peer,
+incoming messages are received on the reactor.
+
+```go
+// Declare a MyReactor reactor that handles messages on MyChannelID.
+type MyReactor struct{}
+
+func (reactor MyReactor) GetChannels() []*ChannelDescriptor {
+ return []*ChannelDescriptor{ChannelDescriptor{ID:MyChannelID, Priority: 1}}
+}
+
+func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
+ r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error)
+ msgString := ReadString(r, n, err)
+ fmt.Println(msgString)
+}
+
+// Other Reactor methods omitted for brevity
+...
+
+switch := NewSwitch([]Reactor{MyReactor{}})
+
+...
+
+// Send a random message to all outbound connections
+for _, peer := range switch.Peers().List() {
+ if peer.IsOutbound() {
+ peer.Send(MyChannelID, "Here's a random message")
+ }
+}
+```
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/Peer-Discovery.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/Peer-Discovery.mdx
new file mode 100644
index 00000000..492fb56b
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/Peer-Discovery.mdx
@@ -0,0 +1,69 @@
+---
+order: 1
+---
+
+# Peer Discovery
+
+A CometBFT P2P network has different kinds of nodes with different requirements for connectivity to one another.
+This document describes what kind of nodes CometBFT should enable and how they should work.
+
+## Seeds
+
+Seeds are the first point of contact for a new node.
+They return a list of known active peers and then disconnect.
+
+Seeds should operate full nodes with the PEX reactor in a "crawler" mode
+that continuously explores to validate the availability of peers.
+
+Seeds should only respond with some top percentile of the best peers it knows about.
+
+## New Full Node
+
+A new node needs a few things to connect to the network:
+
+- a list of seeds, which can be provided to CometBFT via config file or flags,
+ or hardcoded into the software by in-process apps
+- a `ChainID`, also called `Network` at the p2p layer
+- a recent block height, H, and hash, HASH for the blockchain.
+
+The values `H` and `HASH` must be received and corroborated by means external to CometBFT, and specific to the user - ie. via the user's trusted social consensus.
+This requirement to validate `H` and `HASH` out-of-band and via social consensus
+is the essential difference in security models between Proof-of-Work and Proof-of-Stake blockchains.
+
+With the above, the node then queries some seeds for peers for its chain,
+dials those peers, and runs the CometBFT protocols with those it successfully connects to.
+
+When the peer catches up to height H, it ensures the block hash matches HASH.
+If not, CometBFT will exit, and the user must try again - either they are connected
+to bad peers or their social consensus is invalid.
+
+## Restarted Full Node
+
+A node checks its address book on startup and attempts to connect to peers from there.
+If it can't connect to any peers after some time, it falls back to the seeds to find more.
+
+Restarted full nodes can run the `blockchain` or `consensus` reactor protocols to sync up
+to the latest state of the blockchain from wherever they were last.
+In a Proof-of-Stake context, if they are sufficiently far behind (greater than the length
+of the unbonding period), they will need to validate a recent `H` and `HASH` out-of-band again
+so they know they have synced the correct chain.
+
+## Validator Node
+
+A validator node is a node that interfaces with a validator signing key.
+These nodes require the highest security, and should not accept incoming connections.
+They should maintain outgoing connections to a controlled set of "Sentry Nodes" that serve
+as their proxy shield to the rest of the network.
+
+Validators that know and trust each other can accept incoming connections from one another and maintain direct private connectivity via VPN.
+
+## Sentry Node
+
+Sentry nodes are guardians of a validator node and provide it access to the rest of the network.
+They should be well connected to other full nodes on the network.
+Sentry nodes may be dynamic, but should maintain persistent connections to some evolving random subset of each other.
+They should always expect to have direct incoming connections from the validator node and its backup(s).
+They do not report the validator node's address in the PEX and
+they may be more strict about the quality of peers they keep.
+
+Sentry nodes belonging to validators that trust each other may wish to maintain persistent connections via VPN with one another, but only report each other sparingly in the PEX.
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/Peers.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/Peers.mdx
new file mode 100644
index 00000000..69217a62
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/Peers.mdx
@@ -0,0 +1,132 @@
+---
+order: 1
+---
+
+# Peers
+
+This document explains how CometBFT Peers are identified and how they connect to one another.
+
+## Peer Identity
+
+CometBFT peers are expected to maintain long-term persistent identities in the form of a public key.
+Each peer has an ID defined as `peer.ID == peer.PubKey.Address()`, where `Address` uses the scheme defined in `crypto` package.
+
+A single peer ID can have multiple IP addresses associated with it, but a node
+will only ever connect to one at a time.
+
+When attempting to connect to a peer, we use the PeerURL: `<ID>@<IP>:<PORT>`.
+We will attempt to connect to the peer at IP:PORT, and verify,
+via authenticated encryption, that it is in possession of the private key
+corresponding to `<ID>`. This prevents man-in-the-middle attacks on the peer layer.
+
+## Connections
+
+All p2p connections use TCP.
+Upon establishing a successful TCP connection with a peer,
+two handshakes are performed: one for authenticated encryption, and one for CometBFT versioning.
+Both handshakes have configurable timeouts (they should complete quickly).
+
+### Authenticated Encryption Handshake
+
+CometBFT implements the Station-to-Station protocol
+using X25519 keys for Diffie-Hellman key-exchange and chacha20poly1305 for encryption.
+
+Previous versions of this protocol (0.32 and below) suffered from malleability attacks where an active man-in-the-middle
+attacker could compromise confidentiality as described in [Prime, Order Please!
+Revisiting Small Subgroup and Invalid Curve Attacks on
+Protocols using Diffie-Hellman](https://eprint.iacr.org/2019/526.pdf).
+
+We have added a dependency on Merlin, a Keccak-based transcript hashing protocol, to ensure non-malleability.
+
+It goes as follows:
+
+- generate an ephemeral X25519 keypair
+- send the ephemeral public key to the peer
+- wait to receive the peer's ephemeral public key
+- create a new Merlin Transcript with the string "TENDERMINT_SECRET_CONNECTION_TRANSCRIPT_HASH"
+- Sort the ephemeral keys and add the high labeled "EPHEMERAL_UPPER_PUBLIC_KEY" and the low keys labeled "EPHEMERAL_LOWER_PUBLIC_KEY" to the Merlin transcript.
+- compute the Diffie-Hellman shared secret using the peers ephemeral public key and our ephemeral private key
+- add the DH secret to the transcript labeled DH_SECRET.
+- generate two keys to use for encryption (sending and receiving) and a challenge for authentication as follows:
+ - create a hkdf-sha256 instance with the key being the diffie hellman shared secret, and info parameter as
+ `TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN`
+ - get 64 bytes of output from hkdf-sha256
+ - if we had the smaller ephemeral pubkey, use the first 32 bytes for the key for receiving, the second 32 bytes for sending; else the opposite.
+- use a separate nonce for receiving and sending. Both nonces start at 0, and should support the full 96 bit nonce range
+- all communications from now on are encrypted in 1400 byte frames (plus encoding overhead),
+ using the respective secret and nonce. Each nonce is incremented by one after each use.
+- we now have an encrypted channel, but still need to authenticate
+- extract a 32 bytes challenge from merlin transcript with the label "SECRET_CONNECTION_MAC"
+- sign the common challenge obtained from the hkdf with our persistent private key
+- send the amino encoded persistent pubkey and signature to the peer
+- wait to receive the persistent public key and signature from the peer
+- verify the signature on the challenge using the peer's persistent public key
+
+If this is an outgoing connection (we dialed the peer) and we used a peer ID,
+then finally verify that the peer's persistent public key corresponds to the peer ID we dialed,
+ie. `peer.PubKey.Address() == <ID>`.
+
+The connection has now been authenticated. All traffic is encrypted.
+
+Note: only the dialer can authenticate the identity of the peer,
+but this is what we care about since when we join the network we wish to
+ensure we have reached the intended peer (and are not being MITMd).
+
+### Peer Filter
+
+Before continuing, we check if the new peer has the same ID as ourselves or
+an existing peer. If so, we disconnect.
+
+We also check the peer's address and public key against
+an optional whitelist which can be managed through the ABCI app -
+if the whitelist is enabled and the peer does not qualify, the connection is
+terminated.
+
+### CometBFT Version Handshake
+
+The CometBFT Version Handshake allows the peers to exchange their NodeInfo:
+
+```golang
+type NodeInfo struct {
+ Version p2p.Version
+ ID p2p.ID
+ ListenAddr string
+
+ Network string
+ SoftwareVersion string
+ Channels []int8
+
+ Moniker string
+ Other NodeInfoOther
+}
+
+type Version struct {
+ P2P uint64
+ Block uint64
+ App uint64
+}
+
+type NodeInfoOther struct {
+ TxIndex string
+ RPCAddress string
+}
+```
+
+The connection is disconnected if:
+
+- `peer.NodeInfo.ID` is not equal `peerConn.ID`
+- `peer.NodeInfo.Version.Block` does not match ours
+- `peer.NodeInfo.Network` is not the same as ours
+- `peer.Channels` does not intersect with our known Channels.
+- `peer.NodeInfo.ListenAddr` is malformed or is a DNS host that cannot be
+ resolved
+
+At this point, if we have not disconnected, the peer is valid.
+It is added to the switch and hence all reactors via the `AddPeer` method.
+Note that each reactor may handle multiple channels.
+
+## Connection Activity
+
+Once a peer is added, incoming messages for a given reactor are handled through
+that reactor's `Receive` method, and output messages are sent directly by the Reactors
+on each peer. A typical reactor maintains per-peer go-routine(s) that handle this.
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/messages/Overview.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/messages/Overview.mdx
new file mode 100644
index 00000000..243e0bb6
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/messages/Overview.mdx
@@ -0,0 +1,19 @@
+---
+order: 1
+parent:
+ title: Messages
+ order: 1
+---
+
+# Messages
+
+An implementation of the spec consists of many components. While many parts of these components are implementation specific, the p2p messages are not. In this section we will be covering all the p2p messages of components.
+
+There are two parts to the P2P messages, the message and the channel. The channel is message specific and messages are specific to components of CometBFT. When a node connects to a peer it will tell the other node which channels are available. This notifies the peer what services the connecting node offers. You can read more on channels in [connection.md](/cometbft/v0.38/spec/p2p/legacy-docs/P2P-Multiplex-Connection)
+
+- [Block Sync](/cometbft/v0.38/spec/p2p/legacy-docs/messages/block-sync)
+- [Mempool](/cometbft/v0.38/spec/p2p/legacy-docs/messages/mempool)
+- [Evidence](/cometbft/v0.38/spec/p2p/legacy-docs/messages/evidence)
+- [State Sync](/cometbft/v0.38/spec/p2p/legacy-docs/messages/state-sync)
+- [Pex](/cometbft/v0.38/spec/p2p/legacy-docs/messages/Peer-Exchange)
+- [Consensus](/cometbft/v0.38/spec/p2p/legacy-docs/messages/consensus)
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/messages/Peer-Exchange.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/messages/Peer-Exchange.mdx
new file mode 100644
index 00000000..cab26ac2
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/messages/Peer-Exchange.mdx
@@ -0,0 +1,76 @@
+---
+order: 6
+---
+
+# Peer Exchange
+
+## Channels
+
+Pex has one channel. The channel identifier is listed below.
+
+| Name | Number |
+|------------|--------|
+| PexChannel | 0 |
+
+## Message Types
+
+The current PEX service has two versions. The first uses IP/port pair but since the p2p stack is moving towards a transport agnostic approach,
+node endpoints require a `Protocol` and `Path` hence the V2 version uses a [url](https://golang.org/pkg/net/url/#URL) instead.
+
+### PexRequest
+
+PexRequest is an empty message requesting a list of peers.
+
+> EmptyRequest
+
+### PexResponse
+
+PexResponse is a list of net addresses provided to a peer to dial.
+
+| Name | Type | Description | Field Number |
+|-------|------------------------------------|------------------------------------------|--------------|
+| addresses | repeated [PexAddress](#pexaddress) | List of peer addresses available to dial | 1 |
+
+### PexAddress
+
+PexAddress provides needed information for a node to dial a peer.
+
+| Name | Type | Description | Field Number |
+|------|--------|------------------|--------------|
+| id | string | NodeID of a peer | 1 |
+| ip | string | The IP of a node | 2 |
+| port | port | Port of a peer | 3 |
+
+
+### PexRequestV2
+
+PexRequestV2 is an empty message requesting a list of peers.
+
+> EmptyRequest
+
+### PexResponseV2
+
+PexResponseV2 is a list of net addresses provided to a peer to dial.
+
+| Name | Type | Description | Field Number |
+|-------|------------------------------------|------------------------------------------|--------------|
+| addresses | repeated [PexAddressV2](#pexaddressv2) | List of peer addresses available to dial | 1 |
+
+### PexAddressV2
+
+PexAddressV2 provides needed information for a node to dial a peer.
+
+| Name | Type | Description | Field Number |
+|------|--------|------------------|--------------|
+| url | string | See [golang url](https://golang.org/pkg/net/url/#URL) | 1 |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of four messages.
+
+| Name | Type | Description | Field Number |
+|--------------|---------------------------|------------------------------------------------------|--------------|
+| pex_request | [PexRequest](#pexrequest) | Empty request asking for a list of addresses to dial | 1 |
+| pex_response | [PexResponse](#pexresponse)| List of addresses to dial | 2 |
+| pex_request_v2| [PexRequestV2](#pexrequestv2)| Empty request asking for a list of addresses to dial| 3 |
+| pex_response_v2| [PexResponseV2](#pexresponsev2)| List of addresses to dial | 4 |
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/messages/block-sync.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/messages/block-sync.mdx
new file mode 100644
index 00000000..49afcc41
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/messages/block-sync.mdx
@@ -0,0 +1,70 @@
+---
+order: 2
+---
+
+# Block Sync
+
+## Channel
+
+Block sync has one channel.
+
+| Name | Number |
+|-------------------|--------|
+| BlocksyncChannel | 64 |
+
+## Message Types
+
+There are multiple message types for Block Sync
+
+### BlockRequest
+
+BlockRequest asks a peer for a block at the height specified.
+
+| Name | Type | Description | Field Number |
+|--------|-------|---------------------------|--------------|
+| Height | int64 | Height of requested block | 1 |
+
+### NoBlockResponse
+
+NoBlockResponse notifies the peer requesting a block that the node does not contain it.
+
+| Name | Type | Description | Field Number |
+|--------|-------|---------------------------|--------------|
+| Height | int64 | Height of requested block | 1 |
+
+### BlockResponse
+
+BlockResponse contains the block requested.
+It also contains an extended commit _iff_ vote extensions are enabled at the block's height.
+
+| Name | Type | Description | Field Number |
+|-----------|----------------------------------------------------------------|---------------------------------|--------------|
+| Block | [Block](../../../core/data_structures.md#block) | Requested Block | 1 |
+| ExtCommit | [ExtendedCommit](../../../core/data_structures.md#extendedcommit) | Sender's LastCommit information | 2 |
+
+### StatusRequest
+
+StatusRequest is an empty message that notifies the peer to respond with the highest and lowest blocks it has stored.
+
+> Empty message.
+
+### StatusResponse
+
+StatusResponse responds to a peer with the highest and lowest heights of any block it has in its blockstore.
+
+| Name | Type | Description | Field Number |
+|--------|-------|-------------------------------------------------------------------|--------------|
+| Height | int64 | Current Height of a node | 1 |
+| Base | int64 | First known block, if pruning is enabled it will be higher than 1 | 2 |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of five messages.
+
+| Name | Type | Description | Field Number |
+|-------------------|-------------------------------------|--------------------------------------------------------------|--------------|
+| block_request | [BlockRequest](#blockrequest) | Request a block from a peer | 1 |
+| no_block_response | [NoBlockResponse](#noblockresponse) | Response saying it does not have the requested block | 2 |
+| block_response | [BlockResponse](#blockresponse) | Response with requested block + (optionally) vote extensions | 3 |
+| status_request | [StatusRequest](#statusrequest) | Request the highest and lowest block numbers from a peer | 4 |
+| status_response | [StatusResponse](#statusresponse) | Response with the highest and lowest block numbers in the store | 5 |
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/messages/consensus.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/messages/consensus.mdx
new file mode 100644
index 00000000..f65c68c6
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/messages/consensus.mdx
@@ -0,0 +1,146 @@
+---
+order: 7
+---
+
+# Consensus
+
+## Channel
+
+Consensus has four separate channels. The channel identifiers are listed below.
+
+| Name | Number |
+|--------------------|--------|
+| StateChannel | 32 |
+| DataChannel | 33 |
+| VoteChannel | 34 |
+| VoteSetBitsChannel | 35 |
+
+## Message Types
+
+### Proposal
+
+Proposal is sent when a new block is proposed. It is a suggestion of what the
+next block in the blockchain should be.
+
+| Name | Type | Description | Field Number |
+|----------|----------------------------------------------------|----------------------------------------|--------------|
+| proposal | [Proposal](../../../core/data_structures.md#proposal) | Proposed Block to come to consensus on | 1 |
+
+### Vote
+
+Vote is sent to vote for some block (or to inform others that a process does not vote in the
+current round). Vote contains validator's information (validator address and index), height and
+round for which the vote is sent, vote type, blockID if process vote for some block (`nil` otherwise)
+and a timestamp when the vote is sent. The message is signed by the validator private key.
+
+| Name | Type | Description | Field Number |
+|------|--------------------------------------------|---------------------------|--------------|
+| vote | [Vote](../../../core/data_structures.md#vote) | Vote for a proposed Block | 1 |
+
+### BlockPart
+
+BlockPart is sent when gossiping a piece of the proposed block. It contains height, round
+and the block part.
+
+| Name | Type | Description | Field Number |
+|--------|--------------------------------------------|----------------------------------------|--------------|
+| height | int64 | Height of corresponding block. | 1 |
+| round | int32 | Round of voting to finalize the block. | 2 |
+| part | [Part](../../../core/data_structures.md#part) | A part of the block. | 3 |
+
+### NewRoundStep
+
+NewRoundStep is sent for every step transition during the core consensus algorithm execution.
+It is used in the gossip part of the CometBFT consensus protocol to inform peers about a current
+height/round/step a process is in.
+
+| Name | Type | Description | Field Number |
+|--------------------------|--------|----------------------------------------|--------------|
+| height | int64 | Height of corresponding block | 1 |
+| round | int32 | Round of voting to finalize the block. | 2 |
+| step | uint32 | | 3 |
+| seconds_since_start_time | int64 | | 4 |
+| last_commit_round | int32 | | 5 |
+
+### NewValidBlock
+
+NewValidBlock is sent when a validator observes a valid block B in some round r,
+i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
+It contains height and round in which valid block is observed, block parts header that describes
+the valid block and is used to obtain all
+block parts, and a bit array of the block parts a process currently has, so its peers can know what
+parts it is missing so they can send them.
+In case the block is also committed, then IsCommit flag is set to true.
+
+| Name | Type | Description | Field Number |
+|-----------------------|--------------------------------------------------------------|----------------------------------------|--------------|
+| height | int64 | Height of corresponding block | 1 |
+| round | int32 | Round of voting to finalize the block. | 2 |
+| block_part_set_header | [PartSetHeader](../../../core/data_structures.md#partsetheader) | | 3 |
+| block_parts | int32 | | 4 |
+| is_commit | bool | | 5 |
+
+### ProposalPOL
+
+ProposalPOL is sent when a previous block is re-proposed.
+It is used to inform peers in what round the process learned for this block (ProposalPOLRound),
+and what prevotes for the re-proposed block the process has.
+
+| Name | Type | Description | Field Number |
+|--------------------|----------|-------------------------------|--------------|
+| height | int64 | Height of corresponding block | 1 |
+| proposal_pol_round | int32 | | 2 |
+| proposal_pol | bitarray | | 3 |
+
+### ReceivedVote
+
+ReceivedVote is sent to indicate that a particular vote has been received. It contains height,
+round, vote type and the index of the validator that is the originator of the corresponding vote.
+
+| Name | Type | Description | Field Number |
+|--------|------------------------------------------------------------------|----------------------------------------|--------------|
+| height | int64 | Height of corresponding block | 1 |
+| round | int32 | Round of voting to finalize the block. | 2 |
+| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 |
+| index | int32 | | 4 |
+
+### VoteSetMaj23
+
+VoteSetMaj23 is sent to indicate that a process has seen +2/3 votes for some BlockID.
+It contains height, round, vote type and the BlockID.
+
+| Name | Type | Description | Field Number |
+|--------|------------------------------------------------------------------|----------------------------------------|--------------|
+| height | int64 | Height of corresponding block | 1 |
+| round | int32 | Round of voting to finalize the block. | 2 |
+| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 |
+
+### VoteSetBits
+
+VoteSetBits is sent to communicate the bit-array of votes a process has seen for a given
+BlockID. It contains height, round, vote type, BlockID and a bit array of
+the votes a process has.
+
+| Name | Type | Description | Field Number |
+|----------|------------------------------------------------------------------|----------------------------------------|--------------|
+| height | int64 | Height of corresponding block | 1 |
+| round | int32 | Round of voting to finalize the block. | 2 |
+| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 |
+| block_id | [BlockID](../../../core/data_structures.md#blockid) | | 4 |
+| votes | BitArray | Bit array of the votes the process has for the given BlockID. | 5 |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof).
+
+| Name | Type | Description | Field Number |
+|-----------------|---------------------------------|----------------------------------------|--------------|
+| new_round_step | [NewRoundStep](#newroundstep) | Height of corresponding block | 1 |
+| new_valid_block | [NewValidBlock](#newvalidblock) | Round of voting to finalize the block. | 2 |
+| proposal | [Proposal](#proposal) | | 3 |
+| proposal_pol | [ProposalPOL](#proposalpol) | | 4 |
+| block_part | [BlockPart](#blockpart) | | 5 |
+| vote | [Vote](#vote) | | 6 |
+| received_vote | [ReceivedVote](#receivedvote) | | 7 |
+| vote_set_maj23 | [VoteSetMaj23](#votesetmaj23) | | 8 |
+| vote_set_bits | [VoteSetBits](#votesetbits) | | 9 |
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/messages/evidence.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/messages/evidence.mdx
new file mode 100644
index 00000000..7db104b3
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/messages/evidence.mdx
@@ -0,0 +1,23 @@
+---
+order: 3
+---
+
+# Evidence
+
+## Channel
+
+Evidence has one channel. The channel identifier is listed below.
+
+| Name | Number |
+|-----------------|--------|
+| EvidenceChannel | 56 |
+
+## Message Types
+
+### EvidenceList
+
+EvidenceList consists of a list of verified evidence. This evidence will already have been propagated throughout the network. EvidenceList is used in two places, as a p2p message and within the [block](../../../core/data_structures.md#block) as well.
+
+| Name | Type | Description | Field Number |
+|----------|-------------------------------------------------------------|------------------------|--------------|
+| evidence | repeated [Evidence](../../../core/data_structures.md#evidence) | List of valid evidence | 1 |
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/messages/mempool.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/messages/mempool.mdx
new file mode 100644
index 00000000..8f3925ca
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/messages/mempool.mdx
@@ -0,0 +1,33 @@
+---
+order: 4
+---
+# Mempool
+
+## Channel
+
+Mempool has one channel. The channel identifier is listed below.
+
+| Name | Number |
+|----------------|--------|
+| MempoolChannel | 48 |
+
+## Message Types
+
+There is currently only one message that Mempool broadcasts and receives over
+the p2p gossip network (via the reactor): `TxsMessage`
+
+### Txs
+
+A list of transactions. These transactions have been checked against the application for validity. This does not mean that the transactions are valid, it is up to the application to check this.
+
+| Name | Type | Description | Field Number |
+|------|----------------|----------------------|--------------|
+| txs | repeated bytes | List of transactions | 1 |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of one message [`Txs`](#txs).
+
+| Name | Type | Description | Field Number |
+|------|-------------|-----------------------|--------------|
+| txs | [Txs](#txs) | List of transactions | 1 |
diff --git a/cometbft/v0.38/spec/p2p/legacy-docs/messages/state-sync.mdx b/cometbft/v0.38/spec/p2p/legacy-docs/messages/state-sync.mdx
new file mode 100644
index 00000000..30657ecb
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/legacy-docs/messages/state-sync.mdx
@@ -0,0 +1,132 @@
+---
+order: 5
+---
+
+# State Sync
+
+## Channels
+
+State sync has four distinct channels. The channel identifiers are listed below.
+
+| Name | Number |
+|-------------------|--------|
+| SnapshotChannel | 96 |
+| ChunkChannel | 97 |
+| LightBlockChannel | 98 |
+| ParamsChannel | 99 |
+
+## Message Types
+
+### SnapshotRequest
+
+When a new node begins state syncing, it will ask all peers it encounters if they have any
+available snapshots:
+
+| Name | Type | Description | Field Number |
+|----------|--------|-------------|--------------|
+
+### SnapShotResponse
+
+The receiver will query the local ABCI application via `ListSnapshots`, and send a message
+containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots stored at the application layer. When a peer is starting it will request snapshots.
+
+| Name | Type | Description | Field Number |
+|----------|--------|-----------------------------------------------------------|--------------|
+| height | uint64 | Height at which the snapshot was taken | 1 |
+| format | uint32 | Format of the snapshot. | 2 |
+| chunks | uint32 | How many chunks make up the snapshot | 3 |
+| hash | bytes | Arbitrary snapshot hash | 4 |
+| metadata | bytes | Arbitrary application data. **May be non-deterministic.** | 5 |
+
+### ChunkRequest
+
+The node running state sync will offer these snapshots to the local ABCI application via
+`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot
+is accepted, the state syncer will request snapshot chunks from appropriate peers:
+
+| Name | Type | Description | Field Number |
+|--------|--------|-------------------------------------------------------------|--------------|
+| height | uint64 | Height at which the chunk was created | 1 |
+| format | uint32 | Format chosen for the chunk. **May be non-deterministic.** | 2 |
+| index | uint32 | Index of the chunk within the snapshot. | 3 |
+
+### ChunkResponse
+
+The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`,
+and respond with it (limited to 16 MB):
+
+| Name | Type | Description | Field Number |
+|---------|--------|-------------------------------------------------------------|--------------|
+| height | uint64 | Height at which the chunk was created | 1 |
+| format | uint32 | Format chosen for the chunk. **May be non-deterministic.** | 2 |
+| index | uint32 | Index of the chunk within the snapshot. | 3 |
+| hash | bytes | Arbitrary snapshot hash | 4 |
+| missing | bool | Arbitrary application data. **May be non-deterministic.** | 5 |
+
+Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty
+chunk is a valid (although unlikely) response.
+
+The returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot
+is restored. If a chunk response is not returned within some time, it will be re-requested,
+possibly from a different peer.
+
+The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol.
+
+### LightBlockRequest
+
+To verify state and to provide state relevant information for consensus, the node will ask peers for
+light blocks at specified heights.
+
+| Name | Type | Description | Field Number |
+|----------|--------|----------------------------|--------------|
+| height | uint64 | Height of the light block | 1 |
+
+### LightBlockResponse
+
+The receiver will retrieve and construct the light block from both the block and state stores. The
+receiver will verify the data by comparing the hashes and store the header, commit and validator set
+if necessary. The light block at the height of the snapshot will be used to verify the `AppHash`.
+
+| Name | Type | Description | Field Number |
+|---------------|---------------------------------------------------------|--------------------------------------|--------------|
+| light_block | [LightBlock](../../../core/data_structures.md#lightblock) | Light block at the height requested | 1 |
+
+State sync will use [light client verification](../../../light-client/verification/README.md) to verify
+the light blocks.
+
+If no state sync is in progress (i.e. during normal operation), any unsolicited response messages
+are discarded.
+
+### ParamsRequest
+
+In order to build the state, the state provider will request the params at the height of the snapshot and use the header to verify it.
+
+| Name | Type | Description | Field Number |
+|----------|--------|----------------------------|--------------|
+| height | uint64 | Height of the consensus params | 1 |
+
+
+### ParamsResponse
+
+A receiver of the request will use the state store to fetch the consensus params at that height and return it to the sender.
+
+| Name | Type | Description | Field Number |
+|----------|--------|---------------------------------|--------------|
+| height | uint64 | Height of the consensus params | 1 |
+| consensus_params | [ConsensusParams](../../../core/data_structures.md#consensusparams) | Consensus params at the height requested | 2 |
+
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of eight messages.
+
+| Name | Type | Description | Field Number |
+|----------------------|--------------------------------------------|----------------------------------------------|--------------|
+| snapshots_request | [SnapshotRequest](#snapshotrequest) | Request a recent snapshot from a peer | 1 |
+| snapshots_response | [SnapshotResponse](#snapshotresponse) | Respond with the most recent snapshot stored | 2 |
+| chunk_request | [ChunkRequest](#chunkrequest) | Request chunks of the snapshot. | 3 |
+| chunk_response | [ChunkResponse](#chunkresponse) | Response of chunks used to recreate state. | 4 |
+| light_block_request | [LightBlockRequest](#lightblockrequest) | Request a light block. | 5 |
+| light_block_response | [LightBlockResponse](#lightblockresponse) | Respond with a light block | 6 |
+| params_request | [ParamsRequest](#paramsrequest) | Request the consensus params at a height. | 7 |
+| params_response | [ParamsResponse](#paramsresponse) | Respond with the consensus params | 8 |
diff --git a/cometbft/v0.38/spec/p2p/reactor-api/API-for-Reactors.mdx b/cometbft/v0.38/spec/p2p/reactor-api/API-for-Reactors.mdx
new file mode 100644
index 00000000..fd22b87a
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/reactor-api/API-for-Reactors.mdx
@@ -0,0 +1,335 @@
+---
+order: 3
+---
+
+# API for Reactors
+
+This document describes the API provided by the p2p layer to the protocol
+layer, namely to the registered reactors.
+
+This API consists of two interfaces: the one provided by the `Switch` instance,
+and the ones provided by multiple `Peer` instances, one per connected peer.
+The `Switch` instance is provided to every reactor as part of the reactor's
+[registration procedure][reactor-registration].
+The multiple `Peer` instances are provided to every registered reactor whenever
+a [new connection with a peer][reactor-addpeer] is established.
+
+> **Note**
+>
+> The practical reasons that lead to the interface to be provided in two parts,
+> `Switch` and `Peer` instances are discussed in more detail in the
+> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/switch-peer.md).
+
+## `Switch` API
+
+The [`Switch`][switch-type] is the central component of the p2p layer
+implementation. It manages all the reactors running in a node and keeps track
+of the connections with peers.
+The table below summarizes the interaction of the standard reactors with the `Switch`:
+
+| `Switch` API method | consensus | block sync | state sync | mempool | evidence | PEX |
+|--------------------------------------------|-----------|------------|------------|---------|-----------|-------|
+| `Peers() IPeerSet` | x | x | | | | x |
+| `NumPeers() (int, int, int)` | | x | | | | x |
+| `Broadcast(Envelope) chan bool` | x | x | x | | | |
+| `MarkPeerAsGood(Peer)` | x | | | | | |
+| `StopPeerForError(Peer, interface{})` | x | x | x | x | x | x |
+| `StopPeerGracefully(Peer)` | | | | | | x |
+| `Reactor(string) Reactor` | | x | | | | |
+
+The above list is not exhaustive as it does not include all the `Switch` methods
+invoked by the PEX reactor, a special component that should be considered part
+of the p2p layer. This document does not cover the operation of the PEX reactor
+as a connection manager.
+
+### Peers State
+
+The first two methods in the switch API allow reactors to query the state of
+the p2p layer: the set of connected peers.
+~~~
+
+ func (sw *Switch) Peers() IPeerSet
+~~~
+
+The `Peers()` method returns the current set of connected peers.
+The returned `IPeerSet` is an immutable concurrency-safe copy of this set.
+Observe that the `Peer` handlers returned by this method were previously
+[added to the reactor][reactor-addpeer] via the `InitPeer(Peer)` method,
+but not yet removed via the `RemovePeer(Peer)` method.
+Thus, a priori, reactors should already have this information.
+~~~
+
+ func (sw *Switch) NumPeers() (outbound, inbound, dialing int)
+~~~
+
+The `NumPeers()` method returns the current number of connected peers,
+distinguished between `outbound` and `inbound` peers.
+An `outbound` peer is a peer the node has dialed to, while an `inbound` peer is
+a peer the node has accepted a connection from.
+The third field `dialing` reports the number of peers to which the node is
+currently attempting to connect, so not (yet) connected peers.
+
+> **Note**
+>
+> The third field returned by `NumPeers()`, the number of peers in `dialing`
+> state, is not information that should concern the protocol layer.
+> In fact, with the exception of the PEX reactor, which can be considered part
+> of the p2p layer implementation, no standard reactor actually uses this
+> information, that could be removed when this interface is refactored.
+
+### Broadcast
+
+The switch provides, mostly for historical or retro-compatibility reasons,
+a method for sending a message to all connected peers:
+~~~
+
+ func (sw *Switch) Broadcast(e Envelope) chan bool
+~~~
+
+The `Broadcast()` method is not blocking and returns a channel of booleans.
+For every connected `Peer`, it starts a background thread for sending the
+message to that peer, using the `Peer.Send()` method
+(which is blocking, as detailed in [Send Methods](#send-methods)).
+The result of each unicast send operation (success or failure) is added to the
+returned channel, which is closed when all operations are completed.
+
+> **Note**
+>
+> - The current _implementation_ of the `Switch.Broadcast(Envelope)` method is
+> not efficient, as the marshalling of the provided message is performed as
+> part of the `Peer.Send(Envelope)` helper method, that is, once per
+> connected peer.
+> - The return value of the broadcast method is not considered by any of the
+>   standard reactors that employ the method. One of the reasons is that it is
+> not possible to associate each of the boolean outputs added to the
+> returned channel to a peer.
+
+### Vetting Peers
+
+The p2p layer relies on the registered reactors to gauge the _quality_ of peers.
+The following method can be invoked by a reactor to inform the p2p layer that a
+peer has presented a "good" behaviour.
+This information is registered in the node's address book and influences the
+operation of the Peer Exchange (PEX) protocol, as node discovery adopts a bias
+towards "good" peers:
+~~~
+
+ func (sw *Switch) MarkPeerAsGood(peer Peer)
+~~~
+
+At the moment, it is up to the consensus reactor to vet a peer.
+In the current logic, a peer is marked as good whenever the consensus protocol
+collects a multiple of `votesToContributeToBecomeGoodPeer = 10000` useful votes
+or `blocksToContributeToBecomeGoodPeer = 10000` useful block parts from that peer.
+By "useful", the consensus implementation considers messages that are valid and
+that are received by the node when the node expects such information,
+which excludes duplicated or late received messages.
+
+> **Note**
+>
+> The switch doesn't currently provide a method to mark a peer as a bad peer.
+> In fact, the peer quality management is really implemented in the current
+> version of the p2p layer.
+> This topic is being discussed in the [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/peer-quality.md).
+
+### Stopping Peers
+
+Reactors can instruct the p2p layer to disconnect from a peer.
+Using the p2p layer's nomenclature, the reactor requests a peer to be stopped.
+The peer's send and receive routines are in fact stopped, interrupting the
+communication with the peer.
+The `Peer` is then [removed from every registered reactor][reactor-removepeer],
+using the `RemovePeer(Peer)` method, and from the set of connected peers.
+~~~
+
+ func (sw *Switch) StopPeerForError(peer Peer, reason interface{})
+~~~
+
+All the standard reactors employ the above method for disconnecting from a peer
+in case of errors.
+These are errors that occur when processing a message received from a `Peer`.
+The produced `error` is provided to the method as the `reason`.
+
+The `StopPeerForError()` method has an important *caveat*: if the peer to be
+stopped is configured as a _persistent peer_, the switch will attempt
+reconnecting to that same peer.
+While this behaviour makes sense when the method is invoked by other components
+of the p2p layer (e.g., in the case of communication errors), it does not make
+sense when it is invoked by a reactor.
+
+> **Note**
+>
+> A more comprehensive discussion regarding this topic can be found on the
+> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/stop-peer.md).
+
+ func (sw *Switch) StopPeerGracefully(peer Peer)
+
+The second method instructs the switch to disconnect from a peer for no
+particular reason.
+This method is only adopted by the PEX reactor of a node operating in _seed mode_,
+as seed nodes disconnect from a peer after exchanging peer addresses with it.
+
+### Reactors Table
+
+The switch keeps track of all registered reactors, indexed by unique reactor names.
+A reactor can therefore use the switch to access another `Reactor` from its `name`:
+~~~
+
+ func (sw *Switch) Reactor(name string) Reactor
+~~~
+
+This method is currently only used by the Block Sync reactor to access the
+Consensus reactor implementation, from which it uses the exported
+`SwitchToConsensus()` method.
+While available, this inter-reactor interaction approach is discouraged and
+should be avoided, as it violates the assumption that reactors are independent.
+
+
+## `Peer` API
+
+The [`Peer`][peer-interface] interface represents a connected peer.
+A `Peer` instance encapsulates a multiplex connection that implements the
+actual communication (sending and receiving messages) with a peer.
+When a connection is established with a peer, the `Switch` provides the
+corresponding `Peer` instance to all registered reactors.
+From this point, reactors can use the methods of the new `Peer` instance.
+
+The table below summarizes the interaction of the standard reactors with
+connected peers, with the `Peer` methods used by them:
+
+| `Peer` API method | consensus | block sync | state sync | mempool | evidence | PEX |
+|--------------------------------------------|-----------|------------|------------|---------|-----------|-------|
+| `ID() ID` | x | x | x | x | x | x |
+| `IsRunning() bool` | x | | | x | x | |
+| `Quit() <-chan struct{}` | | | | x | x | |
+| `Get(string) interface{}` | x | | | x | x | |
+| `Set(string, interface{})` | x | | | | | |
+| `Send(Envelope) bool` | x | x | x | x | x | x |
+| `TrySend(Envelope) bool` | x | x | | | | |
+
+The above list is not exhaustive as it does not include all the `Peer` methods
+invoked by the PEX reactor, a special component that should be considered part
+of the p2p layer. This document does not cover the operation of the PEX reactor
+as a connection manager.
+
+### Identification
+
+Nodes in the p2p network are configured with a unique cryptographic key pair.
+The public part of this key pair is verified when establishing a connection
+with the peer, as part of the authentication handshake, and constitutes the
+peer's `ID`:
+~~~
+
+ func (p Peer) ID() p2p.ID
+~~~
+
+Observe that each time the node connects to a peer (e.g., after disconnecting
+from it), a new (distinct) `Peer` handler is provided to the reactors via
+`InitPeer(Peer)` method.
+In fact, the `Peer` handler is associated to a _connection_ with a peer, not to
+the actual _node_ in the network.
+To keep track of actual peers, the unique peer `p2p.ID` provided by the above
+method should be employed.
+
+### Peer state
+
+The switch starts the peer's send and receive routines before adding the peer
+to every registered reactor using the `AddPeer(Peer)` method.
+The reactors then usually start routines to interact with the new connected
+peer using the received `Peer` handler.
+For these routines it is useful to check whether the peer is still connected
+and its send and receive routines are still running:
+~~~
+
+ func (p Peer) IsRunning() bool
+ func (p Peer) Quit() <-chan struct{}
+~~~
+
+The above two methods provide the same information about the state of a `Peer`
+instance in two different ways.
+Both of them are defined in the [`Service`][service-interface] interface.
+The `IsRunning()` method is synchronous and returns whether the peer has been
+started and has not been stopped.
+The `Quit()` method returns a channel that is closed when the peer is stopped;
+it is an asynchronous state query.
+
+### Key-value store
+
+Each `Peer` instance provides a synchronized key-value store that allows
+sharing peer-specific state between reactors:
+
+~~~
+
+ func (p Peer) Get(key string) interface{}
+ func (p Peer) Set(key string, data interface{})
+~~~
+
+This key-value store can be seen as an asynchronous mechanism to exchange the
+state of a peer between reactors.
+In the current use-case of this mechanism, the Consensus reactor populates the
+key-value store with a `PeerState` instance for each connected peer.
+The Consensus reactor routines interacting with a peer read and update the
+shared peer state.
+The Evidence and Mempool reactors, in their turn, periodically query the
+key-value store of each peer for retrieving, in particular, the last height
+reported by the peer.
+This information, produced by the Consensus reactor, influences the interaction
+of these two reactors with their peers.
+
+> **Note**
+>
+> More details of how this key-value store is used to share state between reactors can be found on the
+> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/peer-kvstore.md).
+
+### Send methods
+
+Finally, a `Peer` instance allows a reactor to send messages to companion
+reactors running at that peer.
+This is ultimately the goal of the switch when it provides `Peer` instances to
+the registered reactors.
+There are two methods for sending messages:
+~~~
+
+ func (p Peer) Send(e Envelope) bool
+ func (p Peer) TrySend(e Envelope) bool
+~~~
+
+The two message-sending methods receive an `Envelope`, whose content should be
+set as follows:
+
+- `ChannelID`: the channel the message should be sent through, which defines
+ the reactor that will process the message;
+- `Src`: this field represents the source of an incoming message, which is
+ irrelevant for outgoing messages;
+- `Message`: the actual message's payload, which is marshalled using protocol buffers.
+
+The two message-sending methods attempt to add the message (`e.Payload`) to the
+send queue of the peer's destination channel (`e.ChannelID`).
+There is a send queue for each registered channel supported by the peer, and
+each send queue has a capacity.
+The capacity of the send queues for each channel are [configured][reactor-channels]
+by reactors via the corresponding `ChannelDescriptor`.
+
+The two message-sending methods return whether it was possible to enqueue
+the marshalled message to the channel's send queue.
+The most common reason for these methods to return `false` is the channel's
+send queue being full.
+Further reasons for returning `false` are: the peer being stopped, providing a
+non-registered channel ID, or errors when marshalling the message's payload.
+
+The difference between the two message-sending methods is _when_ they return `false`.
+The `Send()` method is a _blocking_ method, it returns `false` if the message
+could not be enqueued, because the channel's send queue is still full, after a
+10-second _timeout_.
+The `TrySend()` method is a _non-blocking_ method, it _immediately_ returns
+`false` when the channel's send queue is full.
+
+[peer-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/peer.go
+[service-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/libs/service/service.go
+[switch-type]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/switch.go
+
+[reactor-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/base_reactor.go
+[reactor-registration]: ./reactor.md#registration
+[reactor-channels]: ./reactor.md#registration
+[reactor-addpeer]: ./reactor.md#peer-management
+[reactor-removepeer]: ./reactor.md#stop-peer
diff --git a/cometbft/v0.38/spec/p2p/reactor-api/Reactor-Api.mdx b/cometbft/v0.38/spec/p2p/reactor-api/Reactor-Api.mdx
new file mode 100644
index 00000000..a7862fae
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/reactor-api/Reactor-Api.mdx
@@ -0,0 +1,234 @@
+---
+order: 2
+---
+
+# Reactor API
+
+A component has to implement the [`p2p.Reactor` interface][reactor-interface]
+in order to use communication services provided by the p2p layer.
+This interface is currently the main source of documentation for a reactor.
+
+The goal of this document is to specify the behaviour of the p2p communication
+layer when interacting with a reactor.
+So while the [`Reactor interface`][reactor-interface] declares the methods
+invoked and determines what the p2p layer expects from a reactor,
+this documentation focuses on the **temporal behaviour** that a reactor implementation
+should expect from the p2p layer. (That is, in which order the functions may be called)
+
+This specification is accompanied by the [`reactor.qnt`](./reactor.qnt) file,
+a more comprehensive model of the reactor's operation written in
+[Quint][quint-repo], an executable specification language.
+The methods declared in the [`Reactor`][reactor-interface] interface are
+modeled in Quint, in the form of `pure def` methods, providing some examples of
+how they should be implemented.
+The behaviour of the p2p layer when interacting with a reactor, by invoking the
+interface methods, is modeled in the form of state transitions, or `action`s in
+the Quint nomenclature.
+
+## Overview
+
+The following _grammar_ is a simplified representation of the expected sequence of calls
+from the p2p layer to a reactor.
+Note that the grammar represents events referring to a _single reactor_, while
+the p2p layer supports the execution of multiple reactors.
+For a more detailed representation of the sequence of calls from the p2p layer
+to reactors, please refer to the companion Quint model.
+
+While useful to provide an overview of the operation of a reactor,
+grammars have some limitations in terms of the behaviour they can express.
+For instance, the following grammar only represents the management of _a single peer_,
+namely of a peer with a given ID which can connect, disconnect, and reconnect
+multiple times to the node.
+The p2p layer and every reactor should be able to handle multiple distinct peers in parallel.
+This means that multiple occurrences of non-terminal `peer-management` of the
+grammar below can "run" independently and in parallel, each one referring and
+producing events associated to a different peer:
+
+```abnf
+start = registration on-start *peer-management on-stop
+registration = get-channels set-switch
+
+; Refers to a single peer, a reactor must support multiple concurrent peers
+peer-management = init-peer start-peer stop-peer
+start-peer = [*receive] (connected-peer / start-error)
+connected-peer = add-peer *receive
+stop-peer = [peer-error] remove-peer
+
+; Service interface
+on-start = %s"OnStart()"
+on-stop = %s"OnStop()"
+; Reactor interface
+get-channels = %s"GetChannels()"
+set-switch = %s"SetSwitch(*Switch)"
+init-peer = %s"InitPeer(Peer)"
+add-peer = %s"AddPeer(Peer)"
+remove-peer = %s"RemovePeer(Peer, reason)"
+receive = %s"Receive(Envelope)"
+
+; Errors, for reference
+start-error = %s"log(Error starting peer)"
+peer-error = %s"log(Stopping peer for error)"
+```
+
+The grammar is written in case-sensitive Augmented Backus–Naur form (ABNF,
+specified in [IETF RFC 7405](https://datatracker.ietf.org/doc/html/rfc7405)).
+It is inspired by the grammar produced to specify the interaction of CometBFT
+with an ABCI++ application, available [here](../../abci/abci%2B%2B_comet_expected_behavior.md).
+
+## Registration
+
+To become a reactor, a component has first to implement the
+[`Reactor`][reactor-interface] interface,
+then to register the implementation with the p2p layer, using the
+`Switch.AddReactor(name string, reactor Reactor)` method,
+with a global unique `name` for the reactor.
+
+The registration must happen before the node, in general, and the p2p layer,
+in particular, are started.
+In other words, there is no support for registering a reactor on a running node:
+reactors must be registered as part of the setup of a node.
+
+```abnf
+registration = get-channels set-switch
+```
+
+The p2p layer retrieves from the reactor a list of channels the reactor is
+responsible for, using the `GetChannels()` method.
+The reactor implementation should thereafter expect the delivery of every
+message received by the p2p layer in the informed channels.
+
+The second method `SetSwitch(Switch)` concludes the handshake between the
+reactor and the p2p layer.
+The `Switch` is the main component of the p2p layer, being responsible for
+establishing connections with peers and routing messages.
+The `Switch` instance provides a number of methods for all registered reactors,
+documented in the companion [API for Reactors](./p2p-api.md#switch-api) document.
+
+## Service interface
+
+A reactor must implement the [`Service`](https://github.com/cometbft/cometbft/blob/v0.38.x/libs/service/service.go) interface,
+in particular, a startup method `OnStart()` and a shutdown method `OnStop()`:
+
+```abnf
+start = registration on-start *peer-management on-stop
+```
+
+As part of the startup of a node, all registered reactors are started by the p2p layer.
+And when the node is shut down, all registered reactors are stopped by the p2p layer.
+Observe that the `Service` interface specification establishes that a service
+can be started and stopped only once.
+So before being started or once stopped by the p2p layer, the reactor should
+not expect any interaction.
+
+## Peer management
+
+The core of a reactor's operation is the interaction with peers or, more
+precisely, with companion reactors operating on the same channels in peers connected to the node.
+The grammar extract below represents the interaction of the reactor with a
+single peer:
+
+```abnf
+; Refers to a single peer, a reactor must support multiple concurrent peers
+peer-management = init-peer start-peer stop-peer
+```
+
+The p2p layer informs all registered reactors when it establishes a connection
+with a `Peer`, using the `InitPeer(Peer)` method.
+When this method is invoked, the `Peer` has not yet been started, namely the
+routines for sending messages to and receiving messages from the peer are not running.
+This method should be used to initialize state or data related to the new
+peer, but not to interact with it.
+
+The next step is to start the communication routines with the new `Peer`.
+As detailed in the following, this procedure may or may not succeed.
+In any case, the peer is eventually stopped, which concludes the management of
+that `Peer` instance.
+
+## Start peer
+
+Once `InitPeer(Peer)` is invoked for every registered reactor, the p2p layer starts the peer's
+communication routines and adds the `Peer` to the set of connected peers.
+If both steps are concluded without errors, the reactor's `AddPeer(Peer)` is invoked:
+
+```abnf
+start-peer = [*receive] (connected-peer / start-error)
+connected-peer = add-peer *receive
+```
+
+In case of errors, a message is logged informing that the p2p layer failed to start the peer.
+This is not a common scenario and it is only expected to happen when
+interacting with a misbehaving or slow peer. A practical example is reported on this
+[issue](https://github.com/tendermint/tendermint/pull/9500).
+
+It is up to the reactor to define how to process the `AddPeer(Peer)` event.
+The typical behavior is to start routines that, given some conditions or events,
+send messages to the added peer, using the provided `Peer` instance.
+The companion [API for Reactors](./p2p-api.md#peer-api) documents the methods
+provided by `Peer` instances, available from when they are added to the reactors.
+
+## Stop Peer
+
+The p2p layer informs all registered reactors when it disconnects from a `Peer`,
+using the `RemovePeer(Peer, reason)` method:
+
+```abnf
+stop-peer = [peer-error] remove-peer
+```
+
+This method is invoked after the p2p layer has stopped peer's send and receive routines.
+Depending on the `reason` for which the peer was stopped, different log
+messages can be produced.
+After removing a peer from all reactors, the `Peer` instance is also removed from
+the set of connected peers.
+This enables the same peer to reconnect and `InitPeer(Peer)` to be invoked for
+the new connection.
+
+Upon the removal of a `Peer`, the reactor should not receive any further messages
+from the peer and must not try sending messages to the removed peer.
+This usually means stopping the routines that were started by the companion
+`AddPeer(Peer)` method.
+
+## Receive messages
+
+The main duty of a reactor is to handle incoming messages on the channels it
+has registered with the p2p layer.
+
+The _pre-condition_ for receiving a message from a `Peer` is that the p2p layer
+has previously invoked `InitPeer(Peer)`.
+This means that the reactor must be able to receive a message from a `Peer`
+_before_ `AddPeer(Peer)` is invoked.
+This happens because the peer's send and receive routines are started before,
+and should be already running when the p2p layer adds the peer to every
+registered reactor.
+
+```abnf
+start-peer = [*receive] (connected-peer / start-error)
+connected-peer = add-peer *receive
+```
+
+The most common scenario, however, is to start receiving messages from a peer
+after `AddPeer(Peer)` is invoked.
+An arbitrary number of messages can be received, until the peer is stopped and
+`RemovePeer(Peer)` is invoked.
+
+When a message is received from a connected peer on any of the channels
+registered by the reactor, the p2p layer will deliver the message to the
+reactor via the `Receive(Envelope)` method.
+The message is packed into an `Envelope` that contains:
+
+- `ChannelID`: the channel the message belongs to
+- `Src`: the source `Peer` handler, from which the message was received
+- `Message`: the actual message's payload, unmarshalled using protocol buffers
+
+Two important observations regarding the implementation of the `Receive` method:
+
+1. Concurrency: the implementation should consider concurrent invocations of
+ the `Receive` method carrying messages from different peers, as the
+ interaction with different peers is independent and messages can be received in parallel.
+1. Non-blocking: the implementation of the `Receive` method is expected not to block,
+ as it is invoked directly by the receive routines.
+ In other words, while `Receive` does not return, other messages from the
+ same sender are not delivered to any reactor.
+
+[reactor-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/base_reactor.go
+[quint-repo]: https://github.com/informalsystems/quint
diff --git a/cometbft/v0.38/spec/p2p/reactor-api/Reactors.mdx b/cometbft/v0.38/spec/p2p/reactor-api/Reactors.mdx
new file mode 100644
index 00000000..b8c27a58
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/reactor-api/Reactors.mdx
@@ -0,0 +1,47 @@
+---
+order: 1
+---
+
+# Reactors
+
+Reactor is the generic name for a component that employs the p2p communication layer.
+
+This section documents the interaction of the p2p communication layer with the
+reactors.
+The diagram below summarizes this interaction, namely the **northbound interface**
+of the p2p communication layer, representing some relevant event flows:
+
+
+
+Each of the protocols running a CometBFT node implements a reactor and registers
+the implementation with the p2p layer.
+The p2p layer provides network events to the registered reactors, the main
+two being new connections with peers and received messages.
+The reactors provide to the p2p layer messages to be sent to
+peers and commands to control the operation of the p2p layer.
+
+It is worth noting that the components depicted in the diagram below run
+multiple routines and that the illustrated actions happen in parallel.
+For instance, the connection establishment routines run in parallel, invoking
+the depicted `AddPeer` method concurrently.
+Once a connection is fully established, each `Peer` instance runs a send and a
+receive routine.
+The send routine collects messages from multiple reactors to a peer, packaging
+them into raw messages which are transmitted to the peer.
+The receive routine processes incoming messages and forwards them to the
+destination reactors, invoking the depicted `Receive` methods.
+In addition, the reactors run multiple routines for interacting
+with the peers (for example, to send messages to them) or with the `Switch`.
+
+The remainder of the documentation is organized as follows:
+
+- [Reactor API](./reactor.md): documents the [`p2p.Reactor`][reactor-interface]
+ interface and specifies the behaviour of the p2p layer when interacting with
+ a reactor.
+ In other words, the interaction of the p2p layer with the protocol layer (bottom-up).
+
+- [P2P API](./p2p-api.md): documents the interface provided by the p2p
+ layer to the reactors, through the `Switch` and `Peer` abstractions.
+ In other words, the interaction of the protocol layer with the p2p layer (top-down).
+
+[reactor-interface]: https://github.com/cometbft/cometbft/blob/v0.38.x/p2p/base_reactor.go
diff --git a/cometbft/v0.38/spec/p2p/reactor-api/reactor.qnt b/cometbft/v0.38/spec/p2p/reactor-api/reactor.qnt
new file mode 100644
index 00000000..002c5702
--- /dev/null
+++ b/cometbft/v0.38/spec/p2p/reactor-api/reactor.qnt
@@ -0,0 +1,276 @@
+// -*- mode: Bluespec; -*-
+/*
+ * Reactor is responsible for handling incoming messages on one or more
+ * Channel. Switch calls GetChannels when reactor is added to it. When a new
+ * peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
+ * when the peer is stopped. Receive is called when a message is received on a
+ * channel associated with this reactor.
+ */
+// Code: https://github.com/cometbft/cometbft/blob/main/p2p/base_reactor.go
+module reactor {
+
+ // Unique ID of a node.
+ type NodeID = str
+
+ /*
+ * Peer is an interface representing a peer connected on a reactor.
+ */
+ type Peer = {
+ ID: NodeID,
+
+ // Other fields can be added to represent the p2p operation.
+ }
+
+ // Byte ID used by channels, must be globally unique.
+ type Byte = str
+
+ // Channel configuration.
+ type ChannelDescriptor = {
+ ID: Byte,
+ Priority: int,
+ }
+
+ /*
+ * Envelope contains a message with sender routing info.
+ */
+ type Envelope = {
+ Src: Peer, // Sender
+ Message: str, // Payload
+ ChannelID: Byte,
+ }
+
+ // A Routine is used to interact with an active Peer.
+ type Routine = {
+ name: str,
+ peer: Peer,
+ }
+
+ type ReactorState = {
+ // Peers that have been initialized but not yet removed.
+ // The reactor should expect receiving messages from them.
+ peers: Set[NodeID],
+
+ // The reactor runs multiple routines.
+ routines: Set[Routine],
+
+ // Values: init -> registered -> running -> stopped
+ state: str,
+
+ // Name with which the reactor was registered.
+ name: str,
+
+ // Channels the reactor is responsible for.
+ channels: Set[ChannelDescriptor],
+ }
+
+ // Produces a new, uninitialized reactor.
+ pure def NewReactor(): ReactorState = {
+ {
+ peers: Set(),
+ routines: Set(),
+ state: "init",
+ name: "",
+ channels: Set(),
+ }
+ }
+
+ // Pure definitions below represent the `p2p.Reactor` interface methods:
+
+ /*
+ * GetChannels returns the list of MConnection.ChannelDescriptor. Make sure
+ * that each ID is unique across all the reactors added to the switch.
+ */
+ pure def GetChannels(s: ReactorState): Set[ChannelDescriptor] = {
+ s.channels // Static list, configured at initialization.
+ }
+
+ /*
+ * SetSwitch allows setting a switch.
+ */
+ pure def SetSwitch(s: ReactorState, switch: bool): ReactorState = {
+ s.with("state", "registered")
+ }
+
+ /*
+ * Start the service.
+ * If it's already started or stopped, will return an error.
+ */
+ pure def OnStart(s: ReactorState): ReactorState = {
+ // Startup procedures should come here.
+ s.with("state", "running")
+ }
+
+ /*
+ * Stop the service.
+ * If it's already stopped, will return an error.
+ */
+ pure def OnStop(s: ReactorState): ReactorState = {
+ // Shutdown procedures should come here.
+ s.with("state", "stopped")
+ }
+
+ /*
+ * InitPeer is called by the switch before the peer is started. Use it to
+ * initialize data for the peer (e.g. peer state).
+ */
+ pure def InitPeer(s: ReactorState, peer: Peer): (ReactorState, Peer) = {
+ // This method can update the received peer, which is returned.
+ val updatedPeer = peer
+ (s.with("peers", s.peers.union(Set(peer.ID))), updatedPeer)
+ }
+
+ /*
+ * AddPeer is called by the switch after the peer is added and successfully
+ * started. Use it to start goroutines communicating with the peer.
+ */
+ pure def AddPeer(s: ReactorState, peer: Peer): ReactorState = {
+ // This method can be used to start routines to handle the peer.
+ // Below an example of an arbitrary 'ioRoutine' routine.
+ val startedRoutines = Set( {name: "ioRoutine", peer: peer} )
+ s.with("routines", s.routines.union(startedRoutines))
+ }
+
+ /*
+ * RemovePeer is called by the switch when the peer is stopped (due to error
+ * or other reason).
+ */
+ pure def RemovePeer(s: ReactorState, peer: Peer, reason: str): ReactorState = {
+ // This method should stop routines created by `AddPeer(Peer)`.
+ val stoppedRoutines = s.routines.filter(r => r.peer.ID == peer.ID)
+ s.with("peers", s.peers.exclude(Set(peer.ID)))
+ .with("routines", s.routines.exclude(stoppedRoutines))
+ }
+
+ /*
+ * Receive is called by the switch when an envelope is received from any connected
+ * peer on any of the channels registered by the reactor.
+ */
+ pure def Receive(s: ReactorState, e: Envelope): ReactorState = {
+ // This method should process the message payload: e.Message.
+ s
+ }
+
+ // Global state
+
+ // Reactors are uniquely identified by their names.
+ var reactors: str -> ReactorState
+
+ // Reactor (name) assigned to each channel ID.
+ var reactorsByCh: Byte -> str
+
+ // Helper action to (only) update the state of a given reactor.
+ action updateReactorTo(reactor: ReactorState): bool = all {
+ reactors' = reactors.set(reactor.name, reactor),
+ reactorsByCh' = reactorsByCh
+ }
+
+ // State transitions performed by the p2p layer, invoking `p2p.Reactor` methods:
+
+ // Code: Switch.AddReactor(name string, reactor Reactor)
+ action register(name: str, reactor: ReactorState): bool = all {
+ reactor.state == "init",
+ // Assign the reactor as responsible for its channel IDs, which
+ // should not be already assigned to another reactor.
+ val chIDs = reactor.GetChannels().map(c => c.ID)
+ all {
+ size(chIDs.intersect(reactorsByCh.keys())) == 0,
+ reactorsByCh' = reactorsByCh.keys().union(chIDs).
+ mapBy(id => if (id.in(chIDs)) name
+ else reactorsByCh.get(id)),
+ },
+ // Register the reactor by its name, which must be unique.
+ not(name.in(reactors.keys())),
+ reactors' = reactors.put(name,
+ reactor.SetSwitch(true).with("name", name))
+ }
+
+ // Code: Switch.OnStart()
+ action start(reactor: ReactorState): bool = all {
+ reactor.state == "registered",
+ updateReactorTo(reactor.OnStart())
+ }
+
+ // Code: Switch.addPeer(p Peer): preamble
+ action initPeer(reactor: ReactorState, peer: Peer): bool = all {
+ reactor.state == "running",
+ not(peer.ID.in(reactor.peers)),
+ updateReactorTo(reactor.InitPeer(peer)._1)
+ }
+
+ // Code: Switch.addPeer(p Peer): conclusion
+ action addPeer(reactor: ReactorState, peer: Peer): bool = all {
+ reactor.state == "running",
+ peer.ID.in(reactor.peers), // InitPeer(peer) and not RemovePeer(peer)
+ reactor.routines.filter(r => r.peer.ID == peer.ID).size() == 0,
+ updateReactorTo(reactor.AddPeer(peer))
+ }
+
+ // Code: Switch.stopAndRemovePeer(peer Peer, reason interface{})
+ action removePeer(reactor: ReactorState, peer: Peer, reason: str): bool = all {
+ reactor.state == "running",
+ peer.ID.in(reactor.peers), // InitPeer(peer) and not RemovePeer(peer)
+ // Routines might not be started, namely: not AddPeer(peer)
+        // Routines could also be already stopped if the Peer has errored.
+ updateReactorTo(reactor.RemovePeer(peer, reason))
+ }
+
+ // Code: Peer type, onReceive := func(chID byte, msgBytes []byte)
+ action receive(reactor: ReactorState, e: Envelope): bool = all {
+ reactor.state == "running",
+ // The message's sender is an active peer
+ e.Src.ID.in(reactor.peers),
+ // Reactor is assigned to the message's channel ID
+ e.ChannelID.in(reactorsByCh.keys()),
+ reactorsByCh.get(e.ChannelID) == reactor.name,
+ reactor.GetChannels().exists(c => c.ID == e.ChannelID),
+ updateReactorTo(reactor.Receive(e))
+ }
+
+ // Code: Switch.OnStop()
+ action stop(reactor: ReactorState): bool = all {
+ reactor.state == "running",
+ // Either no peer was added or all peers were removed
+ reactor.peers.size() == 0,
+ updateReactorTo(reactor.OnStop())
+ }
+
+ // Simulation support
+
+ action init = all {
+ reactors' = Map(),
+ reactorsByCh' = Map(),
+ }
+
+ // Modelled reactor configuration
+ pure val reactorName = "myReactor"
+ pure val reactorChannels = Set({ID: "3", Priority: 1}, {ID: "7", Priority: 2})
+
+ // For retro-compatibility: the state of the modelled reactor
+ def state(): ReactorState = {
+ reactors.get(reactorName)
+ }
+
+ pure val samplePeers = Set({ID: "p1"}, {ID: "p3"})
+ pure val sampleChIDs = Set("1", "3", "7") // ChannelID 1 not registered
+ pure val sampleMsgs = Set("ping", "pong")
+
+ action step = any {
+ register(reactorName, NewReactor.with("channels", reactorChannels)),
+ val reactor = reactors.get(reactorName)
+ any {
+ reactor.start(),
+ reactor.stop(),
+ nondet peer = oneOf(samplePeers)
+ any {
+ // Peer-specific actions
+ reactor.initPeer(peer),
+ reactor.addPeer(peer),
+ reactor.removePeer(peer, "no reason"),
+ reactor.receive({Src: peer,
+ ChannelID: oneOf(sampleChIDs),
+ Message: oneOf(sampleMsgs)}),
+ }
+ }
+ }
+
+}
diff --git a/cometbft/v0.38/spec/rpc/Rpc-Spe.mdx b/cometbft/v0.38/spec/rpc/Rpc-Spe.mdx
new file mode 100644
index 00000000..ff9ce2ce
--- /dev/null
+++ b/cometbft/v0.38/spec/rpc/Rpc-Spe.mdx
@@ -0,0 +1,1264 @@
+---
+order: 1
+parent:
+ title: RPC
+ order: 6
+---
+
+# RPC spec
+
+This file defines the JSON-RPC spec of CometBFT. This is meant to be implemented by all clients.
+
+## Support
+
+ | | [CometBFT](https://github.com/cometbft/cometbft/) | [Tendermint-Rs](https://github.com/informalsystems/tendermint-rs) |
+ |--------------|:----------------------------------------------------------:|:----------------------------------------------------------------:|
+ | JSON-RPC 2.0 | ✅ | ✅ |
+ | HTTP | ✅ | ✅ |
+ | HTTPS | ✅ | ❌ |
+ | WS | ✅ | ✅ |
+
+ | Routes | [CometBFT](https://github.com/cometbft/cometbft/) | [Tendermint-Rs](https://github.com/informalsystems/tendermint-rs) |
+ |-----------------------------------------|:----------------------------------------------------------:|:-----------------------------------------------------------------:|
+ | [Health](#health) | ✅ | ✅ |
+ | [Status](#status) | ✅ | ✅ |
+ | [NetInfo](#netinfo) | ✅ | ✅ |
+ | [Blockchain](#blockchain) | ✅ | ✅ |
+ | [Block](#block) | ✅ | ✅ |
+ | [BlockByHash](#blockbyhash) | ✅ | ❌ |
+ | [BlockResults](#blockresults) | ✅ | ✅ |
+ | [Commit](#commit) | ✅ | ✅ |
+ | [Validators](#validators) | ✅ | ✅ |
+ | [Genesis](#genesis) | ✅ | ✅ |
+ | [GenesisChunked](#genesischunked) | ✅ | ❌ |
+ | [ConsensusParams](#consensusparams) | ✅ | ❌ |
+ | [UnconfirmedTxs](#unconfirmedtxs) | ✅ | ❌ |
+ | [NumUnconfirmedTxs](#numunconfirmedtxs) | ✅ | ❌ |
+ | [Tx](#tx) | ✅ | ❌ |
+ | [BroadCastTxSync](#broadcasttxsync) | ✅ | ✅ |
+ | [BroadCastTxAsync](#broadcasttxasync) | ✅ | ✅ |
+ | [ABCIInfo](#abciinfo) | ✅ | ✅ |
+ | [ABCIQuery](#abciquery) | ✅ | ✅ |
+ | [BroadcastTxAsync](#broadcasttxasync) | ✅ | ✅ |
+ | [BroadcastEvidence](#broadcastevidence) | ✅ | ✅ |
+
+## Timestamps
+
+Timestamps in the RPC layer of CometBFT follow RFC3339Nano. The RFC3339Nano format removes trailing zeros from the seconds field.
+
+This means if a block has a timestamp like: `1985-04-12T23:20:50.5200000Z`, the value returned in the RPC will be `1985-04-12T23:20:50.52Z`.
+
+
+
+## Info Routes
+
+### Health
+
+Node heartbeat
+
+#### Parameters
+
+None
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/health
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"health\"}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": -1,
+ "result": {}
+}
+```
+
+### Status
+
+Get CometBFT status including node info, pubkey, latest block hash, app hash, block height and time.
+
+#### Parameters
+
+None
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/status
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"status\"}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": -1,
+ "result": {
+ "node_info": {
+ "protocol_version": {
+ "p2p": "8",
+ "block": "11",
+ "app": "0"
+ },
+ "id": "b93270b358a72a2db30089f3856475bb1f918d6d",
+ "listen_addr": "tcp://0.0.0.0:26656",
+ "network": "cosmoshub-4",
+ "version": "v0.34.8",
+ "channels": "40202122233038606100",
+ "moniker": "aib-hub-node",
+ "other": {
+ "tx_index": "on",
+ "rpc_address": "tcp://0.0.0.0:26657"
+ }
+ },
+ "sync_info": {
+ "latest_block_hash": "50F03C0EAACA8BCA7F9C14189ACE9C05A9A1BBB5268DB63DC6A3C848D1ECFD27",
+ "latest_app_hash": "2316CFF7644219F4F15BEE456435F280E2B38955EEA6D4617CCB6D7ABF781C22",
+ "latest_block_height": "5622165",
+ "latest_block_time": "2021-03-25T14:00:43.356134226Z",
+ "earliest_block_hash": "1455A0C15AC49BB506992EC85A3CD4D32367E53A087689815E01A524231C3ADF",
+ "earliest_app_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855",
+ "earliest_block_height": "5200791",
+ "earliest_block_time": "2019-12-11T16:11:34Z",
+ "catching_up": false
+ },
+ "validator_info": {
+ "address": "38FB765D0092470989360ECA1C89CD06C2C1583C",
+ "pub_key": {
+ "type": "tendermint/PubKeyEd25519",
+ "value": "Z+8kntVegi1sQiWLYwFSVLNWqdAUGEy7lskL78gxLZI="
+ },
+ "voting_power": "0"
+ }
+ }
+}
+```
+
+### NetInfo
+
+Network information
+
+#### Parameters
+
+None
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/net_info
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"net_info\"}"
+```
+
+#### Response
+
+```json
+{
+ "id": 0,
+ "jsonrpc": "2.0",
+ "result": {
+ "listening": true,
+ "listeners": [
+ "Listener(@)"
+ ],
+ "n_peers": "1",
+ "peers": [
+ {
+ "node_id": "5576458aef205977e18fd50b274e9b5d9014525a",
+ "url": "tcp://5576458aef205977e18fd50b274e9b5d9014525a@95.179.155.35:26656"
+ }
+ ]
+ }
+}
+```
+
+### Blockchain
+
+Get block headers. Returned in descending order. May be limited in quantity.
+
+#### Parameters
+
+- `minHeight (integer)`: The lowest block to be returned in the response
+- `maxHeight (integer)`: The highest block to be returned in the response
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/blockchain
+
+curl http://127.0.0.1:26657/blockchain?minHeight=1&maxHeight=2
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"blockchain\",\"params\":{\"minHeight\":\"1\", \"maxHeight\":\"2\"}}"
+```
+
+#### Response
+
+```json
+{
+ "id": 0,
+ "jsonrpc": "2.0",
+ "result": {
+ "last_height": "1276718",
+ "block_metas": [
+ {
+ "block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "block_size": 1000000,
+ "header": {
+ "version": {
+ "block": "10",
+ "app": "0"
+ },
+ "chain_id": "cosmoshub-2",
+ "height": "12",
+ "time": "2019-04-22T17:01:51.701356223Z",
+ "last_block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812",
+ "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73",
+ "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0",
+ "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0",
+ "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8",
+ "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C",
+ "last_results_hash": "",
+ "evidence_hash": "",
+ "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E"
+ },
+ "num_txs": "54"
+ }
+ ]
+ }
+}
+```
+
+### Block
+
+Get block at a specified height.
+
+#### Parameters
+
+- `height (integer)`: height of the requested block. If no height is specified the latest block will be used.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/block
+
+curl http://127.0.0.1:26657/block?height=1
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block\",\"params\":{\"height\":\"1\"}}"
+```
+
+#### Response
+
+```json
+{
+ "id": 0,
+ "jsonrpc": "2.0",
+ "result": {
+ "block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "block": {
+ "header": {
+ "version": {
+ "block": "10",
+ "app": "0"
+ },
+ "chain_id": "cosmoshub-2",
+ "height": "12",
+ "time": "2019-04-22T17:01:51.701356223Z",
+ "last_block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812",
+ "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73",
+ "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0",
+ "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0",
+ "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8",
+ "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C",
+ "last_results_hash": "",
+ "evidence_hash": "",
+ "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E"
+ },
+ "data": [
+ "yQHwYl3uCkKoo2GaChRnd+THLQ2RM87nEZrE19910Z28ABIUWW/t8AtIMwcyU0sT32RcMDI9GF0aEAoFdWF0b20SBzEwMDAwMDASEwoNCgV1YXRvbRIEMzEwMRCd8gEaagom61rphyEDoJPxlcjRoNDtZ9xMdvs+lRzFaHe2dl2P5R2yVCWrsHISQKkqX5H1zXAIJuC57yw0Yb03Fwy75VRip0ZBtLiYsUqkOsPUoQZAhDNP+6LY+RUwz/nVzedkF0S29NZ32QXdGv0="
+ ],
+ "evidence": [
+ {
+ "type": "string",
+ "height": 0,
+ "time": 0,
+ "total_voting_power": 0,
+ "validator": {
+ "pub_key": {
+ "type": "tendermint/PubKeyEd25519",
+ "value": "A6DoBUypNtUAyEHWtQ9bFjfNg8Bo9CrnkUGl6k6OHN4="
+ },
+ "voting_power": 0,
+ "address": "string"
+ }
+ }
+ ],
+ "last_commit": {
+ "height": 0,
+ "round": 0,
+ "block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "signatures": [
+ {
+ "type": 2,
+ "height": "1262085",
+ "round": 0,
+ "block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "timestamp": "2019-08-01T11:39:38.867269833Z",
+ "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F",
+ "validator_index": 0,
+ "signature": "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg=="
+ }
+ ]
+ }
+ }
+ }
+}
+```
+
+### BlockByHash
+
+#### Parameters
+
+- `hash (string)`: Hash of the block to query for.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/block_by_hash?hash=0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block_by_hash\",\"params\":{\"hash\":\"0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED\"}}"
+```
+
+#### Response
+
+```json
+{
+ "id": 0,
+ "jsonrpc": "2.0",
+ "result": {
+ "block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "block": {
+ "header": {
+ "version": {
+ "block": "10",
+ "app": "0"
+ },
+ "chain_id": "cosmoshub-2",
+ "height": "12",
+ "time": "2019-04-22T17:01:51.701356223Z",
+ "last_block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812",
+ "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73",
+ "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0",
+ "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0",
+ "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8",
+ "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C",
+ "last_results_hash": "",
+ "evidence_hash": "",
+ "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E"
+ },
+ "data": [
+ "yQHwYl3uCkKoo2GaChRnd+THLQ2RM87nEZrE19910Z28ABIUWW/t8AtIMwcyU0sT32RcMDI9GF0aEAoFdWF0b20SBzEwMDAwMDASEwoNCgV1YXRvbRIEMzEwMRCd8gEaagom61rphyEDoJPxlcjRoNDtZ9xMdvs+lRzFaHe2dl2P5R2yVCWrsHISQKkqX5H1zXAIJuC57yw0Yb03Fwy75VRip0ZBtLiYsUqkOsPUoQZAhDNP+6LY+RUwz/nVzedkF0S29NZ32QXdGv0="
+ ],
+ "evidence": [
+ {
+ "type": "string",
+ "height": 0,
+ "time": 0,
+ "total_voting_power": 0,
+ "validator": {
+ "pub_key": {
+ "type": "tendermint/PubKeyEd25519",
+ "value": "A6DoBUypNtUAyEHWtQ9bFjfNg8Bo9CrnkUGl6k6OHN4="
+ },
+ "voting_power": 0,
+ "address": "string"
+ }
+ }
+ ],
+ "last_commit": {
+ "height": 0,
+ "round": 0,
+ "block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "signatures": [
+ {
+ "type": 2,
+ "height": "1262085",
+ "round": 0,
+ "block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "timestamp": "2019-08-01T11:39:38.867269833Z",
+ "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F",
+ "validator_index": 0,
+ "signature": "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg=="
+ }
+ ]
+ }
+ }
+ }
+}
+```
+
+### BlockResults
+
+#### Parameters
+
+- `height (integer)`: Height of the block which contains the results. If no height is specified, the latest block height will be used
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/block_results
+
+
+curl http://127.0.0.1:26657/block_results?height=1
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block_results\",\"params\":{\"height\":\"1\"}}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "height": "12",
+ "total_gas_used": "100",
+ "txs_results": [
+ {
+ "code": "0",
+ "data": "",
+ "log": "not enough gas",
+ "info": "",
+ "gas_wanted": "100",
+ "gas_used": "100",
+ "events": [
+ {
+ "type": "app",
+ "attributes": [
+ {
+ "key": "YWN0aW9u",
+ "value": "c2VuZA==",
+ "index": false
+ }
+ ]
+ }
+ ],
+ "codespace": "ibc"
+ }
+ ],
+ "begin_block_events": [
+ {
+ "type": "app",
+ "attributes": [
+ {
+ "key": "YWN0aW9u",
+ "value": "c2VuZA==",
+ "index": false
+ }
+ ]
+ }
+ ],
+ "end_block": [
+ {
+ "type": "app",
+ "attributes": [
+ {
+ "key": "YWN0aW9u",
+ "value": "c2VuZA==",
+ "index": false
+ }
+ ]
+ }
+ ],
+ "validator_updates": [
+ {
+ "pub_key": {
+ "type": "tendermint/PubKeyEd25519",
+ "value": "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM="
+ },
+ "power": "300"
+ }
+ ],
+ "consensus_params_updates": {
+ "block": {
+ "max_bytes": "22020096",
+ "max_gas": "1000",
+ "time_iota_ms": "1000"
+ },
+ "evidence": {
+ "max_age": "100000"
+ },
+ "validator": {
+ "pub_key_types": [
+ "ed25519"
+ ]
+ }
+ }
+ }
+}
+```
+
+### Commit
+
+#### Parameters
+
+- `height (integer)`: Height of the block the requested commit pertains to. If no height is set the latest commit will be returned.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/commit
+
+
+curl http://127.0.0.1:26657/commit?height=1
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"commit\",\"params\":{\"height\":\"1\"}}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "signed_header": {
+ "header": {
+ "version": {
+ "block": "10",
+ "app": "0"
+ },
+ "chain_id": "cosmoshub-2",
+ "height": "12",
+ "time": "2019-04-22T17:01:51.701356223Z",
+ "last_block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812",
+ "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73",
+ "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0",
+ "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0",
+ "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8",
+ "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C",
+ "last_results_hash": "",
+ "evidence_hash": "",
+ "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E"
+ },
+ "commit": {
+ "height": "1311801",
+ "round": 0,
+ "block_id": {
+ "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7",
+ "parts": {
+ "total": 1,
+ "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD"
+ }
+ },
+ "signatures": [
+ {
+ "block_id_flag": 2,
+ "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F",
+ "timestamp": "2019-04-22T17:01:58.376629719Z",
+ "signature": "14jaTQXYRt8kbLKEhdHq7AXycrFImiLuZx50uOjs2+Zv+2i7RTG/jnObD07Jo2ubZ8xd7bNBJMqkgtkd0oQHAw=="
+ }
+ ]
+ }
+ },
+ "canonical": true
+ }
+}
+```
+
+### Validators
+
+#### Parameters
+
+- `height (integer)`: Block height at which the validators were present on. If no height is set the latest commit will be returned.
+- `page (integer)`: Page number (1-based) of the paginated validator list
+- `per_page (integer)`: Number of validators returned per page
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/validators
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"validators\",\"params\":{\"height\":\"1\", \"page\":\"1\", \"per_page\":\"20\"}}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "block_height": "55",
+ "validators": [
+ {
+ "address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F",
+ "pub_key": {
+ "type": "tendermint/PubKeyEd25519",
+ "value": "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM="
+ },
+ "voting_power": "239727",
+ "proposer_priority": "-11896414"
+ }
+ ],
+ "count": "1",
+ "total": "25"
+ }
+}
+```
+
+### Genesis
+
+Get Genesis of the chain. If the response is large, this operation
+will return an error: use `genesis_chunked` instead.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/genesis
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"genesis\"}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "genesis": {
+ "genesis_time": "2019-04-22T17:00:00Z",
+ "chain_id": "cosmoshub-2",
+ "initial_height": "2",
+ "consensus_params": {
+ "block": {
+ "max_bytes": "22020096",
+ "max_gas": "1000",
+ "time_iota_ms": "1000"
+ },
+ "evidence": {
+ "max_age": "100000"
+ },
+ "validator": {
+ "pub_key_types": [
+ "ed25519"
+ ]
+ }
+ },
+ "validators": [
+ {
+ "address": "B00A6323737F321EB0B8D59C6FD497A14B60938A",
+ "pub_key": {
+ "type": "tendermint/PubKeyEd25519",
+ "value": "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM="
+ },
+ "power": "9328525",
+ "name": "Certus One"
+ }
+ ],
+ "app_hash": "",
+ "app_state": {}
+ }
+ }
+}
+```
+
+### GenesisChunked
+
+Get the genesis document in chunks to support easily transferring larger documents.
+
+#### Parameters
+
+- `chunk` (integer): the index number of the chunk that you wish to
+ fetch. These IDs are 0 indexed.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/genesis_chunked?chunk=0
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"genesis_chunked\",\"params\":{\"chunk\":0}}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "chunk": 0,
+ "total": 10,
+ "data": "dGVuZGVybWludAo="
+ }
+}
+```
+
+### ConsensusParams
+
+Get the consensus parameters.
+
+#### Parameters
+
+- `height (integer)`: Block height at which the consensus params would like to be fetched for.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/consensus_params
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"consensus_params\"}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "block_height": "1",
+ "consensus_params": {
+ "block": {
+ "max_bytes": "22020096",
+ "max_gas": "1000",
+ "time_iota_ms": "1000"
+ },
+ "evidence": {
+ "max_age": "100000"
+ },
+ "validator": {
+ "pub_key_types": [
+ "ed25519"
+ ]
+ }
+ }
+ }
+}
+```
+
+### UnconfirmedTxs
+
+Get a list of unconfirmed transactions.
+
+#### Parameters
+
+- `limit (integer)` The amount of txs to respond with.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/unconfirmed_txs
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"unconfirmed_txs\", \"params\":{\"limit\":\"20\"}}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "n_txs": "82",
+ "total": "82",
+ "total_bytes": "19974",
+ "txs": [
+ "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA="
+ ]
+ }
+}
+```
+
+### NumUnconfirmedTxs
+
+Get data about unconfirmed transactions.
+
+#### Parameters
+
+None
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/num_unconfirmed_txs
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"num_unconfirmed_txs\"}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "n_txs": "31",
+ "total": "82",
+ "total_bytes": "19974"
+ }
+}
+```
+
+### Tx
+
+#### Parameters
+
+- `hash (string)`: The hash of the transaction
+- `prove (bool)`: If the response should include proof the transaction was included in a block.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl "http://127.0.0.1:26657/tx?hash=0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED&prove=true"
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"tx\",\"params\":{\"hash\":\"0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED\",\"prove\":true}}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "hash": "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED",
+ "height": "1000",
+ "index": 0,
+ "tx_result": {
+ "log": "[{\"msg_index\":\"0\",\"success\":true,\"log\":\"\"}]",
+ "gas_wanted": "200000",
+ "gas_used": "28596",
+ "tags": [
+ {
+ "key": "YWN0aW9u",
+ "value": "c2VuZA==",
+ "index": false
+ }
+ ]
+ },
+ "tx": "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU="
+ }
+}
+```
+
+## Transaction Routes
+
+### BroadCastTxSync
+
+Returns with the response from CheckTx. Does not wait for DeliverTx result.
+
+#### Parameters
+
+- `tx (string)`: The transaction encoded
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/broadcast_tx_sync?tx=encoded_tx
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_tx_sync\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "code": "0",
+ "data": "",
+ "log": "",
+ "codespace": "ibc",
+ "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E"
+ },
+ "error": ""
+}
+```
+
+### BroadcastTxAsync
+
+Returns right away, with no response. Does not wait for CheckTx nor DeliverTx results.
+
+#### Parameters
+
+- `tx (string)`: The transaction encoded
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/broadcast_tx_async?tx=encoded_tx
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_tx_async\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "code": "0",
+ "data": "",
+ "log": "",
+ "codespace": "ibc",
+ "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E"
+ },
+ "error": ""
+}
+```
+
+### CheckTx
+
+Checks the transaction without executing it.
+
+#### Parameters
+
+- `tx (string)`: String of the encoded transaction
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/check_tx?tx=encoded_tx
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"check_tx\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}"
+```
+
+#### Response
+
+```json
+{
+ "id": 0,
+ "jsonrpc": "2.0",
+ "error": "",
+ "result": {
+ "code": "0",
+ "data": "",
+ "log": "",
+ "info": "",
+ "gas_wanted": "1",
+ "gas_used": "0",
+ "events": [
+ {
+ "type": "app",
+ "attributes": [
+ {
+ "key": "YWN0aW9u",
+ "value": "c2VuZA==",
+ "index": false
+ }
+ ]
+ }
+ ],
+ "codespace": "bank"
+ }
+}
+```
+
+## ABCI Routes
+
+### ABCIInfo
+
+Get some info about the application.
+
+#### Parameters
+
+None
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/abci_info
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"abci_info\"}"
+```
+
+#### Response
+
+```json
+{
+ "jsonrpc": "2.0",
+ "id": 0,
+ "result": {
+ "response": {
+ "data": "{\"size\":0}",
+ "version": "0.16.1",
+ "app_version": "1314126"
+ }
+ }
+}
+```
+
+### ABCIQuery
+
+Query the application for some information.
+
+#### Parameters
+
+- `path (string)`: Path to the data. This is defined by the application.
+- `data (string)`: The data requested
+- `height (integer)`: Height at which the data is being requested for.
+- `prove (bool)`: Include a proof of the transaction's inclusion in the block
+
+#### Request
+
+##### HTTP
+
+```sh
+curl 'http://127.0.0.1:26657/abci_query?path="a/b/c"&data=IHAVENOIDEA&height=1&prove=true'
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"abci_query\",\"params\":{\"path\":\"a/b/c\", \"data\":\"IHAVENOIDEA\", \"height\":\"1\", \"prove\":true}}"
+```
+
+#### Response
+
+```json
+{
+ "error": "",
+ "result": {
+ "response": {
+ "log": "exists",
+ "height": "0",
+ "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C",
+ "value": "61626364",
+ "key": "61626364",
+ "index": "-1",
+ "code": "0"
+ }
+ },
+ "id": 0,
+ "jsonrpc": "2.0"
+}
+```
+
+## Evidence Routes
+
+### BroadcastEvidence
+
+Broadcast evidence of the misbehavior.
+
+#### Parameters
+
+- `evidence (string)`: The JSON encoded evidence of misbehavior to broadcast
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://localhost:26657/broadcast_evidence?evidence=JSON_EVIDENCE_encoded
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_evidence\",\"params\":{\"evidence\":\"JSON_EVIDENCE_encoded\"}}"
+```
+
+#### Response
+
+```json
+{
+ "error": "",
+ "result": "",
+ "id": 0,
+ "jsonrpc": "2.0"
+}
+```
diff --git a/docs.json b/docs.json
index b0a1747c..86bcccf7 100644
--- a/docs.json
+++ b/docs.json
@@ -123,6 +123,219 @@
"pages": [
"index"
]
+ },
+ {
+ "dropdown": "CometBFT",
+ "versions": [
+ {
+ "version": "v0.38",
+ "tabs": [
+ {
+ "tab": "Learn",
+ "groups": [
+ {
+ "group": "CometBFT",
+ "pages": [
+ "cometbft/v0.38/docs/README",
+ "cometbft/v0.38/docs/introduction/intro"
+ ]
+ },
+ {
+ "group": "Guides",
+ "pages": [
+ "cometbft/v0.38/docs/guides/Quick-Start",
+ "cometbft/v0.38/docs/guides/Install-CometBFT",
+ "cometbft/v0.38/docs/guides/Creating-an-application-in-Go",
+ "cometbft/v0.38/docs/guides/Creating-a-built-in-application-in-Go"
+ ]
+ },
+ {
+ "group": "Apps",
+ "pages": [
+ "cometbft/v0.38/docs/app-dev/Getting-Started",
+ "cometbft/v0.38/docs/app-dev/Application-Architecture-Guide",
+ "cometbft/v0.38/docs/app-dev/Using-ABCI-CLI",
+ "cometbft/v0.38/docs/app-dev/Indexing-Transactions"
+ ]
+ },
+ {
+ "group": "Core",
+ "pages": [
+ "cometbft/v0.38/docs/core/Using-CometBFT",
+ "cometbft/v0.38/docs/core/Running-in-production",
+ "cometbft/v0.38/docs/core/configuration",
+ "cometbft/v0.38/docs/core/mempool",
+ "cometbft/v0.38/docs/core/block-sync",
+ "cometbft/v0.38/docs/core/state-sync",
+ "cometbft/v0.38/docs/core/RPC",
+ "cometbft/v0.38/docs/core/Subscribing-to-events-via-Websocket",
+ "cometbft/v0.38/docs/core/metrics",
+ "cometbft/v0.38/docs/core/Validators",
+ "cometbft/v0.38/docs/core/light-client",
+ "cometbft/v0.38/docs/core/block-structure",
+ "cometbft/v0.38/docs/core/how-to-read-logs"
+ ]
+ },
+ {
+ "group": "Tools",
+ "pages": [
+ "cometbft/v0.38/docs/tools/Overview",
+ "cometbft/v0.38/docs/tools/debugging"
+ ]
+ },
+ {
+ "group": "Networks",
+ "pages": [
+ "cometbft/v0.38/docs/networks/Overview",
+ "cometbft/v0.38/docs/networks/Docker-Compose"
+ ]
+ },
+ {
+ "group": "CometBFT Quality Assurance",
+ "pages": [
+ "cometbft/v0.38/docs/qa/CometBFT-QA",
+ "cometbft/v0.38/docs/qa/Method",
+ "cometbft/v0.38/docs/qa/CometBFT-QA-38",
+ "cometbft/v0.38/docs/qa/CometBFT-QA-37",
+ "cometbft/v0.38/docs/qa/CometBFT-QA-34",
+ "cometbft/v0.38/docs/qa/TMCore-QA-37",
+ "cometbft/v0.38/docs/qa/TMCore-QA-34"
+ ]
+ },
+ {
+ "group": "Architecture & ADRs",
+ "pages": [
+ "cometbft/v0.38/docs/architecture/README",
+ "cometbft/v0.38/docs/architecture/adr-111-nop-mempool",
+ "cometbft/v0.38/docs/architecture/adr-template"
+ ]
+ },
+ {
+ "group": "RFCs",
+ "pages": [
+ "cometbft/v0.38/docs/rfc/README",
+ "cometbft/v0.38/docs/rfc/rfc-100-abci-vote-extension-propag",
+ "cometbft/v0.38/docs/rfc/rfc-template"
+ ]
+ }
+ ]
+ },
+ {
+ "tab": "Specification",
+ "groups": [
+ {
+ "group": "CometBFT Spec",
+ "pages": [
+ "cometbft/v0.38/spec/CometBFT-Spec"
+ ]
+ },
+ {
+ "group": "Core",
+ "pages": [
+ "cometbft/v0.38/spec/core/Overview",
+ "cometbft/v0.38/spec/core/Data_structures",
+ "cometbft/v0.38/spec/core/encoding",
+ "cometbft/v0.38/spec/core/genesis",
+ "cometbft/v0.38/spec/core/state"
+ ]
+ },
+ {
+ "group": "ABCI++",
+ "pages": [
+ "cometbft/v0.38/spec/abci/Overview",
+ "cometbft/v0.38/spec/abci/Outline",
+ "cometbft/v0.38/spec/abci/Methods",
+ "cometbft/v0.38/spec/abci/Requirements-for-the-Application",
+ "cometbft/v0.38/spec/abci/CometBFTs-expected-behavior",
+ "cometbft/v0.38/spec/abci/Client-and-server",
+ "cometbft/v0.38/spec/abci/Introduction"
+ ]
+ },
+ {
+ "group": "Consensus",
+ "pages": [
+ "cometbft/v0.38/spec/consensus/Overview",
+ "cometbft/v0.38/spec/consensus/Consensus-Paper",
+ "cometbft/v0.38/spec/consensus/Byzantine-Consensus-Algorithm",
+ "cometbft/v0.38/spec/consensus/Light-Client",
+ "cometbft/v0.38/spec/consensus/Creating-Proposal",
+ "cometbft/v0.38/spec/consensus/BFT-Time",
+ "cometbft/v0.38/spec/consensus/Proposer-Selection",
+ "cometbft/v0.38/spec/consensus/Evidence",
+ "cometbft/v0.38/spec/consensus/Validator-Signing",
+ "cometbft/v0.38/spec/consensus/WAL"
+ ]
+ },
+ {
+ "group": "Light Client",
+ "pages": [
+ "cometbft/v0.38/spec/light-client/Light-Client-Specification",
+ "cometbft/v0.38/spec/light-client/verification",
+ "cometbft/v0.38/spec/light-client/Fork-Detection",
+ "cometbft/v0.38/spec/light-client/Accountability"
+ ]
+ },
+ {
+ "group": "P2P",
+ "pages": [
+ "cometbft/v0.38/spec/p2p/Peer-to-Peer",
+ "cometbft/v0.38/spec/p2p/Implementation-of-the-p2p-layer",
+ {
+ "group": "Legacy Docs",
+ "pages": [
+ "cometbft/v0.38/spec/p2p/legacy-docs/Overview",
+ {
+ "group": "Messages",
+ "pages": [
+ "cometbft/v0.38/spec/p2p/legacy-docs/messages/Overview",
+ "cometbft/v0.38/spec/p2p/legacy-docs/messages/block-sync",
+ "cometbft/v0.38/spec/p2p/legacy-docs/messages/evidence",
+ "cometbft/v0.38/spec/p2p/legacy-docs/messages/mempool",
+ "cometbft/v0.38/spec/p2p/legacy-docs/messages/state-sync",
+ "cometbft/v0.38/spec/p2p/legacy-docs/messages/Peer-Exchange",
+ "cometbft/v0.38/spec/p2p/legacy-docs/messages/consensus"
+ ]
+ },
+ "cometbft/v0.38/spec/p2p/legacy-docs/P2P-Multiplex-Connection",
+ "cometbft/v0.38/spec/p2p/legacy-docs/Peers",
+ "cometbft/v0.38/spec/p2p/legacy-docs/P2P-Config",
+ "cometbft/v0.38/spec/p2p/legacy-docs/Peer-Discovery",
+ {
+ "group": "Reactors",
+ "pages": [
+ "cometbft/v0.38/spec/p2p/reactor-api/Reactors",
+ "cometbft/v0.38/spec/p2p/reactor-api/Reactor-Api",
+ "cometbft/v0.38/spec/p2p/reactor-api/API-for-Reactors"
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "group": "RPC",
+ "pages": [
+ "cometbft/v0.38/spec/rpc/Rpc-Spe"
+ ]
+ },
+ {
+ "group": "Blockchain",
+ "pages": [
+ "cometbft/v0.38/spec/blockchain/Blockchain"
+ ]
+ },
+ {
+ "group": "Ivy Proofs",
+ "pages": [
+ "cometbft/v0.38/spec/ivy-proofs/README"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "icon": "/assets/icons/sdk.svg"
},
{
"dropdown": "Cosmos SDK",
diff --git a/versions.json b/versions.json
index 6ff8c271..a4e11882 100644
--- a/versions.json
+++ b/versions.json
@@ -35,6 +35,14 @@
"defaultVersion": "next",
"repository": "cosmos/ibc-go",
"changelogPath": "CHANGELOG.md"
+ },
+ "cometbft": {
+ "versions": [
+ "v0.38"
+ ],
+ "defaultVersion": "v0.38",
+ "repository": "cometbft/cometbft",
+ "changelogPath": "CHANGELOG.md"
}
}
}