From 35b0268995b1605244bbe8a6de014d1f46fbe0c2 Mon Sep 17 00:00:00 2001 From: n0cte Date: Thu, 29 Jun 2023 17:56:52 +0400 Subject: [PATCH 01/14] add app creator for vm --- vm/vm.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/vm/vm.go b/vm/vm.go index a07226755..20077f996 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -133,10 +133,16 @@ type VM struct { indexerService *txindex.IndexerService clock mockable.Clock + + appCreator func(ids.ID) (abciTypes.Application, error) } func NewVM(app abciTypes.Application) *VM { - return &VM{app: app} + return &VM{app: app, appCreator: nil} +} + +func NewVMWithAppCreator(creator func(chainID ids.ID) (abciTypes.Application, error)) *VM { + return &VM{app: nil, appCreator: creator} } func (vm *VM) Initialize( @@ -150,6 +156,14 @@ func (vm *VM) Initialize( fxs []*common.Fx, appSender common.AppSender, ) error { + if vm.appCreator != nil { + app, err := vm.appCreator(chainCtx.ChainID) + if err != nil { + return err + } + vm.app = app + } + vm.ctx = chainCtx vm.tmLogger = log.NewTMLogger(vm.ctx.Log) vm.dbManager = dbManager From 7ad6d64a9c77c349815b79190c72f10472bec9d6 Mon Sep 17 00:00:00 2001 From: n0cte Date: Thu, 6 Jul 2023 17:07:39 +0400 Subject: [PATCH 02/14] add another impl for vm --- types/block.go | 50 +- vm/block.go | 124 ++-- vm/database.go | 20 +- vm/funcs.go | 402 +++++++++++ vm/service.go | 76 ++- vm/vm.go | 934 ++++++++++++-------------- vm_/block.go | 95 +++ {vm => vm_}/block_utils.go | 0 {vm => vm_}/cmd/main.go | 0 {vm => vm_}/data/vm_test_genesis.json | 0 vm_/database.go | 119 ++++ {vm => vm_}/scripts/build.sh | 0 {vm => vm_}/scripts/build_test.sh | 0 vm_/service.go | 727 ++++++++++++++++++++ {vm => vm_}/service_test.go | 0 {vm => vm_}/service_utils.go | 0 vm_/vm.go | 700 +++++++++++++++++++ {vm => vm_}/vm_test.go | 0 18 files changed, 2640 insertions(+), 607 deletions(-) create mode 100644 vm/funcs.go create mode 100644 vm_/block.go rename {vm => vm_}/block_utils.go (100%) rename {vm => vm_}/cmd/main.go (100%) rename {vm => vm_}/data/vm_test_genesis.json (100%) create mode 100644 vm_/database.go rename {vm => vm_}/scripts/build.sh (100%) rename {vm => vm_}/scripts/build_test.sh (100%) create mode 100644 vm_/service.go rename {vm => vm_}/service_test.go (100%) rename {vm => vm_}/service_utils.go (100%) create mode 100644 vm_/vm.go rename {vm => vm_}/vm_test.go (100%) diff --git a/types/block.go b/types/block.go index 19f3a4ee6..84d55205b 100644 --- a/types/block.go +++ b/types/block.go @@ -68,16 +68,16 @@ func (b *Block) ValidateBasic() error { if b.LastCommit == nil { return errors.New("nil LastCommit") } - if err := b.LastCommit.ValidateBasic(); err != nil { - return fmt.Errorf("wrong LastCommit: %v", err) - } + //if err := b.LastCommit.ValidateBasic(); err != nil { + // return fmt.Errorf("wrong LastCommit: %v", err) + //} - if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", - b.LastCommit.Hash(), - b.LastCommitHash, - ) - } + //if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { + // return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", + // b.LastCommit.Hash(), + // b.LastCommitHash, + // ) + //} // NOTE: b.Data.Txs may be nil, but b.Data.Hash() still works fine. if !bytes.Equal(b.DataHash, b.Data.Hash()) { @@ -89,18 +89,18 @@ func (b *Block) ValidateBasic() error { } // NOTE: b.Evidence.Evidence may be nil, but we're just looping. 
- for i, ev := range b.Evidence.Evidence { - if err := ev.ValidateBasic(); err != nil { - return fmt.Errorf("invalid evidence (#%d): %v", i, err) - } - } + //for i, ev := range b.Evidence.Evidence { + // if err := ev.ValidateBasic(); err != nil { + // return fmt.Errorf("invalid evidence (#%d): %v", i, err) + // } + //} - if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - return fmt.Errorf("wrong Header.EvidenceHash. Expected %v, got %v", - b.EvidenceHash, - b.Evidence.Hash(), - ) - } + //if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { + // return fmt.Errorf("wrong Header.EvidenceHash. Expected %v, got %v", + // b.EvidenceHash, + // b.Evidence.Hash(), + // ) + //} return nil } @@ -972,11 +972,11 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { } sigs := make([]CommitSig, len(cp.Signatures)) - for i := range cp.Signatures { - if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { - return nil, err - } - } + //for i := range cp.Signatures { + // if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { + // return nil, err + // } + //} commit.Signatures = sigs commit.Height = cp.Height diff --git a/vm/block.go b/vm/block.go index 771687cb2..407d86a97 100644 --- a/vm/block.go +++ b/vm/block.go @@ -2,6 +2,7 @@ package vm import ( "context" + "fmt" "time" "github.com/ava-labs/avalanchego/ids" @@ -11,85 +12,106 @@ import ( ) var ( - _ snowman.Block = &Block{} + _ choices.Decidable = (*Block)(nil) + _ snowman.Block = (*Block)(nil) ) -// Block implements the snowman.Block interface type Block struct { - id ids.ID - tmBlock *types.Block - vm *VM - status choices.Status + *types.Block + st choices.Status + vm *VM } -// newBlock returns a new Block wrapping the Tendermint Block type and implementing the snowman.Block interface -func (vm *VM) newBlock(tmBlock *types.Block) (*Block, error) { - var id ids.ID - copy(id[:], tmBlock.Hash()) - - return &Block{ - id: id, - tmBlock: tmBlock, - vm: vm, - }, nil +func NewBlock(vm *VM, block *types.Block, st choices.Status) *Block { + return &Block{Block: block, vm: vm, st: st} } -func (b *Block) ID() ids.ID { - return b.id +// ID returns a unique ID for this element. +// +// Typically, this is implemented by using a cryptographic hash of a +// binary representation of this element. An element should return the same +// IDs upon repeated calls. +func (block *Block) ID() ids.ID { + var id ids.ID + copy(id[:], block.Hash()) + return id } -func (b *Block) Accept(ctx context.Context) error { - b.SetStatus(choices.Accepted) - return b.vm.applyBlock(b) +// Accept this element. +// +// This element will be accepted by every correct node in the network. +func (block *Block) Accept(context.Context) error { + block.vm.log.Debug("try to accept block", "block", block.ID()) + block.st = choices.Accepted + return block.vm.applyBlock(block) } -func (b *Block) Reject(ctx context.Context) error { - b.SetStatus(choices.Rejected) - - return nil +// Reject this element. +// +// This element will not be accepted by any correct node in the network. +func (block *Block) Reject(context.Context) error { + block.vm.log.Debug("try to reject block", "block", block.ID()) + block.st = choices.Rejected + panic("implement me") } -func (b *Block) SetStatus(status choices.Status) { - b.status = status +// Status returns this element's current status. +// +// If Accept has been called on an element with this ID, Accepted should be +// returned. Similarly, if Reject has been called on an element with this +// ID, Rejected should be returned. 
If the contents of this element are +// unknown, then Unknown should be returned. Otherwise, Processing should be +// returned. +// +// TODO: Consider allowing Status to return an error. +func (block *Block) Status() choices.Status { + return block.st } -func (b *Block) Status() choices.Status { - return b.status -} - -func (b *Block) Parent() ids.ID { +// Parent returns the ID of this block's parent. +func (block *Block) Parent() ids.ID { var id ids.ID - parentHash := b.tmBlock.Header.LastBlockID.Hash - copy(id[:], parentHash) - + copy(id[:], block.LastBlockID.Hash.Bytes()) return id } -func (b *Block) Verify(context.Context) error { - if b == nil || b.tmBlock == nil { - return errInvalidBlock - } - - return b.tmBlock.ValidateBasic() +// Verify that the state transition this block would make if accepted is +// valid. If the state transition is invalid, a non-nil error should be +// returned. +// +// It is guaranteed that the Parent has been successfully verified. +// +// If nil is returned, it is guaranteed that either Accept or Reject will be +// called on this block, unless the VM is shut down. +func (block *Block) Verify(context.Context) error { + return block.ValidateBasic() } -func (b *Block) Bytes() []byte { - block, err := b.tmBlock.ToProto() +// Bytes returns the binary representation of this block. +// +// This is used for sending blocks to peers. The bytes should be able to be +// parsed into the same block on another node. +func (block *Block) Bytes() []byte { + b, err := block.ToProto() if err != nil { - panic(err) + panic(fmt.Sprintf("can't convert block to proto obj: %s", err)) } - data, err := block.Marshal() + data, err := b.Marshal() if err != nil { - panic(err) + panic(fmt.Sprintf("can't serialize block: %s", err)) } - return data } -func (b *Block) Height() uint64 { - return uint64(b.tmBlock.Height) +// Height returns the height of this block in the chain. +func (block *Block) Height() uint64 { + return uint64(block.Block.Height) } -func (b *Block) Timestamp() time.Time { - return b.tmBlock.Time +// Time this block was proposed at. This value should be consistent across +// all nodes. If this block hasn't been successfully verified, any value can +// be returned. If this block is the last accepted block, the timestamp must +// be returned correctly. Otherwise, accepted blocks can return any value. 
+func (block *Block) Timestamp() time.Time { + return block.Block.Time } diff --git a/vm/database.go b/vm/database.go index 07b62ff18..d136c1c8b 100644 --- a/vm/database.go +++ b/vm/database.go @@ -6,7 +6,7 @@ import ( ) var ( - _ dbm.DB = &Database{} + _ dbm.DB = (*Database)(nil) ) type ( @@ -24,6 +24,20 @@ type ( } ) +func NewDB(db database.Database) *Database { + return &Database{ + Database: db, + } +} + +func (db Database) Close() error { + return db.Database.Close() +} + +func (db Database) Has(key []byte) (bool, error) { + return db.Database.Has(key) +} + func (db Database) Get(key []byte) ([]byte, error) { res, err := db.Database.Get(key) if err != nil { @@ -43,6 +57,10 @@ func (db Database) SetSync(key []byte, value []byte) error { return db.Database.Put(key, value) } +func (db Database) Delete(key []byte) error { + return db.Database.Delete(key) +} + func (db Database) DeleteSync(key []byte) error { return db.Database.Delete(key) } diff --git a/vm/funcs.go b/vm/funcs.go new file mode 100644 index 000000000..c43c1d736 --- /dev/null +++ b/vm/funcs.go @@ -0,0 +1,402 @@ +package vm + +import ( + "errors" + "fmt" + "time" + + abci "github.com/consideritdone/landslidecore/abci/types" + "github.com/consideritdone/landslidecore/crypto" + "github.com/consideritdone/landslidecore/libs/log" + tmmath "github.com/consideritdone/landslidecore/libs/math" + mempl "github.com/consideritdone/landslidecore/mempool" + "github.com/consideritdone/landslidecore/node" + tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" + "github.com/consideritdone/landslidecore/proxy" + "github.com/consideritdone/landslidecore/rpc/client" + coretypes "github.com/consideritdone/landslidecore/rpc/core/types" + "github.com/consideritdone/landslidecore/state" + "github.com/consideritdone/landslidecore/store" + "github.com/consideritdone/landslidecore/types" +) + +var ( + // see README + defaultPerPage = 30 + maxPerPage = 100 +) + +func NewLocalGenesisDocProvider(data []byte) node.GenesisDocProvider { + return func() (*types.GenesisDoc, error) { + return types.GenesisDocFromJSON(data) + } +} + +func makeCommitMock(height int64, timestamp time.Time) *types.Commit { + var commitSig []types.CommitSig = nil + if height != 1 { + commitSig = []types.CommitSig{{Timestamp: time.Now()}} + } + return types.NewCommit( + height, + 0, + types.BlockID{ + Hash: []byte(""), + PartSetHeader: types.PartSetHeader{ + Hash: []byte(""), + Total: 1, + }, + }, + commitSig, + ) +} + +func validateBlock(state state.State, block *types.Block) error { + // Validate internal consistency. + if err := block.ValidateBasic(); err != nil { + return err + } + + // Validate basic info. + if block.Version.App != state.Version.Consensus.App || + block.Version.Block != state.Version.Consensus.Block { + return fmt.Errorf("wrong Block.Header.Version. Expected %v, got %v", + state.Version.Consensus, + block.Version, + ) + } + if block.ChainID != state.ChainID { + return fmt.Errorf("wrong Block.Header.ChainID. Expected %v, got %v", + state.ChainID, + block.ChainID, + ) + } + + // Validate block LastCommit. + if block.Height == state.InitialHeight { + if len(block.LastCommit.Signatures) != 0 { + return errors.New("initial block can't have LastCommit signatures") + } + } + + // NOTE: We can't actually verify it's the right proposer because we don't + // know what round the block was first proposed. So just check that it's + // a legit address and a known validator. 
+ if len(block.ProposerAddress) != crypto.AddressSize { + return fmt.Errorf("expected ProposerAddress size %d, got %d", + crypto.AddressSize, + len(block.ProposerAddress), + ) + } + + // Validate block Time + switch { + case block.Height > state.InitialHeight: + if !(block.Time.After(state.LastBlockTime) || block.Time.Equal(state.LastBlockTime)) { + return fmt.Errorf("block time %v not greater than or equal to last block time %v", + block.Time, + state.LastBlockTime, + ) + } + + case block.Height == state.InitialHeight: + genesisTime := state.LastBlockTime + if !block.Time.Equal(genesisTime) { + return fmt.Errorf("block time %v is not equal to genesis time %v", + block.Time, + genesisTime, + ) + } + + default: + return fmt.Errorf("block height %v lower than initial height %v", + block.Height, state.InitialHeight) + } + + return nil +} + +func execBlockOnProxyApp( + logger log.Logger, + proxyAppConn proxy.AppConnConsensus, + block *types.Block, + store state.Store, + initialHeight int64, +) (*tmstate.ABCIResponses, error) { + var validTxs, invalidTxs = 0, 0 + + txIndex := 0 + abciResponses := new(tmstate.ABCIResponses) + dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs)) + abciResponses.DeliverTxs = dtxs + + // Execute transactions and get hash. + proxyCb := func(req *abci.Request, res *abci.Response) { + if r, ok := res.Value.(*abci.Response_DeliverTx); ok { + // TODO: make use of res.Log + // TODO: make use of this info + // Blocks may include invalid txs. + txRes := r.DeliverTx + if txRes.Code == abci.CodeTypeOK { + validTxs++ + } else { + logger.Debug("invalid tx", "code", txRes.Code, "log", txRes.Log) + invalidTxs++ + } + + abciResponses.DeliverTxs[txIndex] = txRes + txIndex++ + } + } + proxyAppConn.SetResponseCallback(proxyCb) + + commitInfo := getBeginBlockValidatorInfo(block, store, initialHeight) + + byzVals := make([]abci.Evidence, 0) + for _, evidence := range block.Evidence.Evidence { + byzVals = append(byzVals, evidence.ABCI()...) + } + + // Begin block + var err error + pbh := block.Header.ToProto() + if pbh == nil { + return nil, errors.New("nil header") + } + + abciResponses.BeginBlock, err = proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ + Hash: block.Hash(), + Header: *pbh, + LastCommitInfo: commitInfo, + ByzantineValidators: byzVals, + }) + if err != nil { + logger.Error("error in proxyAppConn.BeginBlock", "err", err) + return nil, err + } + + // run txs of block + for _, tx := range block.Txs { + proxyAppConn.DeliverTxAsync(abci.RequestDeliverTx{Tx: tx}) + if err := proxyAppConn.Error(); err != nil { + return nil, err + } + } + + // End block. 
+ abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(abci.RequestEndBlock{Height: block.Height}) + if err != nil { + logger.Error("error in proxyAppConn.EndBlock", "err", err) + return nil, err + } + + logger.Info("executed block", "height", block.Height, "num_valid_txs", validTxs, "num_invalid_txs", invalidTxs) + return abciResponses, nil +} + +func getBeginBlockValidatorInfo(block *types.Block, store state.Store, initialHeight int64) abci.LastCommitInfo { + voteInfos := make([]abci.VoteInfo, block.LastCommit.Size()) + return abci.LastCommitInfo{ + Round: block.LastCommit.Round, + Votes: voteInfos, + } +} + +func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { + return types.NewResults(ar.DeliverTxs).Hash() +} + +func updateState( + st state.State, + blockID types.BlockID, + header *types.Header, + abciResponses *tmstate.ABCIResponses, +) (state.State, error) { + return state.State{ + Version: st.Version, + ChainID: st.ChainID, + InitialHeight: st.InitialHeight, + LastBlockHeight: header.Height, + LastBlockID: blockID, + LastBlockTime: header.Time, + LastResultsHash: ABCIResponsesResultsHash(abciResponses), + AppHash: nil, + }, nil +} + +// TxPreCheck returns a function to filter transactions before processing. +// The function limits the size of a transaction to the block's maximum data size. +func TxPreCheck(state state.State) mempl.PreCheckFunc { + maxDataBytes := types.MaxDataBytesNoEvidence( + 22020096, + 1, + ) + return mempl.PreCheckMaxBytes(maxDataBytes) +} + +// TxPostCheck returns a function to filter transactions after processing. +// The function limits the gas wanted by a transaction to the block's maximum total gas. +func TxPostCheck(state state.State) mempl.PostCheckFunc { + return mempl.PostCheckMaxGas(-1) +} + +func fireEvents( + logger log.Logger, + eventBus types.BlockEventPublisher, + block *types.Block, + abciResponses *tmstate.ABCIResponses, +) { + if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{ + Block: block, + ResultBeginBlock: *abciResponses.BeginBlock, + ResultEndBlock: *abciResponses.EndBlock, + }); err != nil { + logger.Error("failed publishing new block", "err", err) + } + + if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + Header: block.Header, + NumTxs: int64(len(block.Txs)), + ResultBeginBlock: *abciResponses.BeginBlock, + ResultEndBlock: *abciResponses.EndBlock, + }); err != nil { + logger.Error("failed publishing new block header", "err", err) + } + + if len(block.Evidence.Evidence) != 0 { + for _, ev := range block.Evidence.Evidence { + if err := eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ + Evidence: ev, + Height: block.Height, + }); err != nil { + logger.Error("failed publishing new evidence", "err", err) + } + } + } + + for i, tx := range block.Data.Txs { + if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ + Height: block.Height, + Index: uint32(i), + Tx: tx, + Result: *(abciResponses.DeliverTxs[i]), + }}); err != nil { + logger.Error("failed publishing event TX", "err", err) + } + } +} + +// bsHeight can be either latest committed or uncommitted (+1) height. 
+func getHeight(bs *store.BlockStore, heightPtr *int64) (int64, error) { + bsHeight := bs.Height() + bsBase := bs.Base() + if heightPtr != nil { + height := *heightPtr + if height <= 0 { + return 0, fmt.Errorf("height must be greater than 0, but got %d", height) + } + if height > bsHeight { + return 0, fmt.Errorf("height %d must be less than or equal to the current blockchain height %d", height, bsHeight) + } + if height < bsBase { + return 0, fmt.Errorf("height %d is not available, lowest height is %d", height, bsBase) + } + return height, nil + } + return bsHeight, nil +} + +func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { + if perPage < 1 { + panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) + } + + if pagePtr == nil { // no page parameter + return 1, nil + } + + pages := ((totalCount - 1) / perPage) + 1 + if pages == 0 { + pages = 1 // one page (even if it's empty) + } + page := *pagePtr + if page <= 0 || page > pages { + return 1, fmt.Errorf("page should be within [1, %d] range, given %d", pages, page) + } + + return page, nil +} + +func validatePerPage(perPagePtr *int) int { + if perPagePtr == nil { // no per_page parameter + return defaultPerPage + } + + perPage := *perPagePtr + if perPage < 1 { + return defaultPerPage + } else if perPage > maxPerPage { + return maxPerPage + } + return perPage +} + +func validateSkipCount(page, perPage int) int { + skipCount := (page - 1) * perPage + if skipCount < 0 { + return 0 + } + return skipCount +} + +// filterMinMax returns error if either min or max are negative or min > max +// if 0, use blockstore base for min, latest block height for max +// enforce limit. +func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { + // filter negatives + if min < 0 || max < 0 { + return min, max, fmt.Errorf("heights must be non-negative") + } + + // adjust for default values + if min == 0 { + min = 1 + } + if max == 0 { + max = height + } + + // limit max to the height + max = tmmath.MinInt64(height, max) + + // limit min to the base + min = tmmath.MaxInt64(base, min) + + // limit min to within `limit` of max + // so the total number of blocks returned will be `limit` + min = tmmath.MaxInt64(min, max-limit+1) + + if min > max { + return min, max, fmt.Errorf("min height %d can't be greater than max height %d", min, max) + } + return min, max, nil +} + +func WaitForHeight(c Service, h int64, waiter client.Waiter) error { + if waiter == nil { + waiter = client.DefaultWaitStrategy + } + delta := int64(1) + for delta > 0 { + r := new(coretypes.ResultStatus) + if err := c.Status(nil, nil, r); err != nil { + return err + } + delta = h - r.SyncInfo.LatestBlockHeight + // wait for the time, or abort early + if err := waiter(delta); err != nil { + return err + } + } + return nil +} diff --git a/vm/service.go b/vm/service.go index 7caa1bcbd..9a3a5c7d6 100644 --- a/vm/service.go +++ b/vm/service.go @@ -85,8 +85,8 @@ type ( } TxArgs struct { - Hash []byte `json:"hash"` - Prove bool `json:"prove"` + Hash tmbytes.HexBytes `json:"hash"` + Prove bool `json:"prove"` } TxSearchArgs struct { @@ -170,7 +170,7 @@ func NewService(vm *VM) Service { } func (s *LocalService) ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error { - resInfo, err := s.vm.proxyApp.Query().InfoSync(proxy.RequestInfo) + resInfo, err := s.vm.app.Query().InfoSync(proxy.RequestInfo) if err != nil { return err } @@ -187,7 +187,7 @@ func (s *LocalService) ABCIQueryWithOptions( args *ABCIQueryWithOptionsArgs, reply 
*ctypes.ResultABCIQuery, ) error { - resQuery, err := s.vm.proxyApp.Query().QuerySync(abci.RequestQuery{ + resQuery, err := s.vm.app.Query().QuerySync(abci.RequestQuery{ Path: args.Path, Data: args.Data, Height: args.Opts.Height, @@ -215,13 +215,13 @@ func (s *LocalService) BroadcastTxCommit( deliverTxSub, err := s.vm.eventBus.Subscribe(subCtx, subscriber, q) if err != nil { err = fmt.Errorf("failed to subscribe to tx: %w", err) - s.vm.tmLogger.Error("Error on broadcast_tx_commit", "err", err) + s.vm.log.Error("Error on broadcast_tx_commit", "err", err) return err } defer func() { if err := s.vm.eventBus.Unsubscribe(context.Background(), subscriber, q); err != nil { - s.vm.tmLogger.Error("Error unsubscribing from eventBus", "err", err) + s.vm.log.Error("Error unsubscribing from eventBus", "err", err) } }() @@ -231,7 +231,7 @@ func (s *LocalService) BroadcastTxCommit( checkTxResCh <- res }, mempl.TxInfo{}) if err != nil { - s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) + s.vm.log.Error("Error on broadcastTxCommit", "err", err) return fmt.Errorf("error on broadcastTxCommit: %v", err) } checkTxResMsg := <-checkTxResCh @@ -264,12 +264,12 @@ func (s *LocalService) BroadcastTxCommit( reason = deliverTxSub.Err().Error() } err = fmt.Errorf("deliverTxSub was cancelled (reason: %s)", reason) - s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) + s.vm.log.Error("Error on broadcastTxCommit", "err", err) return err // TODO: use config for timeout case <-time.After(10 * time.Second): err = errors.New("timed out waiting for tx to be included in a block") - s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) + s.vm.log.Error("Error on broadcastTxCommit", "err", err) return err } } @@ -290,7 +290,7 @@ func (s *LocalService) BroadcastTxAsync( func (s *LocalService) BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error { resCh := make(chan *abci.Response, 1) err := s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { - s.vm.tmLogger.With("module", "service").Debug("handled response from checkTx") + s.vm.log.With("module", "service").Debug("handled response from checkTx") resCh <- res }, mempl.TxInfo{}) if err != nil { @@ -404,10 +404,12 @@ func (s *LocalService) Validators(_ *http.Request, args *ValidatorsArgs, reply * } func (s *LocalService) Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error { + s.vm.log.Debug("query tx", "hash", args.Hash) r, err := s.vm.txIndexer.Get(args.Hash) if err != nil { return err } + s.vm.log.Debug("query tx", "r", args.Hash) if r == nil { return fmt.Errorf("tx (%X) not found", args.Hash) @@ -584,7 +586,7 @@ func (s *LocalService) BlockchainInfo( if err != nil { return err } - s.vm.tmLogger.Debug("BlockchainInfoHandler", "maxHeight", args.MaxHeight, "minHeight", args.MinHeight) + s.vm.log.Debug("BlockchainInfoHandler", "maxHeight", args.MaxHeight, "minHeight", args.MinHeight) var blockMetas []*types.BlockMeta for height := args.MaxHeight; height >= args.MinHeight; height-- { @@ -598,32 +600,32 @@ func (s *LocalService) BlockchainInfo( } func (s *LocalService) Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error { - if len(s.vm.genChunks) > 1 { - return errors.New("genesis response is large, please use the genesis_chunked API instead") - } - - reply.Genesis = s.vm.genesis + //if len(s.vm.genChunks) > 1 { + // return errors.New("genesis response is large, please use the genesis_chunked API instead") + //} + // + //reply.Genesis = s.vm.genesis return nil } func (s 
*LocalService) GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error { - if s.vm.genChunks == nil { - return fmt.Errorf("service configuration error, genesis chunks are not initialized") - } - - if len(s.vm.genChunks) == 0 { - return fmt.Errorf("service configuration error, there are no chunks") - } - - id := int(args.Chunk) - - if id > len(s.vm.genChunks)-1 { - return fmt.Errorf("there are %d chunks, %d is invalid", len(s.vm.genChunks)-1, id) - } - - reply.TotalChunks = len(s.vm.genChunks) - reply.ChunkNumber = id - reply.Data = s.vm.genChunks[id] + //if s.vm.genChunks == nil { + // return fmt.Errorf("service configuration error, genesis chunks are not initialized") + //} + // + //if len(s.vm.genChunks) == 0 { + // return fmt.Errorf("service configuration error, there are no chunks") + //} + // + //id := int(args.Chunk) + // + //if id > len(s.vm.genChunks)-1 { + // return fmt.Errorf("there are %d chunks, %d is invalid", len(s.vm.genChunks)-1, id) + //} + // + //reply.TotalChunks = len(s.vm.genChunks) + //reply.ChunkNumber = id + //reply.Data = s.vm.genChunks[id] return nil } @@ -659,8 +661,8 @@ func (s *LocalService) Status(_ *http.Request, _ *struct{}, reply *ctypes.Result } reply.NodeInfo = p2p.DefaultNodeInfo{ - DefaultNodeID: p2p.ID(s.vm.ctx.NodeID.String()), - Network: fmt.Sprintf("%d", s.vm.ctx.NetworkID), + DefaultNodeID: p2p.ID(s.vm.chainCtx.NodeID.String()), + Network: fmt.Sprintf("%d", s.vm.chainCtx.NetworkID), } reply.SyncInfo = ctypes.SyncInfo{ LatestBlockHash: latestBlockHash, @@ -692,7 +694,7 @@ func (s *LocalService) ConsensusState(_ *http.Request, _ *struct{}, reply *ctype func (s *LocalService) ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error { reply.BlockHeight = s.vm.blockStore.Height() - reply.ConsensusParams = *s.vm.genesis.ConsensusParams + //reply.ConsensusParams = *s.vm.genesis.ConsensusParams return nil } @@ -718,7 +720,7 @@ func (s *LocalService) NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply *ct } func (s *LocalService) CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error { - res, err := s.vm.proxyApp.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: args.Tx}) + res, err := s.vm.app.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: args.Tx}) if err != nil { return err } diff --git a/vm/vm.go b/vm/vm.go index 20077f996..ce023b5fc 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -2,14 +2,14 @@ package vm import ( "context" - "encoding/base64" "errors" "fmt" "net/http" "time" - "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/avalanchego/database" + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" @@ -18,26 +18,23 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/components/chain" - "github.com/gorilla/rpc/v2" - "github.com/prometheus/client_golang/prometheus" - dbm "github.com/tendermint/tm-db" abciTypes "github.com/consideritdone/landslidecore/abci/types" 
"github.com/consideritdone/landslidecore/config" - cs "github.com/consideritdone/landslidecore/consensus" - tmjson "github.com/consideritdone/landslidecore/libs/json" + "github.com/consideritdone/landslidecore/consensus" "github.com/consideritdone/landslidecore/libs/log" + "github.com/consideritdone/landslidecore/mempool" mempl "github.com/consideritdone/landslidecore/mempool" "github.com/consideritdone/landslidecore/node" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" "github.com/consideritdone/landslidecore/proxy" rpccore "github.com/consideritdone/landslidecore/rpc/core" rpcserver "github.com/consideritdone/landslidecore/rpc/jsonrpc/server" - sm "github.com/consideritdone/landslidecore/state" + "github.com/consideritdone/landslidecore/state" "github.com/consideritdone/landslidecore/state/indexer" blockidxkv "github.com/consideritdone/landslidecore/state/indexer/block/kv" "github.com/consideritdone/landslidecore/state/txindex" @@ -46,16 +43,6 @@ import ( "github.com/consideritdone/landslidecore/types" ) -var ( - _ block.ChainVM = &VM{} - - Version = &version.Semantic{ - Major: 0, - Minor: 1, - Patch: 1, - } -) - const ( Name = "landslide" @@ -63,90 +50,240 @@ const ( missingCacheSize = 50 unverifiedCacheSize = 50 - // genesisChunkSize is the maximum size, in bytes, of each - // chunk in the genesis structure for the chunked API genesisChunkSize = 16 * 1024 * 1024 // 16 ) var ( - chainStateMetricsPrefix = "chain_state" - - lastAcceptedKey = []byte("last_accepted_key") - blockStoreDBPrefix = []byte("blockstore") - stateDBPrefix = []byte("state") - txIndexerDBPrefix = []byte("tx_index") - blockIndexerDBPrefix = []byte("block_events") + Version = version.Semantic{ + Major: 0, + Minor: 1, + Patch: 2, + } + _ common.NetworkAppHandler = (*VM)(nil) + _ common.CrossChainAppHandler = (*VM)(nil) + _ common.AppHandler = (*VM)(nil) + _ health.Checker = (*VM)(nil) + _ validators.Connector = (*VM)(nil) + _ common.VM = (*VM)(nil) + _ block.Getter = (*VM)(nil) + _ block.Parser = (*VM)(nil) + _ block.ChainVM = (*VM)(nil) + + dbPrefixBlockStore = []byte("block-store") + dbPrefixStateStore = []byte("state-store") + dbPrefixTxIndexer = []byte("tx-indexer") + dbPrefixBlockIndexer = []byte("block-indexer") proposerAddress = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} ) -var ( - errInvalidBlock = errors.New("invalid block") - errNoPendingTxs = errors.New("there is no txs to include to block") -) - -type VM struct { - ctx *snow.Context - dbManager manager.Manager - - toEngine chan<- common.Message - - // *chain.State helps to implement the VM interface by wrapping blocks - // with an efficient caching layer. - *chain.State - - tmLogger log.Logger - - blockStoreDB dbm.DB - blockStore *store.BlockStore - - stateDB dbm.DB - stateStore sm.Store - tmState *sm.State +type ( + AppCreator func(ids.ID) (abciTypes.Application, error) - mempool mempl.Mempool + VM struct { + appCreator AppCreator + app proxy.AppConns - // Tendermint Application - app abciTypes.Application + log log.Logger + chainCtx *snow.Context + toEngine chan<- common.Message - // Tendermint proxy app - proxyApp proxy.AppConns + verifiedBlocks map[ids.ID]*Block + blockStore *store.BlockStore + stateStore state.Store + state state.State + genesis *types.GenesisDoc - // EventBus is a common bus for all events going through the system. - eventBus *types.EventBus + mempool *mempool.CListMempool + eventBus *types.EventBus - // [acceptedBlockDB] is the database to store the last accepted - // block. 
- acceptedBlockDB database.Database + txIndexer txindex.TxIndexer + blockIndexer indexer.BlockIndexer + indexerService *txindex.IndexerService - genesis *types.GenesisDoc - // cache of chunked genesis data. - genChunks []string - - // Metrics - multiGatherer metrics.MultiGatherer - - txIndexer txindex.TxIndexer - txIndexerDB dbm.DB - blockIndexer indexer.BlockIndexer - blockIndexerDB dbm.DB - indexerService *txindex.IndexerService - - clock mockable.Clock + bootstrapped utils.Atomic[bool] + preferred ids.ID + } +) - appCreator func(ids.ID) (abciTypes.Application, error) +func New(appCreator AppCreator) *VM { + return &VM{ + appCreator: appCreator, + app: nil, + } +} + +// Notify this engine of a request for data from [nodeID]. +// +// The meaning of [request], and what should be sent in response to it, is +// application (VM) specific. +// +// It is not guaranteed that: +// * [request] is well-formed/valid. +// +// This node should typically send an AppResponse to [nodeID] in response to +// a valid message using the same request ID before the deadline. However, +// the VM may arbitrarily choose to not send a response to this request. +func (vm *VM) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + panic("implement me") +} + +// Notify this engine that an AppRequest message it sent to [nodeID] with +// request ID [requestID] failed. +// +// This may be because the request timed out or because the message couldn't +// be sent to [nodeID]. +// +// It is guaranteed that: +// * This engine sent a request to [nodeID] with ID [requestID]. +// * AppRequestFailed([nodeID], [requestID]) has not already been called. +// * AppResponse([nodeID], [requestID]) has not already been called. +func (vm *VM) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + panic("implement me") +} + +// Notify this engine of a response to the AppRequest message it sent to +// [nodeID] with request ID [requestID]. +// +// The meaning of [response] is application (VM) specifc. +// +// It is guaranteed that: +// * This engine sent a request to [nodeID] with ID [requestID]. +// * AppRequestFailed([nodeID], [requestID]) has not already been called. +// * AppResponse([nodeID], [requestID]) has not already been called. +// +// It is not guaranteed that: +// * [response] contains the expected response +// * [response] is well-formed/valid. +// +// If [response] is invalid or not the expected response, the VM chooses how +// to react. For example, the VM may send another AppRequest, or it may give +// up trying to get the requested information. +func (vm *VM) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + panic("implement me") +} + +// Notify this engine of a gossip message from [nodeID]. +// +// The meaning of [msg] is application (VM) specific, and the VM defines how +// to react to this message. +// +// This message is not expected in response to any event, and it does not +// need to be responded to. +// +// A node may gossip the same message multiple times. That is, +// AppGossip([nodeID], [msg]) may be called multiple times. +func (vm *VM) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { + panic("implement me") +} + +// CrossChainAppRequest Notify this engine of a request for data from +// [chainID]. +// +// The meaning of [request], and what should be sent in response to it, is +// application (VM) specific. 
+// +// Guarantees surrounding the request are specific to the implementation of +// the requesting VM. For example, the request may or may not be guaranteed +// to be well-formed/valid depending on the implementation of the requesting +// VM. +// +// This node should typically send a CrossChainAppResponse to [chainID] in +// response to a valid message using the same request ID before the +// deadline. However, the VM may arbitrarily choose to not send a response +// to this request. +func (vm *VM) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + panic("implement me") +} + +// CrossChainAppRequestFailed notifies this engine that a +// CrossChainAppRequest message it sent to [chainID] with request ID +// [requestID] failed. +// +// This may be because the request timed out or because the message couldn't +// be sent to [chainID]. +// +// It is guaranteed that: +// * This engine sent a request to [chainID] with ID [requestID]. +// * CrossChainAppRequestFailed([chainID], [requestID]) has not already been +// called. +// * CrossChainAppResponse([chainID], [requestID]) has not already been +// called. +func (vm *VM) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { + panic("implement me") +} + +// CrossChainAppResponse notifies this engine of a response to the +// CrossChainAppRequest message it sent to [chainID] with request ID +// [requestID]. +// +// The meaning of [response] is application (VM) specific. +// +// It is guaranteed that: +// * This engine sent a request to [chainID] with ID [requestID]. +// * CrossChainAppRequestFailed([chainID], [requestID]) has not already been +// called. +// * CrossChainAppResponse([chainID], [requestID]) has not already been +// called. +// +// Guarantees surrounding the response are specific to the implementation of +// the responding VM. For example, the response may or may not be guaranteed +// to be well-formed/valid depending on the implementation of the requesting +// VM. +// +// If [response] is invalid or not the expected response, the VM chooses how +// to react. For example, the VM may send another CrossChainAppRequest, or +// it may give up trying to get the requested information. +func (vm *VM) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + panic("implement me") +} + +// HealthCheck returns health check results and, if not healthy, a non-nil +// error +// +// It is expected that the results are json marshallable. +func (vm *VM) HealthCheck(context.Context) (interface{}, error) { + return nil, nil } -func NewVM(app abciTypes.Application) *VM { - return &VM{app: app, appCreator: nil} +// Connector represents a handler that is called when a connection is marked as connected +func (vm *VM) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + vm.log.Info("connected", "nodeID", nodeID.String(), "nodeVersion", nodeVersion.String()) + return nil } -func NewVMWithAppCreator(creator func(chainID ids.ID) (abciTypes.Application, error)) *VM { - return &VM{app: nil, appCreator: creator} +// Connector represents a handler that is called when a connection is marked as disconnected +func (vm *VM) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + vm.log.Info("disconnected", "nodeID", nodeID.String()) + return nil } +// Initialize this VM. +// [chainCtx]: Metadata about this VM. 
+// +// [chainCtx.networkID]: The ID of the network this VM's chain is +// running on. +// [chainCtx.chainID]: The unique ID of the chain this VM is running on. +// [chainCtx.Log]: Used to log messages +// [chainCtx.NodeID]: The unique staker ID of this node. +// [chainCtx.Lock]: A Read/Write lock shared by this VM and the +// consensus engine that manages this VM. The write +// lock is held whenever code in the consensus engine +// calls the VM. +// +// [dbManager]: The manager of the database this VM will persist data to. +// [genesisBytes]: The byte-encoding of the genesis information of this +// +// VM. The VM uses it to initialize its state. For +// example, if this VM were an account-based payments +// system, `genesisBytes` would probably contain a genesis +// transaction that gives coins to some accounts, and this +// transaction would be in the genesis block. +// +// [toEngine]: The channel used to send messages to the consensus engine. +// [fxs]: Feature extensions that attach to this VM. func (vm *VM) Initialize( - _ context.Context, + ctx context.Context, chainCtx *snow.Context, dbManager manager.Manager, genesisBytes []byte, @@ -156,309 +293,276 @@ func (vm *VM) Initialize( fxs []*common.Fx, appSender common.AppSender, ) error { - if vm.appCreator != nil { - app, err := vm.appCreator(chainCtx.ChainID) - if err != nil { - return err - } - vm.app = app - } - - vm.ctx = chainCtx - vm.tmLogger = log.NewTMLogger(vm.ctx.Log) - vm.dbManager = dbManager - + vm.chainCtx = chainCtx vm.toEngine = toEngine + vm.log = log.NewTMLogger(vm.chainCtx.Log).With("module", "vm") + vm.verifiedBlocks = make(map[ids.ID]*Block) - baseDB := dbManager.Current().Database - - vm.blockStoreDB = Database{prefixdb.NewNested(blockStoreDBPrefix, baseDB)} - vm.blockStore = store.NewBlockStore(vm.blockStoreDB) - - vm.stateDB = Database{prefixdb.NewNested(stateDBPrefix, baseDB)} - vm.stateStore = sm.NewStore(vm.stateDB) + db := dbManager.Current().Database - if err := vm.initGenesis(genesisBytes); err != nil { - return err - } + dbBlockStore := NewDB(prefixdb.NewNested(dbPrefixBlockStore, db)) + vm.blockStore = store.NewBlockStore(dbBlockStore) - if err := vm.initGenesisChunks(); err != nil { - return err - } + dbStateStore := NewDB(prefixdb.NewNested(dbPrefixStateStore, db)) + vm.stateStore = state.NewStore(dbStateStore) - state, err := vm.stateStore.LoadFromDBOrGenesisDoc(vm.genesis) + app, err := vm.appCreator(chainCtx.ChainID) if err != nil { - return fmt.Errorf("failed to load tmState from genesis: %w ", err) - } - vm.tmState = &state - - // genesis only - if vm.tmState.LastBlockHeight == 0 { - // TODO use decoded/encoded genesis bytes - block, partSet := vm.tmState.MakeBlock(1, []types.Tx{genesisBytes}, nil, nil, nil) - vm.tmLogger.Info("init block", "b", block, "part set", partSet) - } - - //vm.genesisHash = vm.ethConfig.Genesis.ToBlock(nil).Hash() // must create genesis hash before [vm.readLastAccepted] - - // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). 
- proxyApp, err := node.CreateAndStartProxyAppConns(proxy.NewLocalClientCreator(vm.app), vm.tmLogger) - if err != nil { - return fmt.Errorf("failed to create and start proxy app: %w ", err) - } - vm.proxyApp = proxyApp - - // Create EventBus - eventBus, err := node.CreateAndStartEventBus(vm.tmLogger) - if err != nil { - return fmt.Errorf("failed to create and start event bus: %w ", err) - } - vm.eventBus = eventBus - - vm.txIndexerDB = Database{prefixdb.NewNested(txIndexerDBPrefix, baseDB)} - vm.txIndexer = txidxkv.NewTxIndex(vm.txIndexerDB) - vm.blockIndexerDB = Database{prefixdb.NewNested(blockIndexerDBPrefix, baseDB)} - vm.blockIndexer = blockidxkv.New(vm.blockIndexerDB) - vm.indexerService = txindex.NewIndexerService(vm.txIndexer, vm.blockIndexer, eventBus) - vm.indexerService.SetLogger(vm.tmLogger.With("module", "txindex")) - - if err := vm.indexerService.Start(); err != nil { - return err - } - - if err := vm.doHandshake(vm.genesis, vm.tmLogger.With("module", "consensus")); err != nil { return err } - state, err = vm.stateStore.Load() + vm.state, vm.genesis, err = node.LoadStateFromDBOrGenesisDocProvider( + dbStateStore, + NewLocalGenesisDocProvider(genesisBytes), + ) if err != nil { - return fmt.Errorf("failed to load tmState: %w ", err) + return nil } - vm.tmState = &state - genesisBlock, err := vm.buildGenesisBlock(genesisBytes) + vm.app, err = node.CreateAndStartProxyAppConns(proxy.NewLocalClientCreator(app), vm.log) if err != nil { - return fmt.Errorf("failed to build genesis block: %w ", err) - } - - vm.mempool = vm.createMempool() - - if err := vm.initializeMetrics(); err != nil { return err } - if err := vm.initChainState(genesisBlock); err != nil { + vm.eventBus, err = node.CreateAndStartEventBus(vm.log) + if err != nil { return err } - return nil -} - -// builds genesis block if required -func (vm *VM) buildGenesisBlock(genesisData []byte) (*types.Block, error) { - if vm.tmState.LastBlockHeight != 0 { - return nil, nil - } - txs := types.Txs{types.Tx(genesisData)} - if len(txs) == 0 { - return nil, errNoPendingTxs - } - height := vm.tmState.LastBlockHeight + 1 + dbTxIndexer := NewDB(prefixdb.NewNested(dbPrefixTxIndexer, db)) + vm.txIndexer = txidxkv.NewTxIndex(dbTxIndexer) - commit := makeCommitMock(height, time.Now()) - genesisBlock, _ := vm.tmState.MakeBlock(height, txs, commit, nil, proposerAddress) - return genesisBlock, nil -} + dbBlockIndexer := NewDB(prefixdb.NewNested(dbPrefixBlockIndexer, db)) + vm.blockIndexer = blockidxkv.New(dbBlockIndexer) -// Initializes Genesis if required -func (vm *VM) initGenesis(genesisData []byte) error { - // load genesis from database - genesis, err := node.LoadGenesisDoc(vm.stateDB) - // genesis not found in database - if err != nil { - if err == node.ErrNoGenesisDoc { - // get it from json - genesis, err = types.GenesisDocFromJSON(genesisData) - if err != nil { - return fmt.Errorf("failed to decode genesis bytes: %w ", err) - } - // save to database - err = node.SaveGenesisDoc(vm.stateDB, genesis) - if err != nil { - return fmt.Errorf("failed to save genesis data: %w ", err) - } - } else { - return err - } + vm.indexerService = txindex.NewIndexerService(vm.txIndexer, vm.blockIndexer, vm.eventBus) + vm.indexerService.SetLogger(vm.log.With("module", "indexer")) + if err := vm.indexerService.Start(); err != nil { + return err } - vm.genesis = genesis - return nil -} - -// InitGenesisChunks configures the environment -// and should be called on service startup. 
-func (vm *VM) initGenesisChunks() error { - if vm.genesis == nil { - return fmt.Errorf("empty genesis") + handshaker := consensus.NewHandshaker( + vm.stateStore, + vm.state, + vm.blockStore, + vm.genesis, + ) + handshaker.SetLogger(vm.log.With("module", "consensus")) + handshaker.SetEventBus(vm.eventBus) + if err := handshaker.Handshake(vm.app); err != nil { + return fmt.Errorf("error during handshake: %v", err) } - data, err := tmjson.Marshal(vm.genesis) + vm.state, err = vm.stateStore.Load() if err != nil { - return err + return nil } - for i := 0; i < len(data); i += genesisChunkSize { - end := i + genesisChunkSize - - if end > len(data) { - end = len(data) - } + vm.mempool = mempl.NewCListMempool( + config.DefaultMempoolConfig(), + vm.app.Mempool(), + vm.state.LastBlockHeight, + vm, + mempool.WithMetrics(mempool.NopMetrics()), + mempool.WithPreCheck(state.TxPreCheck(vm.state)), + mempool.WithPostCheck(state.TxPostCheck(vm.state)), + ) + vm.mempool.SetLogger(vm.log.With("module", "mempool")) + vm.mempool.EnableTxsAvailable() - vm.genChunks = append(vm.genChunks, base64.StdEncoding.EncodeToString(data[i:end])) + if vm.state.LastBlockHeight == 0 { + block, _ := vm.state.MakeBlock(1, types.Txs{types.Tx(genesisBytes)}, makeCommitMock(1, time.Now()), nil, proposerAddress) + blck := NewBlock(vm, block, choices.Processing) + blck.Accept(ctx) } + vm.log.Info("vm initialization completed") return nil } -func (vm *VM) createMempool() *mempl.CListMempool { - cfg := config.DefaultMempoolConfig() - mempool := mempl.NewCListMempool( - cfg, - vm.proxyApp.Mempool(), - vm.tmState.LastBlockHeight, - vm, - mempl.WithMetrics(mempl.NopMetrics()), // TODO: use prometheus metrics based on config - mempl.WithPreCheck(sm.TxPreCheck(*vm.tmState)), - mempl.WithPostCheck(sm.TxPostCheck(*vm.tmState)), - ) - mempoolLogger := vm.tmLogger.With("module", "mempool") - mempool.SetLogger(mempoolLogger) - - return mempool -} - -// NotifyBlockReady tells the consensus engine that a new block -// is ready to be created func (vm *VM) NotifyBlockReady() { select { case vm.toEngine <- common.PendingTxs: - vm.tmLogger.Debug("Notify consensys engine") + vm.log.Debug("notify consensys engine") default: - vm.tmLogger.Error("Failed to push PendingTxs notification to the consensus engine.") + vm.log.Error("failed to push PendingTxs notification to the consensus engine.") } } -func (vm *VM) doHandshake(genesis *types.GenesisDoc, consensusLogger log.Logger) error { - handshaker := cs.NewHandshaker(vm.stateStore, *vm.tmState, vm.blockStore, genesis) - handshaker.SetLogger(consensusLogger) - handshaker.SetEventBus(vm.eventBus) - if err := handshaker.Handshake(vm.proxyApp); err != nil { - return fmt.Errorf("error during handshake: %v", err) +// SetState communicates to VM its next state it starts +func (vm *VM) SetState(ctx context.Context, state snow.State) error { + vm.log.Debug("set state", "state", state.String()) + switch state { + case snow.Bootstrapping: + vm.bootstrapped.Set(false) + case snow.NormalOp: + vm.bootstrapped.Set(true) + default: + return snow.ErrUnknownState } return nil } -// readLastAccepted reads the last accepted hash from [acceptedBlockDB] and returns the -// last accepted block hash and height by reading directly from [vm.chaindb] instead of relying -// on [chain]. -// Note: assumes chaindb, ethConfig, and genesisHash have been initialized. 
-//func (vm *VM) readLastAccepted() (tmbytes.HexBytes, uint64, error) { -// // Attempt to load last accepted block to determine if it is necessary to -// // initialize state with the genesis block. -// lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) -// switch { -// case lastAcceptedErr == database.ErrNotFound: -// // If there is nothing in the database, return the genesis block hash and height -// return vm.genesisHash, 0, nil -// case lastAcceptedErr != nil: -// return common.Hash{}, 0, fmt.Errorf("failed to get last accepted block ID due to: %w", lastAcceptedErr) -// case len(lastAcceptedBytes) != common.HashLength: -// return common.Hash{}, 0, fmt.Errorf("last accepted bytes should have been length %d, but found %d", common.HashLength, len(lastAcceptedBytes)) -// default: -// lastAcceptedHash := common.BytesToHash(lastAcceptedBytes) -// height := rawdb.ReadHeaderNumber(vm.chaindb, lastAcceptedHash) -// if height == nil { -// return common.Hash{}, 0, fmt.Errorf("failed to retrieve header number of last accepted block: %s", lastAcceptedHash) -// } -// return lastAcceptedHash, *height, nil -// } -//} - -func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { - block, err := vm.newBlock(lastAcceptedBlock) - if err != nil { - return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) - } - block.status = choices.Accepted - - config := &chain.Config{ - DecidedCacheSize: decidedCacheSize, - MissingCacheSize: missingCacheSize, - UnverifiedCacheSize: unverifiedCacheSize, - //GetBlockIDAtHeight: vm.GetBlockIDAtHeight, - GetBlock: vm.getBlock, - UnmarshalBlock: vm.parseBlock, - BuildBlock: vm.buildBlock, - LastAcceptedBlock: block, - } +// Shutdown is called when the node is shutting down. +func (vm *VM) Shutdown(context.Context) error { + vm.log.Debug("call shutdown") + panic("implement me") +} - // Register chain state metrics - chainStateRegisterer := prometheus.NewRegistry() - state, err := chain.NewMeteredState(chainStateRegisterer, config) - if err != nil { - return fmt.Errorf("could not create metered state: %w", err) - } - vm.State = state +// Version returns the version of the VM. +func (vm *VM) Version(context.Context) (string, error) { + return Version.String(), nil +} - return vm.multiGatherer.Register(chainStateMetricsPrefix, chainStateRegisterer) +// Creates the HTTP handlers for custom VM network calls. +// +// This exposes handlers that the outside world can use to communicate with +// a static reference to the VM. Each handler has the path: +// [Address of node]/ext/VM/[VM ID]/[extension] +// +// Returns a mapping from [extension]s to HTTP handlers. +// +// Each extension can specify how locking is managed for convenience. +// +// For example, it might make sense to have an extension for creating +// genesis bytes this VM can interpret. +// +// Note: If this method is called, no other method will be called on this VM. +// Each registered VM will have a single instance created to handle static +// APIs. This instance will be handled separately from instances created to +// service an instance of a chain. +func (vm *VM) CreateStaticHandlers(context.Context) (map[string]*common.HTTPHandler, error) { + // ToDo: need to add implementation + return nil, nil } -func (vm *VM) initializeMetrics() error { - vm.multiGatherer = metrics.NewMultiGatherer() +// Creates the HTTP handlers for custom chain network calls. +// +// This exposes handlers that the outside world can use to communicate with +// the chain. 
Each handler has the path: +// [Address of node]/ext/bc/[chain ID]/[extension] +// +// Returns a mapping from [extension]s to HTTP handlers. +// +// Each extension can specify how locking is managed for convenience. +// +// For example, if this VM implements an account-based payments system, +// it have an extension called `accounts`, where clients could get +// information about their accounts. +func (vm *VM) CreateHandlers(context.Context) (map[string]*common.HTTPHandler, error) { + mux := http.NewServeMux() + rpcLogger := vm.log.With("module", "rpc-server") + rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) - if err := vm.ctx.Metrics.Register(vm.multiGatherer); err != nil { - return err + server := rpc.NewServer() + server.RegisterCodec(json.NewCodec(), "application/json") + server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") + if err := server.RegisterService(NewService(vm), Name); err != nil { + return nil, err } - return nil + return map[string]*common.HTTPHandler{ + "/rpc": { + LockOptions: common.WriteLock, + Handler: server, + }, + }, nil } -// parseBlock parses [b] into a block to be wrapped by ChainState. -func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { +// Attempt to load a block. +// +// If the block does not exist, database.ErrNotFound should be returned. +// +// It is expected that blocks that have been successfully verified should be +// returned correctly. It is also expected that blocks that have been +// accepted by the consensus engine should be able to be fetched. It is not +// required for blocks that have been rejected by the consensus engine to be +// able to be fetched. +func (vm *VM) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { + if b, ok := vm.verifiedBlocks[blkID]; ok { + return b, nil + } + b := vm.blockStore.LoadBlockByHash(blkID[:]) + vm.log.Debug("get block", "blkID", blkID.String(), "block", b) + return NewBlock(vm, b, choices.Accepted), nil +} + +// Attempt to create a block from a stream of bytes. +// +// The block should be represented by the full byte array, without extra +// bytes. +// +// It is expected for all historical blocks to be parseable. +func (vm *VM) ParseBlock(ctx context.Context, blockBytes []byte) (snowman.Block, error) { + vm.log.Debug("parse block") + protoBlock := new(tmproto.Block) - err := protoBlock.Unmarshal(b) - if err != nil { + if err := protoBlock.Unmarshal(blockBytes); err != nil { return nil, err } + vm.log.Debug("parse block", "protoBlock", protoBlock.Header.Height) - tmBlock, err := types.BlockFromProto(protoBlock) + block, err := types.BlockFromProto(protoBlock) if err != nil { return nil, err } + vm.log.Debug("parse block", "block", block.Hash()) - // Note: the status of block is set by ChainState - block, err := vm.newBlock(tmBlock) - if err != nil { - return nil, err + blk := NewBlock(vm, block, choices.Processing) + vm.log.Debug("parse block", "height", blk.Height(), "id", blk.ID()) + return blk, nil +} + +// Attempt to create a new block from data contained in the VM. +// +// If the VM doesn't want to issue a new block, an error should be +// returned. 
+func (vm *VM) BuildBlock(context.Context) (snowman.Block, error) { + vm.log.Debug("build block") + txs := vm.mempool.ReapMaxBytesMaxGas(-1, -1) + if len(txs) == 0 { + return nil, fmt.Errorf("no txs") } - return block, nil + height := vm.state.LastBlockHeight + 1 + commit := makeCommitMock(height, time.Now()) + block, _ := vm.state.MakeBlock(height, txs, commit, nil, proposerAddress) + + prev := vm.blockStore.LoadBlockByHash(vm.preferred[:]) + block.LastBlockID = types.BlockID{ + Hash: prev.Hash(), + PartSetHeader: prev.LastBlockID.PartSetHeader, + } + + blk := NewBlock(vm, block, choices.Processing) + vm.verifiedBlocks[blk.ID()] = blk + + vm.log.Debug("build block", "height", blk.Height(), "id", blk.ID()) + return blk, nil } -// getBlock attempts to retrieve block [id] from the VM to be wrapped -// by ChainState. -func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { - var hash []byte - copy(hash, id[:]) - tmBlock := vm.blockStore.LoadBlockByHash(hash) - // If [tmBlock] is nil, return [database.ErrNotFound] here - // so that the miss is considered cacheable. - if tmBlock == nil { - return nil, database.ErrNotFound +// Notify the VM of the currently preferred block. +// +// This should always be a block that has no children known to consensus. +func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { + vm.log.Debug("set preference", "blkID", blkID.String()) + vm.preferred = blkID + return nil +} + +// LastAccepted returns the ID of the last accepted block. +// +// If no blocks have been accepted by consensus yet, it is assumed there is +// a definitionally accepted block, the Genesis block, that will be +// returned. +func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { + height := vm.blockStore.Height() + block := vm.blockStore.LoadBlock(height) + if block == nil { + vm.log.Error("block store return empty block", "height", height) + return ids.Empty, errors.New("block not found") } - // Note: the status of block is set by ChainState - return vm.newBlock(tmBlock) + return ids.ID(block.Hash()), nil } func (vm *VM) applyBlock(block *Block) error { @@ -470,32 +574,27 @@ func (vm *VM) applyBlock(block *Block) error { return err } - if err := validateBlock(state, block.tmBlock); err != nil { + if err := validateBlock(state, block.Block); err != nil { return err } - abciResponses, err := execBlockOnProxyApp( - vm.tmLogger, - vm.proxyApp.Consensus(), - block.tmBlock, vm.stateStore, - state.InitialHeight, - ) + abciResponses, err := execBlockOnProxyApp(vm.log, vm.app.Consensus(), block.Block, vm.stateStore, state.InitialHeight) if err != nil { return err } // Save the results before we commit. - if err := vm.stateStore.SaveABCIResponses(block.tmBlock.Height, abciResponses); err != nil { + if err := vm.stateStore.SaveABCIResponses(block.Block.Height, abciResponses); err != nil { return err } blockID := types.BlockID{ - Hash: block.tmBlock.Hash(), - PartSetHeader: block.tmBlock.MakePartSet(types.BlockPartSizeBytes).Header(), + Hash: block.Block.Hash(), + PartSetHeader: block.Block.MakePartSet(types.BlockPartSizeBytes).Header(), } // Update the state with the block and responses. - state, err = updateState(state, blockID, &block.tmBlock.Header, abciResponses) + state, err = updateState(state, blockID, &block.Block.Header, abciResponses) if err != nil { return err } @@ -503,34 +602,34 @@ func (vm *VM) applyBlock(block *Block) error { // while mempool is Locked, flush to ensure all async requests have completed // in the ABCI app before Commit. 
if err := vm.mempool.FlushAppConn(); err != nil { - vm.tmLogger.Error("client error during mempool.FlushAppConn", "err", err) + vm.log.Error("client error during mempool.FlushAppConn", "err", err) return err } // Commit block, get hash back - res, err := vm.proxyApp.Consensus().CommitSync() + res, err := vm.app.Consensus().CommitSync() if err != nil { - vm.tmLogger.Error("client error during proxyAppConn.CommitSync", "err", err) + vm.log.Error("client error during proxyAppConn.CommitSync", "err", err) return err } // ResponseCommit has no error code - just data - vm.tmLogger.Info( + vm.log.Info( "committed state", "height", block.Height, - "num_txs", len(block.tmBlock.Txs), + "num_txs", len(block.Block.Txs), "app_hash", fmt.Sprintf("%X", res.Data), ) - deliverTxResponses := make([]*abciTypes.ResponseDeliverTx, len(block.tmBlock.Txs)) - for i := range block.tmBlock.Txs { + deliverTxResponses := make([]*abciTypes.ResponseDeliverTx, len(block.Block.Txs)) + for i := range block.Block.Txs { deliverTxResponses[i] = &abciTypes.ResponseDeliverTx{Code: abciTypes.CodeTypeOK} } // Update mempool. if err := vm.mempool.Update( - block.tmBlock.Height, - block.tmBlock.Txs, + block.Block.Height, + block.Block.Txs, deliverTxResponses, TxPreCheck(state), TxPostCheck(state), @@ -538,163 +637,12 @@ func (vm *VM) applyBlock(block *Block) error { return err } - vm.tmState.LastBlockHeight = block.tmBlock.Height + vm.state.LastBlockHeight = block.Block.Height if err := vm.stateStore.Save(state); err != nil { return err } - vm.blockStore.SaveBlock(block.tmBlock, block.tmBlock.MakePartSet(types.BlockPartSizeBytes), block.tmBlock.LastCommit) - - fireEvents(vm.tmLogger, vm.eventBus, block.tmBlock, abciResponses) - return nil -} - -// buildBlock builds a block to be wrapped by ChainState -func (vm *VM) buildBlock(_ context.Context) (snowman.Block, error) { - txs := vm.mempool.ReapMaxBytesMaxGas(-1, -1) - if len(txs) == 0 { - return nil, errNoPendingTxs - } - height := vm.tmState.LastBlockHeight + 1 - - commit := makeCommitMock(height, time.Now()) - block, _ := vm.tmState.MakeBlock(height, txs, commit, nil, proposerAddress) - - // Note: the status of block is set by ChainState - blk, err := vm.newBlock(block) - blk.SetStatus(choices.Processing) - if err != nil { - return nil, err - } - vm.tmLogger.Debug(fmt.Sprintf("Built block %s", blk.ID())) - - return blk, nil -} - -func (vm *VM) AppGossip(_ context.Context, nodeID ids.NodeID, msg []byte) error { - return nil -} - -func (vm *VM) SetState(ctx context.Context, state snow.State) error { - return nil -} - -func (vm *VM) Shutdown(ctx context.Context) error { - // first stop the non-reactor services - if err := vm.eventBus.Stop(); err != nil { - return fmt.Errorf("Error closing eventBus: %w ", err) - } - if err := vm.indexerService.Stop(); err != nil { - return fmt.Errorf("Error closing indexerService: %w ", err) - } - //TODO: investigate wal configuration - // stop mempool WAL - //if vm.config.Mempool.WalEnabled() { - // n.mempool.CloseWAL() - //} - //if n.prometheusSrv != nil { - // if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { - // // Error from closing listeners, or context timeout: - // n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) - // } - //} - if err := vm.blockStore.Close(); err != nil { - return fmt.Errorf("Error closing blockStore: %w ", err) - } - if err := vm.stateStore.Close(); err != nil { - return fmt.Errorf("Error closing stateStore: %w ", err) - } - return nil - //timestampVM and deprecated landslide - //if 
vm.state == nil { - // return nil - //} - // - //return vm.state.Close() // close versionDB - - //coreth - //if vm.ctx == nil { - // return nil - //} - //vm.Network.Shutdown() - //if err := vm.StateSyncClient.Shutdown(); err != nil { - // log.Error("error stopping state syncer", "err", err) - //} - //close(vm.shutdownChan) - //vm.eth.Stop() - //vm.shutdownWg.Wait() - //return nil -} - -func (vm *VM) Version(ctx context.Context) (string, error) { - return Version.String(), nil -} - -func (vm *VM) CreateStaticHandlers(ctx context.Context) (map[string]*common.HTTPHandler, error) { - //TODO implement me - return nil, nil -} - -func (vm *VM) CreateHandlers(_ context.Context) (map[string]*common.HTTPHandler, error) { - mux := http.NewServeMux() - rpcLogger := vm.tmLogger.With("module", "rpc-server") - rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) - - server := rpc.NewServer() - server.RegisterCodec(json.NewCodec(), "application/json") - server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") - if err := server.RegisterService(NewService(vm), Name); err != nil { - return nil, err - } - - return map[string]*common.HTTPHandler{ - "/rpc": { - LockOptions: common.WriteLock, - Handler: server, - }, - }, nil -} + vm.blockStore.SaveBlock(block.Block, block.Block.MakePartSet(types.BlockPartSizeBytes), block.Block.LastCommit) -func (vm *VM) ProxyApp() proxy.AppConns { - return vm.proxyApp -} - -func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { - //TODO implement me - return nil -} - -func (vm *VM) AppRequest(_ context.Context, nodeID ids.NodeID, requestID uint32, time time.Time, request []byte) error { - return nil -} - -// This VM doesn't (currently) have any app-specific messages -func (vm *VM) AppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { - return nil -} - -// This VM doesn't (currently) have any app-specific messages -func (vm *VM) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { - return nil -} - -func (vm *VM) CrossChainAppRequest(_ context.Context, _ ids.ID, _ uint32, deadline time.Time, request []byte) error { + fireEvents(vm.log, vm.eventBus, block.Block, abciResponses) return nil } - -func (vm *VM) CrossChainAppRequestFailed(_ context.Context, _ ids.ID, _ uint32) error { - return nil -} - -func (vm *VM) CrossChainAppResponse(_ context.Context, _ ids.ID, _ uint32, response []byte) error { - return nil -} - -func (vm *VM) Connected(_ context.Context, id ids.NodeID, nodeVersion *version.Application) error { - return nil // noop -} - -func (vm *VM) Disconnected(_ context.Context, id ids.NodeID) error { - return nil // noop -} - -func (vm *VM) HealthCheck(ctx context.Context) (interface{}, error) { return nil, nil } diff --git a/vm_/block.go b/vm_/block.go new file mode 100644 index 000000000..771687cb2 --- /dev/null +++ b/vm_/block.go @@ -0,0 +1,95 @@ +package vm + +import ( + "context" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/consideritdone/landslidecore/types" +) + +var ( + _ snowman.Block = &Block{} +) + +// Block implements the snowman.Block interface +type Block struct { + id ids.ID + tmBlock *types.Block + vm *VM + status choices.Status +} + +// newBlock returns a new Block wrapping the Tendermint Block type and implementing the snowman.Block interface +func (vm *VM) newBlock(tmBlock *types.Block) (*Block, error) { + var id ids.ID + 
copy(id[:], tmBlock.Hash()) + + return &Block{ + id: id, + tmBlock: tmBlock, + vm: vm, + }, nil +} + +func (b *Block) ID() ids.ID { + return b.id +} + +func (b *Block) Accept(ctx context.Context) error { + b.SetStatus(choices.Accepted) + return b.vm.applyBlock(b) +} + +func (b *Block) Reject(ctx context.Context) error { + b.SetStatus(choices.Rejected) + + return nil +} + +func (b *Block) SetStatus(status choices.Status) { + b.status = status +} + +func (b *Block) Status() choices.Status { + return b.status +} + +func (b *Block) Parent() ids.ID { + var id ids.ID + parentHash := b.tmBlock.Header.LastBlockID.Hash + copy(id[:], parentHash) + + return id +} + +func (b *Block) Verify(context.Context) error { + if b == nil || b.tmBlock == nil { + return errInvalidBlock + } + + return b.tmBlock.ValidateBasic() +} + +func (b *Block) Bytes() []byte { + block, err := b.tmBlock.ToProto() + if err != nil { + panic(err) + } + data, err := block.Marshal() + if err != nil { + panic(err) + } + + return data +} + +func (b *Block) Height() uint64 { + return uint64(b.tmBlock.Height) +} + +func (b *Block) Timestamp() time.Time { + return b.tmBlock.Time +} diff --git a/vm/block_utils.go b/vm_/block_utils.go similarity index 100% rename from vm/block_utils.go rename to vm_/block_utils.go diff --git a/vm/cmd/main.go b/vm_/cmd/main.go similarity index 100% rename from vm/cmd/main.go rename to vm_/cmd/main.go diff --git a/vm/data/vm_test_genesis.json b/vm_/data/vm_test_genesis.json similarity index 100% rename from vm/data/vm_test_genesis.json rename to vm_/data/vm_test_genesis.json diff --git a/vm_/database.go b/vm_/database.go new file mode 100644 index 000000000..07b62ff18 --- /dev/null +++ b/vm_/database.go @@ -0,0 +1,119 @@ +package vm + +import ( + "github.com/ava-labs/avalanchego/database" + dbm "github.com/tendermint/tm-db" +) + +var ( + _ dbm.DB = &Database{} +) + +type ( + Database struct { + database.Database + } + Iterator struct { + database.Iterator + + start []byte + end []byte + } + Batch struct { + database.Batch + } +) + +func (db Database) Get(key []byte) ([]byte, error) { + res, err := db.Database.Get(key) + if err != nil { + if err.Error() == "not found" { + return nil, nil + } + return nil, err + } + return res, nil +} + +func (db Database) Set(key []byte, value []byte) error { + return db.Database.Put(key, value) +} + +func (db Database) SetSync(key []byte, value []byte) error { + return db.Database.Put(key, value) +} + +func (db Database) DeleteSync(key []byte) error { + return db.Database.Delete(key) +} + +func (db Database) Iterator(start, end []byte) (dbm.Iterator, error) { + return Iterator{db.Database.NewIteratorWithStart(start), start, end}, nil +} + +func (db Database) ReverseIterator(start, end []byte) (dbm.Iterator, error) { + return Iterator{db.Database.NewIteratorWithStart(start), start, end}, nil +} + +func (db Database) NewBatch() dbm.Batch { + return Batch{db.Database.NewBatch()} +} + +func (db Database) Print() error { + //TODO implement me + return nil +} + +func (db Database) Stats() map[string]string { + //TODO implement me + return nil +} + +func (iter Iterator) Domain() (start []byte, end []byte) { + return iter.start, iter.end +} + +func (iter Iterator) Valid() bool { + return iter.Iterator.Error() == nil && len(iter.Iterator.Key()) > 0 +} + +func (iter Iterator) Next() { + iter.Iterator.Next() +} + +func (iter Iterator) Key() (key []byte) { + return iter.Iterator.Key() +} + +func (iter Iterator) Value() (value []byte) { + return iter.Iterator.Value() +} + +func (iter 
Iterator) Error() error { + return iter.Iterator.Error() +} + +func (iter Iterator) Close() error { + iter.Iterator.Release() + return iter.Error() +} + +func (b Batch) Set(key, value []byte) error { + return b.Batch.Put(key, value) +} + +func (b Batch) Delete(key []byte) error { + return b.Batch.Delete(key) +} + +func (b Batch) Write() error { + return b.Batch.Write() +} + +func (b Batch) WriteSync() error { + return b.Batch.Write() +} + +func (b Batch) Close() error { + return nil +} diff --git a/vm/scripts/build.sh b/vm_/scripts/build.sh similarity index 100% rename from vm/scripts/build.sh rename to vm_/scripts/build.sh diff --git a/vm/scripts/build_test.sh b/vm_/scripts/build_test.sh similarity index 100% rename from vm/scripts/build_test.sh rename to vm_/scripts/build_test.sh diff --git a/vm_/service.go b/vm_/service.go new file mode 100644 index 000000000..7caa1bcbd --- /dev/null +++ b/vm_/service.go @@ -0,0 +1,727 @@ +package vm + +import ( + "context" + "errors" + "fmt" + "net/http" + "sort" + "time" + + abci "github.com/consideritdone/landslidecore/abci/types" + tmbytes "github.com/consideritdone/landslidecore/libs/bytes" + tmmath "github.com/consideritdone/landslidecore/libs/math" + tmquery "github.com/consideritdone/landslidecore/libs/pubsub/query" + mempl "github.com/consideritdone/landslidecore/mempool" + "github.com/consideritdone/landslidecore/p2p" + "github.com/consideritdone/landslidecore/proxy" + "github.com/consideritdone/landslidecore/rpc/core" + ctypes "github.com/consideritdone/landslidecore/rpc/core/types" + "github.com/consideritdone/landslidecore/types" +) + +type ( + LocalService struct { + vm *VM + } + + Service interface { + ABCIService + HistoryService + NetworkService + SignService + StatusService + MempoolService + } + + ABCIQueryArgs struct { + Path string `json:"path"` + Data tmbytes.HexBytes `json:"data"` + } + + ABCIQueryOptions struct { + Height int64 `json:"height"` + Prove bool `json:"prove"` + } + + ABCIQueryWithOptionsArgs struct { + Path string `json:"path"` + Data tmbytes.HexBytes `json:"data"` + Opts ABCIQueryOptions `json:"opts"` + } + + BroadcastTxArgs struct { + Tx types.Tx `json:"tx"` + } + + ABCIService interface { + // Reading from abci app + ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error + ABCIQuery(_ *http.Request, args *ABCIQueryArgs, reply *ctypes.ResultABCIQuery) error + ABCIQueryWithOptions(_ *http.Request, args *ABCIQueryWithOptionsArgs, reply *ctypes.ResultABCIQuery) error + + // Writing to abci app + BroadcastTxCommit(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTxCommit) error + BroadcastTxAsync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error + BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error + } + + BlockHeightArgs struct { + Height *int64 `json:"height"` + } + + BlockHashArgs struct { + Hash []byte `json:"hash"` + } + + CommitArgs struct { + Height *int64 `json:"height"` + } + + ValidatorsArgs struct { + Height *int64 `json:"height"` + Page *int `json:"page"` + PerPage *int `json:"perPage"` + } + + TxArgs struct { + Hash []byte `json:"hash"` + Prove bool `json:"prove"` + } + + TxSearchArgs struct { + Query string `json:"query"` + Prove bool `json:"prove"` + Page *int `json:"page"` + PerPage *int `json:"perPage"` + OrderBy string `json:"orderBy"` + } + + BlockSearchArgs struct { + Query string `json:"query"` + Page *int `json:"page"` + PerPage *int `json:"perPage"` + OrderBy string `json:"orderBy"` + } 
+ + SignService interface { + Block(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlock) error + BlockByHash(_ *http.Request, args *BlockHashArgs, reply *ctypes.ResultBlock) error + BlockResults(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlockResults) error + Commit(_ *http.Request, args *CommitArgs, reply *ctypes.ResultCommit) error + Validators(_ *http.Request, args *ValidatorsArgs, reply *ctypes.ResultValidators) error + Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error + TxSearch(_ *http.Request, args *TxSearchArgs, reply *ctypes.ResultTxSearch) error + BlockSearch(_ *http.Request, args *BlockSearchArgs, reply *ctypes.ResultBlockSearch) error + } + + BlockchainInfoArgs struct { + MinHeight int64 `json:"minHeight"` + MaxHeight int64 `json:"maxHeight"` + } + + GenesisChunkedArgs struct { + Chunk uint `json:"chunk"` + } + + HistoryService interface { + BlockchainInfo(_ *http.Request, args *BlockchainInfoArgs, reply *ctypes.ResultBlockchainInfo) error + Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error + GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error + } + + StatusService interface { + Status(_ *http.Request, _ *struct{}, reply *ctypes.ResultStatus) error + } + + ConsensusParamsArgs struct { + Height *int64 `json:"height"` + } + + NetworkService interface { + NetInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultNetInfo) error + DumpConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultDumpConsensusState) error + ConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultConsensusState) error + ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error + Health(_ *http.Request, _ *struct{}, reply *ctypes.ResultHealth) error + } + + UnconfirmedTxsArgs struct { + Limit *int `json:"limit"` + } + + CheckTxArgs struct { + Tx []byte `json:"tx"` + } + + MempoolService interface { + UnconfirmedTxs(_ *http.Request, args *UnconfirmedTxsArgs, reply *ctypes.ResultUnconfirmedTxs) error + NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply *ctypes.ResultUnconfirmedTxs) error + CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error + } +) + +var ( + DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false} +) + +func NewService(vm *VM) Service { + return &LocalService{vm} +} + +func (s *LocalService) ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error { + resInfo, err := s.vm.proxyApp.Query().InfoSync(proxy.RequestInfo) + if err != nil { + return err + } + reply.Response = *resInfo + return nil +} + +func (s *LocalService) ABCIQuery(req *http.Request, args *ABCIQueryArgs, reply *ctypes.ResultABCIQuery) error { + return s.ABCIQueryWithOptions(req, &ABCIQueryWithOptionsArgs{args.Path, args.Data, DefaultABCIQueryOptions}, reply) +} + +func (s *LocalService) ABCIQueryWithOptions( + _ *http.Request, + args *ABCIQueryWithOptionsArgs, + reply *ctypes.ResultABCIQuery, +) error { + resQuery, err := s.vm.proxyApp.Query().QuerySync(abci.RequestQuery{ + Path: args.Path, + Data: args.Data, + Height: args.Opts.Height, + Prove: args.Opts.Prove, + }) + if err != nil { + return err + } + reply.Response = *resQuery + return nil +} + +func (s *LocalService) BroadcastTxCommit( + _ *http.Request, + args *BroadcastTxArgs, + reply *ctypes.ResultBroadcastTxCommit, +) error { + subscriber := "" + + // Subscribe to tx being committed in block. 
+ subCtx, cancel := context.WithTimeout(context.Background(), core.SubscribeTimeout) + defer cancel() + + q := types.EventQueryTxFor(args.Tx) + deliverTxSub, err := s.vm.eventBus.Subscribe(subCtx, subscriber, q) + if err != nil { + err = fmt.Errorf("failed to subscribe to tx: %w", err) + s.vm.tmLogger.Error("Error on broadcast_tx_commit", "err", err) + return err + } + + defer func() { + if err := s.vm.eventBus.Unsubscribe(context.Background(), subscriber, q); err != nil { + s.vm.tmLogger.Error("Error unsubscribing from eventBus", "err", err) + } + }() + + // Broadcast tx and wait for CheckTx result + checkTxResCh := make(chan *abci.Response, 1) + err = s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { + checkTxResCh <- res + }, mempl.TxInfo{}) + if err != nil { + s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) + return fmt.Errorf("error on broadcastTxCommit: %v", err) + } + checkTxResMsg := <-checkTxResCh + checkTxRes := checkTxResMsg.GetCheckTx() + if checkTxRes.Code != abci.CodeTypeOK { + *reply = ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxRes, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: args.Tx.Hash(), + } + return nil + } + + // Wait for the tx to be included in a block or timeout. + select { + case msg := <-deliverTxSub.Out(): // The tx was included in a block. + deliverTxRes := msg.Data().(types.EventDataTx) + *reply = ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxRes, + DeliverTx: deliverTxRes.Result, + Hash: args.Tx.Hash(), + Height: deliverTxRes.Height, + } + return nil + case <-deliverTxSub.Cancelled(): + var reason string + if deliverTxSub.Err() == nil { + reason = "Tendermint exited" + } else { + reason = deliverTxSub.Err().Error() + } + err = fmt.Errorf("deliverTxSub was cancelled (reason: %s)", reason) + s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) + return err + // TODO: use config for timeout + case <-time.After(10 * time.Second): + err = errors.New("timed out waiting for tx to be included in a block") + s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) + return err + } +} + +func (s *LocalService) BroadcastTxAsync( + _ *http.Request, + args *BroadcastTxArgs, + reply *ctypes.ResultBroadcastTx, +) error { + err := s.vm.mempool.CheckTx(args.Tx, nil, mempl.TxInfo{}) + if err != nil { + return err + } + reply.Hash = args.Tx.Hash() + return nil +} + +func (s *LocalService) BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error { + resCh := make(chan *abci.Response, 1) + err := s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { + s.vm.tmLogger.With("module", "service").Debug("handled response from checkTx") + resCh <- res + }, mempl.TxInfo{}) + if err != nil { + return err + } + res := <-resCh + r := res.GetCheckTx() + + reply.Code = r.Code + reply.Data = r.Data + reply.Log = r.Log + reply.Codespace = r.Codespace + reply.Hash = args.Tx.Hash() + + return nil +} + +func (s *LocalService) Block(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlock) error { + height, err := getHeight(s.vm.blockStore, args.Height) + if err != nil { + return err + } + block := s.vm.blockStore.LoadBlock(height) + blockMeta := s.vm.blockStore.LoadBlockMeta(height) + + if blockMeta != nil { + reply.BlockID = blockMeta.BlockID + } + reply.Block = block + return nil +} + +func (s *LocalService) BlockByHash(_ *http.Request, args *BlockHashArgs, reply *ctypes.ResultBlock) error { + block := s.vm.blockStore.LoadBlockByHash(args.Hash) + if block == nil { + reply.BlockID = types.BlockID{} 
+ reply.Block = nil + return nil + } + blockMeta := s.vm.blockStore.LoadBlockMeta(block.Height) + reply.BlockID = blockMeta.BlockID + reply.Block = block + return nil +} + +func (s *LocalService) BlockResults(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlockResults) error { + height, err := getHeight(s.vm.blockStore, args.Height) + if err != nil { + return err + } + + results, err := s.vm.stateStore.LoadABCIResponses(height) + if err != nil { + return err + } + + reply.Height = height + reply.TxsResults = results.DeliverTxs + reply.BeginBlockEvents = results.BeginBlock.Events + reply.EndBlockEvents = results.EndBlock.Events + reply.ValidatorUpdates = results.EndBlock.ValidatorUpdates + reply.ConsensusParamUpdates = results.EndBlock.ConsensusParamUpdates + return nil +} + +func (s *LocalService) Commit(_ *http.Request, args *CommitArgs, reply *ctypes.ResultCommit) error { + height, err := getHeight(s.vm.blockStore, args.Height) + if err != nil { + return err + } + + blockMeta := s.vm.blockStore.LoadBlockMeta(height) + if blockMeta == nil { + return nil + } + + header := blockMeta.Header + commit := s.vm.blockStore.LoadBlockCommit(height) + res := ctypes.NewResultCommit(&header, commit, !(height == s.vm.blockStore.Height())) + + reply.SignedHeader = res.SignedHeader + reply.CanonicalCommit = res.CanonicalCommit + return nil +} + +func (s *LocalService) Validators(_ *http.Request, args *ValidatorsArgs, reply *ctypes.ResultValidators) error { + height, err := getHeight(s.vm.blockStore, args.Height) + if err != nil { + return err + } + + validators, err := s.vm.stateStore.LoadValidators(height) + if err != nil { + return err + } + + totalCount := len(validators.Validators) + perPage := validatePerPage(args.PerPage) + page, err := validatePage(args.Page, perPage, totalCount) + if err != nil { + return err + } + + skipCount := validateSkipCount(page, perPage) + + reply.BlockHeight = height + reply.Validators = validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] + reply.Count = len(reply.Validators) + reply.Total = totalCount + return nil +} + +func (s *LocalService) Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error { + r, err := s.vm.txIndexer.Get(args.Hash) + if err != nil { + return err + } + + if r == nil { + return fmt.Errorf("tx (%X) not found", args.Hash) + } + + height := r.Height + index := r.Index + + var proof types.TxProof + if args.Prove { + block := s.vm.blockStore.LoadBlock(height) + proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines + } + + reply.Hash = args.Hash + reply.Height = height + reply.Index = index + reply.TxResult = r.Result + reply.Tx = r.Tx + reply.Proof = proof + return nil +} + +func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ctypes.ResultTxSearch) error { + q, err := tmquery.New(args.Query) + if err != nil { + return err + } + + var ctx context.Context + if req != nil { + ctx = req.Context() + } else { + ctx = context.Background() + } + + results, err := s.vm.txIndexer.Search(ctx, q) + if err != nil { + return err + } + + // sort results (must be done before pagination) + switch args.OrderBy { + case "desc": + sort.Slice(results, func(i, j int) bool { + if results[i].Height == results[j].Height { + return results[i].Index > results[j].Index + } + return results[i].Height > results[j].Height + }) + case "asc", "": + sort.Slice(results, func(i, j int) bool { + if results[i].Height == results[j].Height { + return results[i].Index < 
results[j].Index + } + return results[i].Height < results[j].Height + }) + default: + return errors.New("expected order_by to be either `asc` or `desc` or empty") + } + + // paginate results + totalCount := len(results) + perPage := validatePerPage(args.PerPage) + + page, err := validatePage(args.Page, perPage, totalCount) + if err != nil { + return err + } + + skipCount := validateSkipCount(page, perPage) + pageSize := tmmath.MinInt(perPage, totalCount-skipCount) + + apiResults := make([]*ctypes.ResultTx, 0, pageSize) + for i := skipCount; i < skipCount+pageSize; i++ { + r := results[i] + + var proof types.TxProof + if args.Prove { + block := s.vm.blockStore.LoadBlock(r.Height) + proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines + } + + apiResults = append(apiResults, &ctypes.ResultTx{ + Hash: types.Tx(r.Tx).Hash(), + Height: r.Height, + Index: r.Index, + TxResult: r.Result, + Tx: r.Tx, + Proof: proof, + }) + } + + reply.Txs = apiResults + reply.TotalCount = totalCount + return nil +} + +func (s *LocalService) BlockSearch(req *http.Request, args *BlockSearchArgs, reply *ctypes.ResultBlockSearch) error { + q, err := tmquery.New(args.Query) + if err != nil { + return err + } + + var ctx context.Context + if req != nil { + ctx = req.Context() + } else { + ctx = context.Background() + } + + results, err := s.vm.blockIndexer.Search(ctx, q) + if err != nil { + return err + } + + // sort results (must be done before pagination) + switch args.OrderBy { + case "desc", "": + sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) + + case "asc": + sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) + + default: + return errors.New("expected order_by to be either `asc` or `desc` or empty") + } + + // paginate results + totalCount := len(results) + perPage := validatePerPage(args.PerPage) + + page, err := validatePage(args.Page, perPage, totalCount) + if err != nil { + return err + } + + skipCount := validateSkipCount(page, perPage) + pageSize := tmmath.MinInt(perPage, totalCount-skipCount) + + apiResults := make([]*ctypes.ResultBlock, 0, pageSize) + for i := skipCount; i < skipCount+pageSize; i++ { + block := s.vm.blockStore.LoadBlock(results[i]) + if block != nil { + blockMeta := s.vm.blockStore.LoadBlockMeta(block.Height) + if blockMeta != nil { + apiResults = append(apiResults, &ctypes.ResultBlock{ + Block: block, + BlockID: blockMeta.BlockID, + }) + } + } + } + + reply.Blocks = apiResults + reply.TotalCount = totalCount + return nil +} + +func (s *LocalService) BlockchainInfo( + _ *http.Request, + args *BlockchainInfoArgs, + reply *ctypes.ResultBlockchainInfo, +) error { + // maximum 20 block metas + const limit int64 = 20 + var err error + args.MinHeight, args.MaxHeight, err = filterMinMax( + s.vm.blockStore.Base(), + s.vm.blockStore.Height(), + args.MinHeight, + args.MaxHeight, + limit) + if err != nil { + return err + } + s.vm.tmLogger.Debug("BlockchainInfoHandler", "maxHeight", args.MaxHeight, "minHeight", args.MinHeight) + + var blockMetas []*types.BlockMeta + for height := args.MaxHeight; height >= args.MinHeight; height-- { + blockMeta := s.vm.blockStore.LoadBlockMeta(height) + blockMetas = append(blockMetas, blockMeta) + } + + reply.LastHeight = s.vm.blockStore.Height() + reply.BlockMetas = blockMetas + return nil +} + +func (s *LocalService) Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error { + if len(s.vm.genChunks) > 1 { + return errors.New("genesis response is large, please use the 
genesis_chunked API instead") + } + + reply.Genesis = s.vm.genesis + return nil +} + +func (s *LocalService) GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error { + if s.vm.genChunks == nil { + return fmt.Errorf("service configuration error, genesis chunks are not initialized") + } + + if len(s.vm.genChunks) == 0 { + return fmt.Errorf("service configuration error, there are no chunks") + } + + id := int(args.Chunk) + + if id > len(s.vm.genChunks)-1 { + return fmt.Errorf("there are %d chunks, %d is invalid", len(s.vm.genChunks)-1, id) + } + + reply.TotalChunks = len(s.vm.genChunks) + reply.ChunkNumber = id + reply.Data = s.vm.genChunks[id] + return nil +} + +func (s *LocalService) Status(_ *http.Request, _ *struct{}, reply *ctypes.ResultStatus) error { + var ( + earliestBlockHeight int64 + earliestBlockHash tmbytes.HexBytes + earliestAppHash tmbytes.HexBytes + earliestBlockTimeNano int64 + ) + + if earliestBlockMeta := s.vm.blockStore.LoadBaseMeta(); earliestBlockMeta != nil { + earliestBlockHeight = earliestBlockMeta.Header.Height + earliestAppHash = earliestBlockMeta.Header.AppHash + earliestBlockHash = earliestBlockMeta.BlockID.Hash + earliestBlockTimeNano = earliestBlockMeta.Header.Time.UnixNano() + } + + var ( + latestBlockHash tmbytes.HexBytes + latestAppHash tmbytes.HexBytes + latestBlockTimeNano int64 + + latestHeight = s.vm.blockStore.Height() + ) + + if latestHeight != 0 { + if latestBlockMeta := s.vm.blockStore.LoadBlockMeta(latestHeight); latestBlockMeta != nil { + latestBlockHash = latestBlockMeta.BlockID.Hash + latestAppHash = latestBlockMeta.Header.AppHash + latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() + } + } + + reply.NodeInfo = p2p.DefaultNodeInfo{ + DefaultNodeID: p2p.ID(s.vm.ctx.NodeID.String()), + Network: fmt.Sprintf("%d", s.vm.ctx.NetworkID), + } + reply.SyncInfo = ctypes.SyncInfo{ + LatestBlockHash: latestBlockHash, + LatestAppHash: latestAppHash, + LatestBlockHeight: latestHeight, + LatestBlockTime: time.Unix(0, latestBlockTimeNano), + EarliestBlockHash: earliestBlockHash, + EarliestAppHash: earliestAppHash, + EarliestBlockHeight: earliestBlockHeight, + EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), + } + return nil +} + +// ToDo: no peers, because it's vm +func (s *LocalService) NetInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultNetInfo) error { + return nil +} + +// ToDo: we doesn't have consensusState +func (s *LocalService) DumpConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultDumpConsensusState) error { + return nil +} + +// ToDo: we doesn't have consensusState +func (s *LocalService) ConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultConsensusState) error { + return nil +} + +func (s *LocalService) ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error { + reply.BlockHeight = s.vm.blockStore.Height() + reply.ConsensusParams = *s.vm.genesis.ConsensusParams + return nil +} + +func (s *LocalService) Health(_ *http.Request, _ *struct{}, reply *ctypes.ResultHealth) error { + *reply = ctypes.ResultHealth{} + return nil +} + +func (s *LocalService) UnconfirmedTxs(_ *http.Request, args *UnconfirmedTxsArgs, reply *ctypes.ResultUnconfirmedTxs) error { + limit := validatePerPage(args.Limit) + txs := s.vm.mempool.ReapMaxTxs(limit) + reply.Count = len(txs) + reply.Total = s.vm.mempool.Size() + reply.Txs = txs + return nil +} + +func (s *LocalService) NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply 
*ctypes.ResultUnconfirmedTxs) error { + reply.Count = s.vm.mempool.Size() + reply.Total = s.vm.mempool.Size() + reply.TotalBytes = s.vm.mempool.TxsBytes() + return nil +} + +func (s *LocalService) CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error { + res, err := s.vm.proxyApp.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: args.Tx}) + if err != nil { + return err + } + reply.ResponseCheckTx = *res + return nil +} diff --git a/vm/service_test.go b/vm_/service_test.go similarity index 100% rename from vm/service_test.go rename to vm_/service_test.go diff --git a/vm/service_utils.go b/vm_/service_utils.go similarity index 100% rename from vm/service_utils.go rename to vm_/service_utils.go diff --git a/vm_/vm.go b/vm_/vm.go new file mode 100644 index 000000000..20077f996 --- /dev/null +++ b/vm_/vm.go @@ -0,0 +1,700 @@ +package vm + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net/http" + "time" + + "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/components/chain" + "github.com/gorilla/rpc/v2" + "github.com/prometheus/client_golang/prometheus" + dbm "github.com/tendermint/tm-db" + + abciTypes "github.com/consideritdone/landslidecore/abci/types" + "github.com/consideritdone/landslidecore/config" + cs "github.com/consideritdone/landslidecore/consensus" + tmjson "github.com/consideritdone/landslidecore/libs/json" + "github.com/consideritdone/landslidecore/libs/log" + mempl "github.com/consideritdone/landslidecore/mempool" + "github.com/consideritdone/landslidecore/node" + tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" + "github.com/consideritdone/landslidecore/proxy" + rpccore "github.com/consideritdone/landslidecore/rpc/core" + rpcserver "github.com/consideritdone/landslidecore/rpc/jsonrpc/server" + sm "github.com/consideritdone/landslidecore/state" + "github.com/consideritdone/landslidecore/state/indexer" + blockidxkv "github.com/consideritdone/landslidecore/state/indexer/block/kv" + "github.com/consideritdone/landslidecore/state/txindex" + txidxkv "github.com/consideritdone/landslidecore/state/txindex/kv" + "github.com/consideritdone/landslidecore/store" + "github.com/consideritdone/landslidecore/types" +) + +var ( + _ block.ChainVM = &VM{} + + Version = &version.Semantic{ + Major: 0, + Minor: 1, + Patch: 1, + } +) + +const ( + Name = "landslide" + + decidedCacheSize = 100 + missingCacheSize = 50 + unverifiedCacheSize = 50 + + // genesisChunkSize is the maximum size, in bytes, of each + // chunk in the genesis structure for the chunked API + genesisChunkSize = 16 * 1024 * 1024 // 16 +) + +var ( + chainStateMetricsPrefix = "chain_state" + + lastAcceptedKey = []byte("last_accepted_key") + blockStoreDBPrefix = []byte("blockstore") + stateDBPrefix = []byte("state") + txIndexerDBPrefix = []byte("tx_index") + blockIndexerDBPrefix = []byte("block_events") + + proposerAddress = []byte{0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} +) + +var ( + errInvalidBlock = errors.New("invalid block") + errNoPendingTxs = errors.New("there is no txs to include to block") +) + +type VM struct { + ctx *snow.Context + dbManager manager.Manager + + toEngine chan<- common.Message + + // *chain.State helps to implement the VM interface by wrapping blocks + // with an efficient caching layer. + *chain.State + + tmLogger log.Logger + + blockStoreDB dbm.DB + blockStore *store.BlockStore + + stateDB dbm.DB + stateStore sm.Store + tmState *sm.State + + mempool mempl.Mempool + + // Tendermint Application + app abciTypes.Application + + // Tendermint proxy app + proxyApp proxy.AppConns + + // EventBus is a common bus for all events going through the system. + eventBus *types.EventBus + + // [acceptedBlockDB] is the database to store the last accepted + // block. + acceptedBlockDB database.Database + + genesis *types.GenesisDoc + // cache of chunked genesis data. + genChunks []string + + // Metrics + multiGatherer metrics.MultiGatherer + + txIndexer txindex.TxIndexer + txIndexerDB dbm.DB + blockIndexer indexer.BlockIndexer + blockIndexerDB dbm.DB + indexerService *txindex.IndexerService + + clock mockable.Clock + + appCreator func(ids.ID) (abciTypes.Application, error) +} + +func NewVM(app abciTypes.Application) *VM { + return &VM{app: app, appCreator: nil} +} + +func NewVMWithAppCreator(creator func(chainID ids.ID) (abciTypes.Application, error)) *VM { + return &VM{app: nil, appCreator: creator} +} + +func (vm *VM) Initialize( + _ context.Context, + chainCtx *snow.Context, + dbManager manager.Manager, + genesisBytes []byte, + upgradeBytes []byte, + configBytes []byte, + toEngine chan<- common.Message, + fxs []*common.Fx, + appSender common.AppSender, +) error { + if vm.appCreator != nil { + app, err := vm.appCreator(chainCtx.ChainID) + if err != nil { + return err + } + vm.app = app + } + + vm.ctx = chainCtx + vm.tmLogger = log.NewTMLogger(vm.ctx.Log) + vm.dbManager = dbManager + + vm.toEngine = toEngine + + baseDB := dbManager.Current().Database + + vm.blockStoreDB = Database{prefixdb.NewNested(blockStoreDBPrefix, baseDB)} + vm.blockStore = store.NewBlockStore(vm.blockStoreDB) + + vm.stateDB = Database{prefixdb.NewNested(stateDBPrefix, baseDB)} + vm.stateStore = sm.NewStore(vm.stateDB) + + if err := vm.initGenesis(genesisBytes); err != nil { + return err + } + + if err := vm.initGenesisChunks(); err != nil { + return err + } + + state, err := vm.stateStore.LoadFromDBOrGenesisDoc(vm.genesis) + if err != nil { + return fmt.Errorf("failed to load tmState from genesis: %w ", err) + } + vm.tmState = &state + + // genesis only + if vm.tmState.LastBlockHeight == 0 { + // TODO use decoded/encoded genesis bytes + block, partSet := vm.tmState.MakeBlock(1, []types.Tx{genesisBytes}, nil, nil, nil) + vm.tmLogger.Info("init block", "b", block, "part set", partSet) + } + + //vm.genesisHash = vm.ethConfig.Genesis.ToBlock(nil).Hash() // must create genesis hash before [vm.readLastAccepted] + + // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). 
+ proxyApp, err := node.CreateAndStartProxyAppConns(proxy.NewLocalClientCreator(vm.app), vm.tmLogger) + if err != nil { + return fmt.Errorf("failed to create and start proxy app: %w ", err) + } + vm.proxyApp = proxyApp + + // Create EventBus + eventBus, err := node.CreateAndStartEventBus(vm.tmLogger) + if err != nil { + return fmt.Errorf("failed to create and start event bus: %w ", err) + } + vm.eventBus = eventBus + + vm.txIndexerDB = Database{prefixdb.NewNested(txIndexerDBPrefix, baseDB)} + vm.txIndexer = txidxkv.NewTxIndex(vm.txIndexerDB) + vm.blockIndexerDB = Database{prefixdb.NewNested(blockIndexerDBPrefix, baseDB)} + vm.blockIndexer = blockidxkv.New(vm.blockIndexerDB) + vm.indexerService = txindex.NewIndexerService(vm.txIndexer, vm.blockIndexer, eventBus) + vm.indexerService.SetLogger(vm.tmLogger.With("module", "txindex")) + + if err := vm.indexerService.Start(); err != nil { + return err + } + + if err := vm.doHandshake(vm.genesis, vm.tmLogger.With("module", "consensus")); err != nil { + return err + } + + state, err = vm.stateStore.Load() + if err != nil { + return fmt.Errorf("failed to load tmState: %w ", err) + } + vm.tmState = &state + + genesisBlock, err := vm.buildGenesisBlock(genesisBytes) + if err != nil { + return fmt.Errorf("failed to build genesis block: %w ", err) + } + + vm.mempool = vm.createMempool() + + if err := vm.initializeMetrics(); err != nil { + return err + } + + if err := vm.initChainState(genesisBlock); err != nil { + return err + } + + return nil +} + +// builds genesis block if required +func (vm *VM) buildGenesisBlock(genesisData []byte) (*types.Block, error) { + if vm.tmState.LastBlockHeight != 0 { + return nil, nil + } + txs := types.Txs{types.Tx(genesisData)} + if len(txs) == 0 { + return nil, errNoPendingTxs + } + height := vm.tmState.LastBlockHeight + 1 + + commit := makeCommitMock(height, time.Now()) + genesisBlock, _ := vm.tmState.MakeBlock(height, txs, commit, nil, proposerAddress) + return genesisBlock, nil +} + +// Initializes Genesis if required +func (vm *VM) initGenesis(genesisData []byte) error { + // load genesis from database + genesis, err := node.LoadGenesisDoc(vm.stateDB) + // genesis not found in database + if err != nil { + if err == node.ErrNoGenesisDoc { + // get it from json + genesis, err = types.GenesisDocFromJSON(genesisData) + if err != nil { + return fmt.Errorf("failed to decode genesis bytes: %w ", err) + } + // save to database + err = node.SaveGenesisDoc(vm.stateDB, genesis) + if err != nil { + return fmt.Errorf("failed to save genesis data: %w ", err) + } + } else { + return err + } + } + + vm.genesis = genesis + return nil +} + +// InitGenesisChunks configures the environment +// and should be called on service startup. 
+func (vm *VM) initGenesisChunks() error {
+	if vm.genesis == nil {
+		return fmt.Errorf("empty genesis")
+	}
+
+	data, err := tmjson.Marshal(vm.genesis)
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < len(data); i += genesisChunkSize {
+		end := i + genesisChunkSize
+
+		if end > len(data) {
+			end = len(data)
+		}
+
+		vm.genChunks = append(vm.genChunks, base64.StdEncoding.EncodeToString(data[i:end]))
+	}
+
+	return nil
+}
+
+func (vm *VM) createMempool() *mempl.CListMempool {
+	cfg := config.DefaultMempoolConfig()
+	mempool := mempl.NewCListMempool(
+		cfg,
+		vm.proxyApp.Mempool(),
+		vm.tmState.LastBlockHeight,
+		vm,
+		mempl.WithMetrics(mempl.NopMetrics()), // TODO: use prometheus metrics based on config
+		mempl.WithPreCheck(sm.TxPreCheck(*vm.tmState)),
+		mempl.WithPostCheck(sm.TxPostCheck(*vm.tmState)),
+	)
+	mempoolLogger := vm.tmLogger.With("module", "mempool")
+	mempool.SetLogger(mempoolLogger)
+
+	return mempool
+}
+
+// NotifyBlockReady tells the consensus engine that a new block
+// is ready to be created
+func (vm *VM) NotifyBlockReady() {
+	select {
+	case vm.toEngine <- common.PendingTxs:
+		vm.tmLogger.Debug("Notify consensus engine")
+	default:
+		vm.tmLogger.Error("Failed to push PendingTxs notification to the consensus engine.")
+	}
+}
+
+func (vm *VM) doHandshake(genesis *types.GenesisDoc, consensusLogger log.Logger) error {
+	handshaker := cs.NewHandshaker(vm.stateStore, *vm.tmState, vm.blockStore, genesis)
+	handshaker.SetLogger(consensusLogger)
+	handshaker.SetEventBus(vm.eventBus)
+	if err := handshaker.Handshake(vm.proxyApp); err != nil {
+		return fmt.Errorf("error during handshake: %v", err)
+	}
+	return nil
+}
+
+// readLastAccepted reads the last accepted hash from [acceptedBlockDB] and returns the
+// last accepted block hash and height by reading directly from [vm.chaindb] instead of relying
+// on [chain].
+// Note: assumes chaindb, ethConfig, and genesisHash have been initialized.
+//func (vm *VM) readLastAccepted() (tmbytes.HexBytes, uint64, error) {
+//	// Attempt to load last accepted block to determine if it is necessary to
+//	// initialize state with the genesis block.
+// lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) +// switch { +// case lastAcceptedErr == database.ErrNotFound: +// // If there is nothing in the database, return the genesis block hash and height +// return vm.genesisHash, 0, nil +// case lastAcceptedErr != nil: +// return common.Hash{}, 0, fmt.Errorf("failed to get last accepted block ID due to: %w", lastAcceptedErr) +// case len(lastAcceptedBytes) != common.HashLength: +// return common.Hash{}, 0, fmt.Errorf("last accepted bytes should have been length %d, but found %d", common.HashLength, len(lastAcceptedBytes)) +// default: +// lastAcceptedHash := common.BytesToHash(lastAcceptedBytes) +// height := rawdb.ReadHeaderNumber(vm.chaindb, lastAcceptedHash) +// if height == nil { +// return common.Hash{}, 0, fmt.Errorf("failed to retrieve header number of last accepted block: %s", lastAcceptedHash) +// } +// return lastAcceptedHash, *height, nil +// } +//} + +func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { + block, err := vm.newBlock(lastAcceptedBlock) + if err != nil { + return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) + } + block.status = choices.Accepted + + config := &chain.Config{ + DecidedCacheSize: decidedCacheSize, + MissingCacheSize: missingCacheSize, + UnverifiedCacheSize: unverifiedCacheSize, + //GetBlockIDAtHeight: vm.GetBlockIDAtHeight, + GetBlock: vm.getBlock, + UnmarshalBlock: vm.parseBlock, + BuildBlock: vm.buildBlock, + LastAcceptedBlock: block, + } + + // Register chain state metrics + chainStateRegisterer := prometheus.NewRegistry() + state, err := chain.NewMeteredState(chainStateRegisterer, config) + if err != nil { + return fmt.Errorf("could not create metered state: %w", err) + } + vm.State = state + + return vm.multiGatherer.Register(chainStateMetricsPrefix, chainStateRegisterer) +} + +func (vm *VM) initializeMetrics() error { + vm.multiGatherer = metrics.NewMultiGatherer() + + if err := vm.ctx.Metrics.Register(vm.multiGatherer); err != nil { + return err + } + + return nil +} + +// parseBlock parses [b] into a block to be wrapped by ChainState. +func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { + protoBlock := new(tmproto.Block) + err := protoBlock.Unmarshal(b) + if err != nil { + return nil, err + } + + tmBlock, err := types.BlockFromProto(protoBlock) + if err != nil { + return nil, err + } + + // Note: the status of block is set by ChainState + block, err := vm.newBlock(tmBlock) + if err != nil { + return nil, err + } + + return block, nil +} + +// getBlock attempts to retrieve block [id] from the VM to be wrapped +// by ChainState. +func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { + var hash []byte + copy(hash, id[:]) + tmBlock := vm.blockStore.LoadBlockByHash(hash) + // If [tmBlock] is nil, return [database.ErrNotFound] here + // so that the miss is considered cacheable. 
+ if tmBlock == nil { + return nil, database.ErrNotFound + } + // Note: the status of block is set by ChainState + return vm.newBlock(tmBlock) +} + +func (vm *VM) applyBlock(block *Block) error { + vm.mempool.Lock() + defer vm.mempool.Unlock() + + state, err := vm.stateStore.Load() + if err != nil { + return err + } + + if err := validateBlock(state, block.tmBlock); err != nil { + return err + } + + abciResponses, err := execBlockOnProxyApp( + vm.tmLogger, + vm.proxyApp.Consensus(), + block.tmBlock, vm.stateStore, + state.InitialHeight, + ) + if err != nil { + return err + } + + // Save the results before we commit. + if err := vm.stateStore.SaveABCIResponses(block.tmBlock.Height, abciResponses); err != nil { + return err + } + + blockID := types.BlockID{ + Hash: block.tmBlock.Hash(), + PartSetHeader: block.tmBlock.MakePartSet(types.BlockPartSizeBytes).Header(), + } + + // Update the state with the block and responses. + state, err = updateState(state, blockID, &block.tmBlock.Header, abciResponses) + if err != nil { + return err + } + + // while mempool is Locked, flush to ensure all async requests have completed + // in the ABCI app before Commit. + if err := vm.mempool.FlushAppConn(); err != nil { + vm.tmLogger.Error("client error during mempool.FlushAppConn", "err", err) + return err + } + + // Commit block, get hash back + res, err := vm.proxyApp.Consensus().CommitSync() + if err != nil { + vm.tmLogger.Error("client error during proxyAppConn.CommitSync", "err", err) + return err + } + + // ResponseCommit has no error code - just data + vm.tmLogger.Info( + "committed state", + "height", block.Height, + "num_txs", len(block.tmBlock.Txs), + "app_hash", fmt.Sprintf("%X", res.Data), + ) + + deliverTxResponses := make([]*abciTypes.ResponseDeliverTx, len(block.tmBlock.Txs)) + for i := range block.tmBlock.Txs { + deliverTxResponses[i] = &abciTypes.ResponseDeliverTx{Code: abciTypes.CodeTypeOK} + } + + // Update mempool. 
+ if err := vm.mempool.Update( + block.tmBlock.Height, + block.tmBlock.Txs, + deliverTxResponses, + TxPreCheck(state), + TxPostCheck(state), + ); err != nil { + return err + } + + vm.tmState.LastBlockHeight = block.tmBlock.Height + if err := vm.stateStore.Save(state); err != nil { + return err + } + vm.blockStore.SaveBlock(block.tmBlock, block.tmBlock.MakePartSet(types.BlockPartSizeBytes), block.tmBlock.LastCommit) + + fireEvents(vm.tmLogger, vm.eventBus, block.tmBlock, abciResponses) + return nil +} + +// buildBlock builds a block to be wrapped by ChainState +func (vm *VM) buildBlock(_ context.Context) (snowman.Block, error) { + txs := vm.mempool.ReapMaxBytesMaxGas(-1, -1) + if len(txs) == 0 { + return nil, errNoPendingTxs + } + height := vm.tmState.LastBlockHeight + 1 + + commit := makeCommitMock(height, time.Now()) + block, _ := vm.tmState.MakeBlock(height, txs, commit, nil, proposerAddress) + + // Note: the status of block is set by ChainState + blk, err := vm.newBlock(block) + blk.SetStatus(choices.Processing) + if err != nil { + return nil, err + } + vm.tmLogger.Debug(fmt.Sprintf("Built block %s", blk.ID())) + + return blk, nil +} + +func (vm *VM) AppGossip(_ context.Context, nodeID ids.NodeID, msg []byte) error { + return nil +} + +func (vm *VM) SetState(ctx context.Context, state snow.State) error { + return nil +} + +func (vm *VM) Shutdown(ctx context.Context) error { + // first stop the non-reactor services + if err := vm.eventBus.Stop(); err != nil { + return fmt.Errorf("Error closing eventBus: %w ", err) + } + if err := vm.indexerService.Stop(); err != nil { + return fmt.Errorf("Error closing indexerService: %w ", err) + } + //TODO: investigate wal configuration + // stop mempool WAL + //if vm.config.Mempool.WalEnabled() { + // n.mempool.CloseWAL() + //} + //if n.prometheusSrv != nil { + // if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { + // // Error from closing listeners, or context timeout: + // n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) + // } + //} + if err := vm.blockStore.Close(); err != nil { + return fmt.Errorf("Error closing blockStore: %w ", err) + } + if err := vm.stateStore.Close(); err != nil { + return fmt.Errorf("Error closing stateStore: %w ", err) + } + return nil + //timestampVM and deprecated landslide + //if vm.state == nil { + // return nil + //} + // + //return vm.state.Close() // close versionDB + + //coreth + //if vm.ctx == nil { + // return nil + //} + //vm.Network.Shutdown() + //if err := vm.StateSyncClient.Shutdown(); err != nil { + // log.Error("error stopping state syncer", "err", err) + //} + //close(vm.shutdownChan) + //vm.eth.Stop() + //vm.shutdownWg.Wait() + //return nil +} + +func (vm *VM) Version(ctx context.Context) (string, error) { + return Version.String(), nil +} + +func (vm *VM) CreateStaticHandlers(ctx context.Context) (map[string]*common.HTTPHandler, error) { + //TODO implement me + return nil, nil +} + +func (vm *VM) CreateHandlers(_ context.Context) (map[string]*common.HTTPHandler, error) { + mux := http.NewServeMux() + rpcLogger := vm.tmLogger.With("module", "rpc-server") + rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) + + server := rpc.NewServer() + server.RegisterCodec(json.NewCodec(), "application/json") + server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") + if err := server.RegisterService(NewService(vm), Name); err != nil { + return nil, err + } + + return map[string]*common.HTTPHandler{ + "/rpc": { + LockOptions: common.WriteLock, + Handler: server, 
+ }, + }, nil +} + +func (vm *VM) ProxyApp() proxy.AppConns { + return vm.proxyApp +} + +func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { + //TODO implement me + return nil +} + +func (vm *VM) AppRequest(_ context.Context, nodeID ids.NodeID, requestID uint32, time time.Time, request []byte) error { + return nil +} + +// This VM doesn't (currently) have any app-specific messages +func (vm *VM) AppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + return nil +} + +// This VM doesn't (currently) have any app-specific messages +func (vm *VM) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { + return nil +} + +func (vm *VM) CrossChainAppRequest(_ context.Context, _ ids.ID, _ uint32, deadline time.Time, request []byte) error { + return nil +} + +func (vm *VM) CrossChainAppRequestFailed(_ context.Context, _ ids.ID, _ uint32) error { + return nil +} + +func (vm *VM) CrossChainAppResponse(_ context.Context, _ ids.ID, _ uint32, response []byte) error { + return nil +} + +func (vm *VM) Connected(_ context.Context, id ids.NodeID, nodeVersion *version.Application) error { + return nil // noop +} + +func (vm *VM) Disconnected(_ context.Context, id ids.NodeID) error { + return nil // noop +} + +func (vm *VM) HealthCheck(ctx context.Context) (interface{}, error) { return nil, nil } diff --git a/vm/vm_test.go b/vm_/vm_test.go similarity index 100% rename from vm/vm_test.go rename to vm_/vm_test.go From 8914481e945d12bc67190f4df8ae14535b87885a Mon Sep 17 00:00:00 2001 From: n0cte Date: Fri, 7 Jul 2023 15:57:11 +0400 Subject: [PATCH 03/14] fix tests and remove old impl --- {vm_ => vm}/cmd/main.go | 13 +- {vm_ => vm}/data/vm_test_genesis.json | 0 {vm_ => vm}/scripts/build.sh | 0 {vm_ => vm}/scripts/build_test.sh | 0 vm/service.go | 8 +- {vm_ => vm}/service_test.go | 37 +- vm/vm.go | 116 +++- {vm_ => vm}/vm_test.go | 7 +- vm_/block.go | 95 ---- vm_/block_utils.go | 271 ---------- vm_/database.go | 119 ----- vm_/service.go | 727 -------------------------- vm_/service_utils.go | 131 ----- vm_/vm.go | 700 ------------------------- 14 files changed, 121 insertions(+), 2103 deletions(-) rename {vm_ => vm}/cmd/main.go (72%) rename {vm_ => vm}/data/vm_test_genesis.json (100%) rename {vm_ => vm}/scripts/build.sh (100%) rename {vm_ => vm}/scripts/build_test.sh (100%) rename {vm_ => vm}/service_test.go (91%) rename {vm_ => vm}/vm_test.go (95%) delete mode 100644 vm_/block.go delete mode 100644 vm_/block_utils.go delete mode 100644 vm_/database.go delete mode 100644 vm_/service.go delete mode 100644 vm_/service_utils.go delete mode 100644 vm_/vm.go diff --git a/vm_/cmd/main.go b/vm/cmd/main.go similarity index 72% rename from vm_/cmd/main.go rename to vm/cmd/main.go index e0f8b9743..b511ea15f 100644 --- a/vm_/cmd/main.go +++ b/vm/cmd/main.go @@ -3,23 +3,24 @@ package main import ( "context" "fmt" - "github.com/consideritdone/landslidecore/abci/example/counter" - landslideCoreVM "github.com/consideritdone/landslidecore/vm" "os" + "github.com/consideritdone/landslidecore/abci/example/counter" + "github.com/consideritdone/landslidecore/vm" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/ulimit" "github.com/ava-labs/avalanchego/vms/rpcchainvm" ) func main() { - if err := ulimit.Set(ulimit.DefaultFDLimit, logging.NoLog{}); err != nil { fmt.Printf("failed to set fd limit correctly due to: %s", err) os.Exit(1) } - vm := landslideCoreVM.NewVM(counter.NewApplication(true)) - - 
rpcchainvm.Serve(context.Background(), vm) + rpcchainvm.Serve( + context.Background(), + vm.New(vm.LocalAppCreator(counter.NewApplication(true))), + ) } diff --git a/vm_/data/vm_test_genesis.json b/vm/data/vm_test_genesis.json similarity index 100% rename from vm_/data/vm_test_genesis.json rename to vm/data/vm_test_genesis.json diff --git a/vm_/scripts/build.sh b/vm/scripts/build.sh similarity index 100% rename from vm_/scripts/build.sh rename to vm/scripts/build.sh diff --git a/vm_/scripts/build_test.sh b/vm/scripts/build_test.sh similarity index 100% rename from vm_/scripts/build_test.sh rename to vm/scripts/build_test.sh diff --git a/vm/service.go b/vm/service.go index 9a3a5c7d6..aca8dc3e6 100644 --- a/vm/service.go +++ b/vm/service.go @@ -600,11 +600,7 @@ func (s *LocalService) BlockchainInfo( } func (s *LocalService) Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error { - //if len(s.vm.genChunks) > 1 { - // return errors.New("genesis response is large, please use the genesis_chunked API instead") - //} - // - //reply.Genesis = s.vm.genesis + reply.Genesis = s.vm.genesis return nil } @@ -694,7 +690,7 @@ func (s *LocalService) ConsensusState(_ *http.Request, _ *struct{}, reply *ctype func (s *LocalService) ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error { reply.BlockHeight = s.vm.blockStore.Height() - //reply.ConsensusParams = *s.vm.genesis.ConsensusParams + reply.ConsensusParams = *s.vm.genesis.ConsensusParams return nil } diff --git a/vm_/service_test.go b/vm/service_test.go similarity index 91% rename from vm_/service_test.go rename to vm/service_test.go index 1eb747ef4..639c0db06 100644 --- a/vm_/service_test.go +++ b/vm/service_test.go @@ -2,6 +2,7 @@ package vm import ( "context" + "fmt" "testing" "time" @@ -19,8 +20,8 @@ func TestABCIService(t *testing.T) { reply := new(ctypes.ResultABCIInfo) assert.NoError(t, service.ABCIInfo(nil, nil, reply)) assert.Equal(t, uint64(1), reply.Response.AppVersion) - assert.Equal(t, int64(0), reply.Response.LastBlockHeight) - assert.Equal(t, []uint8([]byte(nil)), reply.Response.LastBlockAppHash) + assert.Equal(t, int64(1), reply.Response.LastBlockHeight) + assert.Equal(t, []uint8([]byte{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), reply.Response.LastBlockAppHash) t.Logf("%+v", reply) }) @@ -117,7 +118,7 @@ func TestHistoryService(t *testing.T) { t.Run("BlockchainInfo", func(t *testing.T) { reply := new(ctypes.ResultBlockchainInfo) assert.NoError(t, service.BlockchainInfo(nil, &BlockchainInfoArgs{1, 100}, reply)) - assert.Equal(t, int64(1), reply.LastHeight) + assert.Equal(t, int64(2), reply.LastHeight) }) t.Run("Genesis", func(t *testing.T) { @@ -148,7 +149,7 @@ func TestNetworkService(t *testing.T) { t.Run("ConsensusParams", func(t *testing.T) { reply := new(ctypes.ResultConsensusParams) assert.NoError(t, service.ConsensusParams(nil, nil, reply)) - assert.Equal(t, int64(0), reply.BlockHeight) + assert.Equal(t, int64(1), reply.BlockHeight) txReply := new(ctypes.ResultBroadcastTx) assert.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{Tx: []byte{0x00}}, txReply)) @@ -160,7 +161,7 @@ func TestNetworkService(t *testing.T) { assert.NoError(t, blk.Accept(context.Background())) assert.NoError(t, service.ConsensusParams(nil, nil, reply)) - assert.Equal(t, int64(1), reply.BlockHeight) + assert.Equal(t, int64(2), reply.BlockHeight) }) t.Run("Health", func(t *testing.T) { @@ -239,17 +240,17 @@ func TestSignService(t *testing.T) { assert.EqualValues(t, tx, reply.Tx) }) - 
//t.Run("TxSearch", func(t *testing.T) { - // reply := new(ctypes.ResultTxSearch) - // assert.NoError(t, service.TxSearch(nil, &TxSearchArgs{Query: "tx.height>0"}, reply)) - // assert.True(t, len(reply.Txs) > 0) - //}) - - //t.Run("BlockSearch", func(t *testing.T) { - // reply := new(ctypes.ResultBlockSearch) - // assert.NoError(t, service.BlockSearch(nil, &BlockSearchArgs{Query: "block.height>0"}, reply)) - // assert.True(t, len(reply.Blocks) > 0) - //}) + t.Run("TxSearch", func(t *testing.T) { + reply := new(ctypes.ResultTxSearch) + assert.NoError(t, service.TxSearch(nil, &TxSearchArgs{Query: fmt.Sprintf("tx.hash='%s'", txReply.Hash)}, reply)) + assert.True(t, len(reply.Txs) > 0) + }) + + t.Run("BlockSearch", func(t *testing.T) { + reply := new(ctypes.ResultBlockSearch) + assert.NoError(t, service.BlockSearch(nil, &BlockSearchArgs{Query: "block.height=2"}, reply)) + assert.True(t, len(reply.Blocks) > 0) + }) } func TestStatusService(t *testing.T) { @@ -270,7 +271,7 @@ func TestStatusService(t *testing.T) { t.Run("Status", func(t *testing.T) { reply1 := new(ctypes.ResultStatus) assert.NoError(t, service.Status(nil, nil, reply1)) - assert.Equal(t, int64(0), reply1.SyncInfo.LatestBlockHeight) + assert.Equal(t, int64(1), reply1.SyncInfo.LatestBlockHeight) blk, err := vm.BuildBlock(context.Background()) assert.NoError(t, err) @@ -279,7 +280,7 @@ func TestStatusService(t *testing.T) { reply2 := new(ctypes.ResultStatus) assert.NoError(t, service.Status(nil, nil, reply2)) - assert.Equal(t, int64(1), reply2.SyncInfo.LatestBlockHeight) + assert.Equal(t, int64(2), reply2.SyncInfo.LatestBlockHeight) }) } diff --git a/vm/vm.go b/vm/vm.go index ce023b5fc..8269d7043 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -10,6 +10,7 @@ import ( "github.com/gorilla/rpc/v2" "github.com/ava-labs/avalanchego/api/health" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" @@ -75,6 +76,9 @@ var ( dbPrefixBlockIndexer = []byte("block-indexer") proposerAddress = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + + errInvalidBlock = errors.New("invalid block") + errNoPendingTxs = errors.New("there is no txs to include to block") ) type ( @@ -88,11 +92,10 @@ type ( chainCtx *snow.Context toEngine chan<- common.Message - verifiedBlocks map[ids.ID]*Block - blockStore *store.BlockStore - stateStore state.Store - state state.State - genesis *types.GenesisDoc + blockStore *store.BlockStore + stateStore state.Store + state state.State + genesis *types.GenesisDoc mempool *mempool.CListMempool eventBus *types.EventBus @@ -100,12 +103,20 @@ type ( txIndexer txindex.TxIndexer blockIndexer indexer.BlockIndexer indexerService *txindex.IndexerService + multiGatherer metrics.MultiGatherer - bootstrapped utils.Atomic[bool] - preferred ids.ID + bootstrapped utils.Atomic[bool] + verifiedBlocks map[ids.ID]*Block + preferred ids.ID } ) +func LocalAppCreator(app abciTypes.Application) AppCreator { + return func(ids.ID) (abciTypes.Application, error) { + return app, nil + } +} + func New(appCreator AppCreator) *VM { return &VM{ appCreator: appCreator, @@ -370,16 +381,51 @@ func (vm *VM) Initialize( vm.mempool.SetLogger(vm.log.With("module", "mempool")) vm.mempool.EnableTxsAvailable() + vm.multiGatherer = metrics.NewMultiGatherer() + if err := vm.chainCtx.Metrics.Register(vm.multiGatherer); err != nil { + return err + } + if vm.state.LastBlockHeight == 0 { block, _ := vm.state.MakeBlock(1, 
types.Txs{types.Tx(genesisBytes)}, makeCommitMock(1, time.Now()), nil, proposerAddress) - blck := NewBlock(vm, block, choices.Processing) - blck.Accept(ctx) + if err := NewBlock(vm, block, choices.Processing).Accept(ctx); err != nil { + return err + } } vm.log.Info("vm initialization completed") return nil } +// func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { +// block, err := vm.newBlock(lastAcceptedBlock) +// if err != nil { +// return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) +// } +// block.status = choices.Accepted +// +// config := &chain.Config{ +// DecidedCacheSize: decidedCacheSize, +// MissingCacheSize: missingCacheSize, +// UnverifiedCacheSize: unverifiedCacheSize, +// //GetBlockIDAtHeight: vm.GetBlockIDAtHeight, +// GetBlock: vm.getBlock, +// UnmarshalBlock: vm.parseBlock, +// BuildBlock: vm.buildBlock, +// LastAcceptedBlock: block, +// } +// +// // Register chain state metrics +// chainStateRegisterer := prometheus.NewRegistry() +// state, err := chain.NewMeteredState(chainStateRegisterer, config) +// if err != nil { +// return fmt.Errorf("could not create metered state: %w", err) +// } +// vm.State = state +// +// return vm.multiGatherer.Register(chainStateMetricsPrefix, chainStateRegisterer) +// } + func (vm *VM) NotifyBlockReady() { select { case vm.toEngine <- common.PendingTxs: @@ -405,8 +451,30 @@ func (vm *VM) SetState(ctx context.Context, state snow.State) error { // Shutdown is called when the node is shutting down. func (vm *VM) Shutdown(context.Context) error { - vm.log.Debug("call shutdown") - panic("implement me") + vm.log.Debug("shutdown start") + + if err := vm.indexerService.Stop(); err != nil { + return fmt.Errorf("error closing indexerService: %w ", err) + } + + if err := vm.eventBus.Stop(); err != nil { + return fmt.Errorf("error closing eventBus: %w ", err) + } + + if err := vm.app.Stop(); err != nil { + return fmt.Errorf("error closing app: %w ", err) + } + + if err := vm.stateStore.Close(); err != nil { + return fmt.Errorf("error closing stateStore: %w ", err) + } + + if err := vm.blockStore.Close(); err != nil { + return fmt.Errorf("Error closing blockStore: %w ", err) + } + + vm.log.Debug("shutdown completed") + return nil } // Version returns the version of the VM. @@ -479,11 +547,11 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]*common.HTTPHandler, e // required for blocks that have been rejected by the consensus engine to be // able to be fetched. 
func (vm *VM) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { + vm.log.Debug("get block", "blkID", blkID.String()) if b, ok := vm.verifiedBlocks[blkID]; ok { return b, nil } b := vm.blockStore.LoadBlockByHash(blkID[:]) - vm.log.Debug("get block", "blkID", blkID.String(), "block", b) return NewBlock(vm, b, choices.Accepted), nil } @@ -498,19 +566,19 @@ func (vm *VM) ParseBlock(ctx context.Context, blockBytes []byte) (snowman.Block, protoBlock := new(tmproto.Block) if err := protoBlock.Unmarshal(blockBytes); err != nil { + vm.log.Error("can't parse block", "err", err) return nil, err } - vm.log.Debug("parse block", "protoBlock", protoBlock.Header.Height) block, err := types.BlockFromProto(protoBlock) if err != nil { + vm.log.Error("can't create block from proto", "err", err) return nil, err } - vm.log.Debug("parse block", "block", block.Hash()) - blk := NewBlock(vm, block, choices.Processing) - vm.log.Debug("parse block", "height", blk.Height(), "id", blk.ID()) - return blk, nil + vm.log.Debug("parsed block", "id", ids.ID(block.Hash())) + + return NewBlock(vm, block, choices.Processing), nil } // Attempt to create a new block from data contained in the VM. @@ -521,14 +589,14 @@ func (vm *VM) BuildBlock(context.Context) (snowman.Block, error) { vm.log.Debug("build block") txs := vm.mempool.ReapMaxBytesMaxGas(-1, -1) if len(txs) == 0 { - return nil, fmt.Errorf("no txs") + return nil, errNoPendingTxs } height := vm.state.LastBlockHeight + 1 commit := makeCommitMock(height, time.Now()) block, _ := vm.state.MakeBlock(height, txs, commit, nil, proposerAddress) - prev := vm.blockStore.LoadBlockByHash(vm.preferred[:]) + prev := vm.blockStore.LoadBlock(height - 1) block.LastBlockID = types.BlockID{ Hash: prev.Hash(), PartSetHeader: prev.LastBlockID.PartSetHeader, @@ -556,13 +624,7 @@ func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { // a definitionally accepted block, the Genesis block, that will be // returned. 
func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { - height := vm.blockStore.Height() - block := vm.blockStore.LoadBlock(height) - if block == nil { - vm.log.Error("block store return empty block", "height", height) - return ids.Empty, errors.New("block not found") - } - return ids.ID(block.Hash()), nil + return ids.ID(vm.state.LastBlockID.Hash), nil } func (vm *VM) applyBlock(block *Block) error { @@ -638,6 +700,8 @@ func (vm *VM) applyBlock(block *Block) error { } vm.state.LastBlockHeight = block.Block.Height + vm.state.LastBlockID = blockID + vm.state.LastBlockTime = block.Time if err := vm.stateStore.Save(state); err != nil { return err } diff --git a/vm_/vm_test.go b/vm/vm_test.go similarity index 95% rename from vm_/vm_test.go rename to vm/vm_test.go index 02801de19..212eb7513 100644 --- a/vm_/vm_test.go +++ b/vm/vm_test.go @@ -15,7 +15,6 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -51,7 +50,7 @@ func newTestVM(app atypes.Application) (*VM, *snow.Context, chan common.Message, Patch: 0, }) msgChan := make(chan common.Message, 1) - vm := NewVM(app) + vm := New(LocalAppCreator(app)) snowCtx := snow.DefaultContextTest() snowCtx.Log = logging.NewLogger( fmt.Sprintf("<%s Chain>", blockchainID), @@ -123,7 +122,7 @@ func TestInitVm(t *testing.T) { err = blk1.Accept(context.Background()) assert.NoError(t, err) - tmBlk1 := blk1.(*chain.BlockWrapper).Block.(*Block).tmBlock + tmBlk1 := blk1.(*Block).Block t.Logf("Block: %d", blk1.Height()) t.Logf("TM Block Tx count: %d", len(tmBlk1.Data.Txs)) @@ -161,7 +160,7 @@ func TestInitVm(t *testing.T) { err = blk2.Accept(context.Background()) assert.NoError(t, err) - tmBlk2 := blk2.(*chain.BlockWrapper).Block.(*Block).tmBlock + tmBlk2 := blk2.(*Block).Block t.Logf("Block: %d", blk2.Height()) t.Logf("TM Block Tx count: %d", len(tmBlk2.Data.Txs)) diff --git a/vm_/block.go b/vm_/block.go deleted file mode 100644 index 771687cb2..000000000 --- a/vm_/block.go +++ /dev/null @@ -1,95 +0,0 @@ -package vm - -import ( - "context" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/consideritdone/landslidecore/types" -) - -var ( - _ snowman.Block = &Block{} -) - -// Block implements the snowman.Block interface -type Block struct { - id ids.ID - tmBlock *types.Block - vm *VM - status choices.Status -} - -// newBlock returns a new Block wrapping the Tendermint Block type and implementing the snowman.Block interface -func (vm *VM) newBlock(tmBlock *types.Block) (*Block, error) { - var id ids.ID - copy(id[:], tmBlock.Hash()) - - return &Block{ - id: id, - tmBlock: tmBlock, - vm: vm, - }, nil -} - -func (b *Block) ID() ids.ID { - return b.id -} - -func (b *Block) Accept(ctx context.Context) error { - b.SetStatus(choices.Accepted) - return b.vm.applyBlock(b) -} - -func (b *Block) Reject(ctx context.Context) error { - b.SetStatus(choices.Rejected) - - return nil -} - -func (b *Block) SetStatus(status choices.Status) { - b.status = status -} - -func (b *Block) Status() choices.Status { - return b.status -} - -func (b *Block) Parent() ids.ID { - var id ids.ID - parentHash := b.tmBlock.Header.LastBlockID.Hash - copy(id[:], parentHash) - - return id -} - -func (b *Block) Verify(context.Context) error { - 
if b == nil || b.tmBlock == nil { - return errInvalidBlock - } - - return b.tmBlock.ValidateBasic() -} - -func (b *Block) Bytes() []byte { - block, err := b.tmBlock.ToProto() - if err != nil { - panic(err) - } - data, err := block.Marshal() - if err != nil { - panic(err) - } - - return data -} - -func (b *Block) Height() uint64 { - return uint64(b.tmBlock.Height) -} - -func (b *Block) Timestamp() time.Time { - return b.tmBlock.Time -} diff --git a/vm_/block_utils.go b/vm_/block_utils.go deleted file mode 100644 index 49723682a..000000000 --- a/vm_/block_utils.go +++ /dev/null @@ -1,271 +0,0 @@ -package vm - -import ( - "errors" - "fmt" - "time" - - "github.com/consideritdone/landslidecore/crypto" - "github.com/consideritdone/landslidecore/state" - "github.com/consideritdone/landslidecore/types" - - abci "github.com/consideritdone/landslidecore/abci/types" - "github.com/consideritdone/landslidecore/libs/log" - mempl "github.com/consideritdone/landslidecore/mempool" - tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" - "github.com/consideritdone/landslidecore/proxy" -) - -func makeCommitMock(height int64, timestamp time.Time) *types.Commit { - var commitSig []types.CommitSig = nil - if height != 1 { - commitSig = []types.CommitSig{{Timestamp: time.Now()}} - } - return types.NewCommit( - height, - 0, - types.BlockID{ - Hash: []byte(""), - PartSetHeader: types.PartSetHeader{ - Hash: []byte(""), - Total: 1, - }, - }, - commitSig, - ) -} - -func validateBlock(state state.State, block *types.Block) error { - // Validate internal consistency. - if err := block.ValidateBasic(); err != nil { - return err - } - - // Validate basic info. - if block.Version.App != state.Version.Consensus.App || - block.Version.Block != state.Version.Consensus.Block { - return fmt.Errorf("wrong Block.Header.Version. Expected %v, got %v", - state.Version.Consensus, - block.Version, - ) - } - if block.ChainID != state.ChainID { - return fmt.Errorf("wrong Block.Header.ChainID. Expected %v, got %v", - state.ChainID, - block.ChainID, - ) - } - - // Validate block LastCommit. - if block.Height == state.InitialHeight { - if len(block.LastCommit.Signatures) != 0 { - return errors.New("initial block can't have LastCommit signatures") - } - } - - // NOTE: We can't actually verify it's the right proposer because we don't - // know what round the block was first proposed. So just check that it's - // a legit address and a known validator. 
- if len(block.ProposerAddress) != crypto.AddressSize { - return fmt.Errorf("expected ProposerAddress size %d, got %d", - crypto.AddressSize, - len(block.ProposerAddress), - ) - } - - // Validate block Time - switch { - case block.Height > state.InitialHeight: - if !(block.Time.After(state.LastBlockTime) || block.Time.Equal(state.LastBlockTime)) { - return fmt.Errorf("block time %v not greater than or equal to last block time %v", - block.Time, - state.LastBlockTime, - ) - } - - case block.Height == state.InitialHeight: - genesisTime := state.LastBlockTime - if !block.Time.Equal(genesisTime) { - return fmt.Errorf("block time %v is not equal to genesis time %v", - block.Time, - genesisTime, - ) - } - - default: - return fmt.Errorf("block height %v lower than initial height %v", - block.Height, state.InitialHeight) - } - - return nil -} - -func execBlockOnProxyApp( - logger log.Logger, - proxyAppConn proxy.AppConnConsensus, - block *types.Block, - store state.Store, - initialHeight int64, -) (*tmstate.ABCIResponses, error) { - var validTxs, invalidTxs = 0, 0 - - txIndex := 0 - abciResponses := new(tmstate.ABCIResponses) - dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs)) - abciResponses.DeliverTxs = dtxs - - // Execute transactions and get hash. - proxyCb := func(req *abci.Request, res *abci.Response) { - if r, ok := res.Value.(*abci.Response_DeliverTx); ok { - // TODO: make use of res.Log - // TODO: make use of this info - // Blocks may include invalid txs. - txRes := r.DeliverTx - if txRes.Code == abci.CodeTypeOK { - validTxs++ - } else { - logger.Debug("invalid tx", "code", txRes.Code, "log", txRes.Log) - invalidTxs++ - } - - abciResponses.DeliverTxs[txIndex] = txRes - txIndex++ - } - } - proxyAppConn.SetResponseCallback(proxyCb) - - commitInfo := getBeginBlockValidatorInfo(block, store, initialHeight) - - byzVals := make([]abci.Evidence, 0) - for _, evidence := range block.Evidence.Evidence { - byzVals = append(byzVals, evidence.ABCI()...) - } - - // Begin block - var err error - pbh := block.Header.ToProto() - if pbh == nil { - return nil, errors.New("nil header") - } - - abciResponses.BeginBlock, err = proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ - Hash: block.Hash(), - Header: *pbh, - LastCommitInfo: commitInfo, - ByzantineValidators: byzVals, - }) - if err != nil { - logger.Error("error in proxyAppConn.BeginBlock", "err", err) - return nil, err - } - - // run txs of block - for _, tx := range block.Txs { - proxyAppConn.DeliverTxAsync(abci.RequestDeliverTx{Tx: tx}) - if err := proxyAppConn.Error(); err != nil { - return nil, err - } - } - - // End block. 
- abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(abci.RequestEndBlock{Height: block.Height}) - if err != nil { - logger.Error("error in proxyAppConn.EndBlock", "err", err) - return nil, err - } - - logger.Info("executed block", "height", block.Height, "num_valid_txs", validTxs, "num_invalid_txs", invalidTxs) - return abciResponses, nil -} - -func getBeginBlockValidatorInfo(block *types.Block, store state.Store, initialHeight int64) abci.LastCommitInfo { - voteInfos := make([]abci.VoteInfo, block.LastCommit.Size()) - return abci.LastCommitInfo{ - Round: block.LastCommit.Round, - Votes: voteInfos, - } -} - -func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { - return types.NewResults(ar.DeliverTxs).Hash() -} - -func updateState( - st state.State, - blockID types.BlockID, - header *types.Header, - abciResponses *tmstate.ABCIResponses, -) (state.State, error) { - return state.State{ - Version: st.Version, - ChainID: st.ChainID, - InitialHeight: st.InitialHeight, - LastBlockHeight: header.Height, - LastBlockID: blockID, - LastBlockTime: header.Time, - LastResultsHash: ABCIResponsesResultsHash(abciResponses), - AppHash: nil, - }, nil -} - -// TxPreCheck returns a function to filter transactions before processing. -// The function limits the size of a transaction to the block's maximum data size. -func TxPreCheck(state state.State) mempl.PreCheckFunc { - maxDataBytes := types.MaxDataBytesNoEvidence( - 22020096, - 1, - ) - return mempl.PreCheckMaxBytes(maxDataBytes) -} - -// TxPostCheck returns a function to filter transactions after processing. -// The function limits the gas wanted by a transaction to the block's maximum total gas. -func TxPostCheck(state state.State) mempl.PostCheckFunc { - return mempl.PostCheckMaxGas(-1) -} - -func fireEvents( - logger log.Logger, - eventBus types.BlockEventPublisher, - block *types.Block, - abciResponses *tmstate.ABCIResponses, -) { - if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{ - Block: block, - ResultBeginBlock: *abciResponses.BeginBlock, - ResultEndBlock: *abciResponses.EndBlock, - }); err != nil { - logger.Error("failed publishing new block", "err", err) - } - - if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ - Header: block.Header, - NumTxs: int64(len(block.Txs)), - ResultBeginBlock: *abciResponses.BeginBlock, - ResultEndBlock: *abciResponses.EndBlock, - }); err != nil { - logger.Error("failed publishing new block header", "err", err) - } - - if len(block.Evidence.Evidence) != 0 { - for _, ev := range block.Evidence.Evidence { - if err := eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ - Evidence: ev, - Height: block.Height, - }); err != nil { - logger.Error("failed publishing new evidence", "err", err) - } - } - } - - for i, tx := range block.Data.Txs { - if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ - Height: block.Height, - Index: uint32(i), - Tx: tx, - Result: *(abciResponses.DeliverTxs[i]), - }}); err != nil { - logger.Error("failed publishing event TX", "err", err) - } - } -} diff --git a/vm_/database.go b/vm_/database.go deleted file mode 100644 index 07b62ff18..000000000 --- a/vm_/database.go +++ /dev/null @@ -1,119 +0,0 @@ -package vm - -import ( - "github.com/ava-labs/avalanchego/database" - dbm "github.com/tendermint/tm-db" -) - -var ( - _ dbm.DB = &Database{} -) - -type ( - Database struct { - database.Database - } - Iterator struct { - database.Iterator - - start []byte - end []byte - } - Batch struct { - database.Batch - } -) 
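The Database/Iterator/Batch wrappers declared above (kept in vm/database.go after this move) adapt avalanchego's database.Database to the dbm.DB interface that Tendermint's block and state stores expect. A minimal usage sketch, assuming the wrapper keeps these exported names and that avalanchego's in-memory memdb is available; the key and value are illustrative:

    package main

    import (
        "fmt"

        "github.com/ava-labs/avalanchego/database/memdb"

        "github.com/consideritdone/landslidecore/vm"
    )

    func main() {
        // Wrap an in-memory avalanchego database so it satisfies tm-db's dbm.DB.
        db := vm.Database{Database: memdb.New()}

        // tm-db style Set/Get; Set delegates to avalanchego's Put.
        if err := db.Set([]byte("height"), []byte("1")); err != nil {
            panic(err)
        }
        value, err := db.Get([]byte("height"))
        if err != nil {
            panic(err)
        }
        fmt.Printf("height=%s\n", value)
    }
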
- -func (db Database) Get(key []byte) ([]byte, error) { - res, err := db.Database.Get(key) - if err != nil { - if err.Error() == "not found" { - return nil, nil - } - return nil, err - } - return res, nil -} - -func (db Database) Set(key []byte, value []byte) error { - return db.Database.Put(key, value) -} - -func (db Database) SetSync(key []byte, value []byte) error { - return db.Database.Put(key, value) -} - -func (db Database) DeleteSync(key []byte) error { - return db.Database.Delete(key) -} - -func (db Database) Iterator(start, end []byte) (dbm.Iterator, error) { - return Iterator{db.Database.NewIteratorWithStart(start), start, end}, nil -} - -func (db Database) ReverseIterator(start, end []byte) (dbm.Iterator, error) { - return Iterator{db.Database.NewIteratorWithStart(start), start, end}, nil -} - -func (db Database) NewBatch() dbm.Batch { - return Batch{db.Database.NewBatch()} -} - -func (db Database) Print() error { - //TODO implement me - return nil -} - -func (db Database) Stats() map[string]string { - //TODO implement me - return nil -} - -func (iter Iterator) Domain() (start []byte, end []byte) { - return iter.start, iter.end -} - -func (iter Iterator) Valid() bool { - return iter.Iterator.Error() == nil && len(iter.Iterator.Key()) > 0 -} - -func (iter Iterator) Next() { - iter.Iterator.Next() -} - -func (iter Iterator) Key() (key []byte) { - return iter.Iterator.Key() -} - -func (iter Iterator) Value() (value []byte) { - return iter.Iterator.Value() -} - -func (iter Iterator) Error() error { - return iter.Iterator.Error() -} - -func (iter Iterator) Close() error { - iter.Iterator.Release() - return iter.Error() -} - -func (b Batch) Set(key, value []byte) error { - return b.Batch.Put(key, value) -} - -func (b Batch) Delete(key []byte) error { - return b.Batch.Delete(key) -} - -func (b Batch) Write() error { - return b.Batch.Write() -} - -func (b Batch) WriteSync() error { - return b.Batch.Write() -} - -func (b Batch) Close() error { - return nil -} diff --git a/vm_/service.go b/vm_/service.go deleted file mode 100644 index 7caa1bcbd..000000000 --- a/vm_/service.go +++ /dev/null @@ -1,727 +0,0 @@ -package vm - -import ( - "context" - "errors" - "fmt" - "net/http" - "sort" - "time" - - abci "github.com/consideritdone/landslidecore/abci/types" - tmbytes "github.com/consideritdone/landslidecore/libs/bytes" - tmmath "github.com/consideritdone/landslidecore/libs/math" - tmquery "github.com/consideritdone/landslidecore/libs/pubsub/query" - mempl "github.com/consideritdone/landslidecore/mempool" - "github.com/consideritdone/landslidecore/p2p" - "github.com/consideritdone/landslidecore/proxy" - "github.com/consideritdone/landslidecore/rpc/core" - ctypes "github.com/consideritdone/landslidecore/rpc/core/types" - "github.com/consideritdone/landslidecore/types" -) - -type ( - LocalService struct { - vm *VM - } - - Service interface { - ABCIService - HistoryService - NetworkService - SignService - StatusService - MempoolService - } - - ABCIQueryArgs struct { - Path string `json:"path"` - Data tmbytes.HexBytes `json:"data"` - } - - ABCIQueryOptions struct { - Height int64 `json:"height"` - Prove bool `json:"prove"` - } - - ABCIQueryWithOptionsArgs struct { - Path string `json:"path"` - Data tmbytes.HexBytes `json:"data"` - Opts ABCIQueryOptions `json:"opts"` - } - - BroadcastTxArgs struct { - Tx types.Tx `json:"tx"` - } - - ABCIService interface { - // Reading from abci app - ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error - ABCIQuery(_ *http.Request, args 
*ABCIQueryArgs, reply *ctypes.ResultABCIQuery) error - ABCIQueryWithOptions(_ *http.Request, args *ABCIQueryWithOptionsArgs, reply *ctypes.ResultABCIQuery) error - - // Writing to abci app - BroadcastTxCommit(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTxCommit) error - BroadcastTxAsync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error - BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error - } - - BlockHeightArgs struct { - Height *int64 `json:"height"` - } - - BlockHashArgs struct { - Hash []byte `json:"hash"` - } - - CommitArgs struct { - Height *int64 `json:"height"` - } - - ValidatorsArgs struct { - Height *int64 `json:"height"` - Page *int `json:"page"` - PerPage *int `json:"perPage"` - } - - TxArgs struct { - Hash []byte `json:"hash"` - Prove bool `json:"prove"` - } - - TxSearchArgs struct { - Query string `json:"query"` - Prove bool `json:"prove"` - Page *int `json:"page"` - PerPage *int `json:"perPage"` - OrderBy string `json:"orderBy"` - } - - BlockSearchArgs struct { - Query string `json:"query"` - Page *int `json:"page"` - PerPage *int `json:"perPage"` - OrderBy string `json:"orderBy"` - } - - SignService interface { - Block(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlock) error - BlockByHash(_ *http.Request, args *BlockHashArgs, reply *ctypes.ResultBlock) error - BlockResults(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlockResults) error - Commit(_ *http.Request, args *CommitArgs, reply *ctypes.ResultCommit) error - Validators(_ *http.Request, args *ValidatorsArgs, reply *ctypes.ResultValidators) error - Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error - TxSearch(_ *http.Request, args *TxSearchArgs, reply *ctypes.ResultTxSearch) error - BlockSearch(_ *http.Request, args *BlockSearchArgs, reply *ctypes.ResultBlockSearch) error - } - - BlockchainInfoArgs struct { - MinHeight int64 `json:"minHeight"` - MaxHeight int64 `json:"maxHeight"` - } - - GenesisChunkedArgs struct { - Chunk uint `json:"chunk"` - } - - HistoryService interface { - BlockchainInfo(_ *http.Request, args *BlockchainInfoArgs, reply *ctypes.ResultBlockchainInfo) error - Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error - GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error - } - - StatusService interface { - Status(_ *http.Request, _ *struct{}, reply *ctypes.ResultStatus) error - } - - ConsensusParamsArgs struct { - Height *int64 `json:"height"` - } - - NetworkService interface { - NetInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultNetInfo) error - DumpConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultDumpConsensusState) error - ConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultConsensusState) error - ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error - Health(_ *http.Request, _ *struct{}, reply *ctypes.ResultHealth) error - } - - UnconfirmedTxsArgs struct { - Limit *int `json:"limit"` - } - - CheckTxArgs struct { - Tx []byte `json:"tx"` - } - - MempoolService interface { - UnconfirmedTxs(_ *http.Request, args *UnconfirmedTxsArgs, reply *ctypes.ResultUnconfirmedTxs) error - NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply *ctypes.ResultUnconfirmedTxs) error - CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error - } -) - -var ( - DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, 
Prove: false} -) - -func NewService(vm *VM) Service { - return &LocalService{vm} -} - -func (s *LocalService) ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error { - resInfo, err := s.vm.proxyApp.Query().InfoSync(proxy.RequestInfo) - if err != nil { - return err - } - reply.Response = *resInfo - return nil -} - -func (s *LocalService) ABCIQuery(req *http.Request, args *ABCIQueryArgs, reply *ctypes.ResultABCIQuery) error { - return s.ABCIQueryWithOptions(req, &ABCIQueryWithOptionsArgs{args.Path, args.Data, DefaultABCIQueryOptions}, reply) -} - -func (s *LocalService) ABCIQueryWithOptions( - _ *http.Request, - args *ABCIQueryWithOptionsArgs, - reply *ctypes.ResultABCIQuery, -) error { - resQuery, err := s.vm.proxyApp.Query().QuerySync(abci.RequestQuery{ - Path: args.Path, - Data: args.Data, - Height: args.Opts.Height, - Prove: args.Opts.Prove, - }) - if err != nil { - return err - } - reply.Response = *resQuery - return nil -} - -func (s *LocalService) BroadcastTxCommit( - _ *http.Request, - args *BroadcastTxArgs, - reply *ctypes.ResultBroadcastTxCommit, -) error { - subscriber := "" - - // Subscribe to tx being committed in block. - subCtx, cancel := context.WithTimeout(context.Background(), core.SubscribeTimeout) - defer cancel() - - q := types.EventQueryTxFor(args.Tx) - deliverTxSub, err := s.vm.eventBus.Subscribe(subCtx, subscriber, q) - if err != nil { - err = fmt.Errorf("failed to subscribe to tx: %w", err) - s.vm.tmLogger.Error("Error on broadcast_tx_commit", "err", err) - return err - } - - defer func() { - if err := s.vm.eventBus.Unsubscribe(context.Background(), subscriber, q); err != nil { - s.vm.tmLogger.Error("Error unsubscribing from eventBus", "err", err) - } - }() - - // Broadcast tx and wait for CheckTx result - checkTxResCh := make(chan *abci.Response, 1) - err = s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { - checkTxResCh <- res - }, mempl.TxInfo{}) - if err != nil { - s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) - return fmt.Errorf("error on broadcastTxCommit: %v", err) - } - checkTxResMsg := <-checkTxResCh - checkTxRes := checkTxResMsg.GetCheckTx() - if checkTxRes.Code != abci.CodeTypeOK { - *reply = ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: args.Tx.Hash(), - } - return nil - } - - // Wait for the tx to be included in a block or timeout. - select { - case msg := <-deliverTxSub.Out(): // The tx was included in a block. 
- deliverTxRes := msg.Data().(types.EventDataTx) - *reply = ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: deliverTxRes.Result, - Hash: args.Tx.Hash(), - Height: deliverTxRes.Height, - } - return nil - case <-deliverTxSub.Cancelled(): - var reason string - if deliverTxSub.Err() == nil { - reason = "Tendermint exited" - } else { - reason = deliverTxSub.Err().Error() - } - err = fmt.Errorf("deliverTxSub was cancelled (reason: %s)", reason) - s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) - return err - // TODO: use config for timeout - case <-time.After(10 * time.Second): - err = errors.New("timed out waiting for tx to be included in a block") - s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) - return err - } -} - -func (s *LocalService) BroadcastTxAsync( - _ *http.Request, - args *BroadcastTxArgs, - reply *ctypes.ResultBroadcastTx, -) error { - err := s.vm.mempool.CheckTx(args.Tx, nil, mempl.TxInfo{}) - if err != nil { - return err - } - reply.Hash = args.Tx.Hash() - return nil -} - -func (s *LocalService) BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error { - resCh := make(chan *abci.Response, 1) - err := s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { - s.vm.tmLogger.With("module", "service").Debug("handled response from checkTx") - resCh <- res - }, mempl.TxInfo{}) - if err != nil { - return err - } - res := <-resCh - r := res.GetCheckTx() - - reply.Code = r.Code - reply.Data = r.Data - reply.Log = r.Log - reply.Codespace = r.Codespace - reply.Hash = args.Tx.Hash() - - return nil -} - -func (s *LocalService) Block(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlock) error { - height, err := getHeight(s.vm.blockStore, args.Height) - if err != nil { - return err - } - block := s.vm.blockStore.LoadBlock(height) - blockMeta := s.vm.blockStore.LoadBlockMeta(height) - - if blockMeta != nil { - reply.BlockID = blockMeta.BlockID - } - reply.Block = block - return nil -} - -func (s *LocalService) BlockByHash(_ *http.Request, args *BlockHashArgs, reply *ctypes.ResultBlock) error { - block := s.vm.blockStore.LoadBlockByHash(args.Hash) - if block == nil { - reply.BlockID = types.BlockID{} - reply.Block = nil - return nil - } - blockMeta := s.vm.blockStore.LoadBlockMeta(block.Height) - reply.BlockID = blockMeta.BlockID - reply.Block = block - return nil -} - -func (s *LocalService) BlockResults(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlockResults) error { - height, err := getHeight(s.vm.blockStore, args.Height) - if err != nil { - return err - } - - results, err := s.vm.stateStore.LoadABCIResponses(height) - if err != nil { - return err - } - - reply.Height = height - reply.TxsResults = results.DeliverTxs - reply.BeginBlockEvents = results.BeginBlock.Events - reply.EndBlockEvents = results.EndBlock.Events - reply.ValidatorUpdates = results.EndBlock.ValidatorUpdates - reply.ConsensusParamUpdates = results.EndBlock.ConsensusParamUpdates - return nil -} - -func (s *LocalService) Commit(_ *http.Request, args *CommitArgs, reply *ctypes.ResultCommit) error { - height, err := getHeight(s.vm.blockStore, args.Height) - if err != nil { - return err - } - - blockMeta := s.vm.blockStore.LoadBlockMeta(height) - if blockMeta == nil { - return nil - } - - header := blockMeta.Header - commit := s.vm.blockStore.LoadBlockCommit(height) - res := ctypes.NewResultCommit(&header, commit, !(height == s.vm.blockStore.Height())) - - reply.SignedHeader = res.SignedHeader - 
reply.CanonicalCommit = res.CanonicalCommit - return nil -} - -func (s *LocalService) Validators(_ *http.Request, args *ValidatorsArgs, reply *ctypes.ResultValidators) error { - height, err := getHeight(s.vm.blockStore, args.Height) - if err != nil { - return err - } - - validators, err := s.vm.stateStore.LoadValidators(height) - if err != nil { - return err - } - - totalCount := len(validators.Validators) - perPage := validatePerPage(args.PerPage) - page, err := validatePage(args.Page, perPage, totalCount) - if err != nil { - return err - } - - skipCount := validateSkipCount(page, perPage) - - reply.BlockHeight = height - reply.Validators = validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - reply.Count = len(reply.Validators) - reply.Total = totalCount - return nil -} - -func (s *LocalService) Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error { - r, err := s.vm.txIndexer.Get(args.Hash) - if err != nil { - return err - } - - if r == nil { - return fmt.Errorf("tx (%X) not found", args.Hash) - } - - height := r.Height - index := r.Index - - var proof types.TxProof - if args.Prove { - block := s.vm.blockStore.LoadBlock(height) - proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines - } - - reply.Hash = args.Hash - reply.Height = height - reply.Index = index - reply.TxResult = r.Result - reply.Tx = r.Tx - reply.Proof = proof - return nil -} - -func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ctypes.ResultTxSearch) error { - q, err := tmquery.New(args.Query) - if err != nil { - return err - } - - var ctx context.Context - if req != nil { - ctx = req.Context() - } else { - ctx = context.Background() - } - - results, err := s.vm.txIndexer.Search(ctx, q) - if err != nil { - return err - } - - // sort results (must be done before pagination) - switch args.OrderBy { - case "desc": - sort.Slice(results, func(i, j int) bool { - if results[i].Height == results[j].Height { - return results[i].Index > results[j].Index - } - return results[i].Height > results[j].Height - }) - case "asc", "": - sort.Slice(results, func(i, j int) bool { - if results[i].Height == results[j].Height { - return results[i].Index < results[j].Index - } - return results[i].Height < results[j].Height - }) - default: - return errors.New("expected order_by to be either `asc` or `desc` or empty") - } - - // paginate results - totalCount := len(results) - perPage := validatePerPage(args.PerPage) - - page, err := validatePage(args.Page, perPage, totalCount) - if err != nil { - return err - } - - skipCount := validateSkipCount(page, perPage) - pageSize := tmmath.MinInt(perPage, totalCount-skipCount) - - apiResults := make([]*ctypes.ResultTx, 0, pageSize) - for i := skipCount; i < skipCount+pageSize; i++ { - r := results[i] - - var proof types.TxProof - if args.Prove { - block := s.vm.blockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines - } - - apiResults = append(apiResults, &ctypes.ResultTx{ - Hash: types.Tx(r.Tx).Hash(), - Height: r.Height, - Index: r.Index, - TxResult: r.Result, - Tx: r.Tx, - Proof: proof, - }) - } - - reply.Txs = apiResults - reply.TotalCount = totalCount - return nil -} - -func (s *LocalService) BlockSearch(req *http.Request, args *BlockSearchArgs, reply *ctypes.ResultBlockSearch) error { - q, err := tmquery.New(args.Query) - if err != nil { - return err - } - - var ctx context.Context - if req != nil { - ctx = req.Context() - } else { - ctx = 
context.Background() - } - - results, err := s.vm.blockIndexer.Search(ctx, q) - if err != nil { - return err - } - - // sort results (must be done before pagination) - switch args.OrderBy { - case "desc", "": - sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) - - case "asc": - sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) - - default: - return errors.New("expected order_by to be either `asc` or `desc` or empty") - } - - // paginate results - totalCount := len(results) - perPage := validatePerPage(args.PerPage) - - page, err := validatePage(args.Page, perPage, totalCount) - if err != nil { - return err - } - - skipCount := validateSkipCount(page, perPage) - pageSize := tmmath.MinInt(perPage, totalCount-skipCount) - - apiResults := make([]*ctypes.ResultBlock, 0, pageSize) - for i := skipCount; i < skipCount+pageSize; i++ { - block := s.vm.blockStore.LoadBlock(results[i]) - if block != nil { - blockMeta := s.vm.blockStore.LoadBlockMeta(block.Height) - if blockMeta != nil { - apiResults = append(apiResults, &ctypes.ResultBlock{ - Block: block, - BlockID: blockMeta.BlockID, - }) - } - } - } - - reply.Blocks = apiResults - reply.TotalCount = totalCount - return nil -} - -func (s *LocalService) BlockchainInfo( - _ *http.Request, - args *BlockchainInfoArgs, - reply *ctypes.ResultBlockchainInfo, -) error { - // maximum 20 block metas - const limit int64 = 20 - var err error - args.MinHeight, args.MaxHeight, err = filterMinMax( - s.vm.blockStore.Base(), - s.vm.blockStore.Height(), - args.MinHeight, - args.MaxHeight, - limit) - if err != nil { - return err - } - s.vm.tmLogger.Debug("BlockchainInfoHandler", "maxHeight", args.MaxHeight, "minHeight", args.MinHeight) - - var blockMetas []*types.BlockMeta - for height := args.MaxHeight; height >= args.MinHeight; height-- { - blockMeta := s.vm.blockStore.LoadBlockMeta(height) - blockMetas = append(blockMetas, blockMeta) - } - - reply.LastHeight = s.vm.blockStore.Height() - reply.BlockMetas = blockMetas - return nil -} - -func (s *LocalService) Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error { - if len(s.vm.genChunks) > 1 { - return errors.New("genesis response is large, please use the genesis_chunked API instead") - } - - reply.Genesis = s.vm.genesis - return nil -} - -func (s *LocalService) GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error { - if s.vm.genChunks == nil { - return fmt.Errorf("service configuration error, genesis chunks are not initialized") - } - - if len(s.vm.genChunks) == 0 { - return fmt.Errorf("service configuration error, there are no chunks") - } - - id := int(args.Chunk) - - if id > len(s.vm.genChunks)-1 { - return fmt.Errorf("there are %d chunks, %d is invalid", len(s.vm.genChunks)-1, id) - } - - reply.TotalChunks = len(s.vm.genChunks) - reply.ChunkNumber = id - reply.Data = s.vm.genChunks[id] - return nil -} - -func (s *LocalService) Status(_ *http.Request, _ *struct{}, reply *ctypes.ResultStatus) error { - var ( - earliestBlockHeight int64 - earliestBlockHash tmbytes.HexBytes - earliestAppHash tmbytes.HexBytes - earliestBlockTimeNano int64 - ) - - if earliestBlockMeta := s.vm.blockStore.LoadBaseMeta(); earliestBlockMeta != nil { - earliestBlockHeight = earliestBlockMeta.Header.Height - earliestAppHash = earliestBlockMeta.Header.AppHash - earliestBlockHash = earliestBlockMeta.BlockID.Hash - earliestBlockTimeNano = earliestBlockMeta.Header.Time.UnixNano() - } - - var ( - latestBlockHash tmbytes.HexBytes - 
latestAppHash tmbytes.HexBytes - latestBlockTimeNano int64 - - latestHeight = s.vm.blockStore.Height() - ) - - if latestHeight != 0 { - if latestBlockMeta := s.vm.blockStore.LoadBlockMeta(latestHeight); latestBlockMeta != nil { - latestBlockHash = latestBlockMeta.BlockID.Hash - latestAppHash = latestBlockMeta.Header.AppHash - latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() - } - } - - reply.NodeInfo = p2p.DefaultNodeInfo{ - DefaultNodeID: p2p.ID(s.vm.ctx.NodeID.String()), - Network: fmt.Sprintf("%d", s.vm.ctx.NetworkID), - } - reply.SyncInfo = ctypes.SyncInfo{ - LatestBlockHash: latestBlockHash, - LatestAppHash: latestAppHash, - LatestBlockHeight: latestHeight, - LatestBlockTime: time.Unix(0, latestBlockTimeNano), - EarliestBlockHash: earliestBlockHash, - EarliestAppHash: earliestAppHash, - EarliestBlockHeight: earliestBlockHeight, - EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), - } - return nil -} - -// ToDo: no peers, because it's vm -func (s *LocalService) NetInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultNetInfo) error { - return nil -} - -// ToDo: we doesn't have consensusState -func (s *LocalService) DumpConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultDumpConsensusState) error { - return nil -} - -// ToDo: we doesn't have consensusState -func (s *LocalService) ConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultConsensusState) error { - return nil -} - -func (s *LocalService) ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error { - reply.BlockHeight = s.vm.blockStore.Height() - reply.ConsensusParams = *s.vm.genesis.ConsensusParams - return nil -} - -func (s *LocalService) Health(_ *http.Request, _ *struct{}, reply *ctypes.ResultHealth) error { - *reply = ctypes.ResultHealth{} - return nil -} - -func (s *LocalService) UnconfirmedTxs(_ *http.Request, args *UnconfirmedTxsArgs, reply *ctypes.ResultUnconfirmedTxs) error { - limit := validatePerPage(args.Limit) - txs := s.vm.mempool.ReapMaxTxs(limit) - reply.Count = len(txs) - reply.Total = s.vm.mempool.Size() - reply.Txs = txs - return nil -} - -func (s *LocalService) NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply *ctypes.ResultUnconfirmedTxs) error { - reply.Count = s.vm.mempool.Size() - reply.Total = s.vm.mempool.Size() - reply.TotalBytes = s.vm.mempool.TxsBytes() - return nil -} - -func (s *LocalService) CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error { - res, err := s.vm.proxyApp.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: args.Tx}) - if err != nil { - return err - } - reply.ResponseCheckTx = *res - return nil -} diff --git a/vm_/service_utils.go b/vm_/service_utils.go deleted file mode 100644 index 502c9e681..000000000 --- a/vm_/service_utils.go +++ /dev/null @@ -1,131 +0,0 @@ -package vm - -import ( - "fmt" - - tmmath "github.com/consideritdone/landslidecore/libs/math" - "github.com/consideritdone/landslidecore/rpc/client" - coretypes "github.com/consideritdone/landslidecore/rpc/core/types" - "github.com/consideritdone/landslidecore/store" -) - -var ( - // see README - defaultPerPage = 30 - maxPerPage = 100 -) - -// bsHeight can be either latest committed or uncommitted (+1) height. 
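getHeight below resolves an optional height argument against the block store: a nil pointer means "latest", and an explicit value must fall inside [Base(), Height()]. A standalone sketch of that rule; resolveHeight is an illustrative helper, not part of the package:

    package main

    import "fmt"

    // Mirrors the height-resolution rule documented above: nil means "latest",
    // otherwise the value must lie within [base, latest] of the block store.
    func resolveHeight(base, latest int64, heightPtr *int64) (int64, error) {
        if heightPtr == nil {
            return latest, nil
        }
        h := *heightPtr
        if h <= 0 {
            return 0, fmt.Errorf("height must be greater than 0, but got %d", h)
        }
        if h > latest {
            return 0, fmt.Errorf("height %d must be less than or equal to the current blockchain height %d", h, latest)
        }
        if h < base {
            return 0, fmt.Errorf("height %d is not available, lowest height is %d", h, base)
        }
        return h, nil
    }

    func main() {
        h := int64(5)
        got, err := resolveHeight(1, 10, &h)
        fmt.Println(got, err) // 5 <nil>
    }
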
-func getHeight(bs *store.BlockStore, heightPtr *int64) (int64, error) { - bsHeight := bs.Height() - bsBase := bs.Base() - if heightPtr != nil { - height := *heightPtr - if height <= 0 { - return 0, fmt.Errorf("height must be greater than 0, but got %d", height) - } - if height > bsHeight { - return 0, fmt.Errorf("height %d must be less than or equal to the current blockchain height %d", height, bsHeight) - } - if height < bsBase { - return 0, fmt.Errorf("height %d is not available, lowest height is %d", height, bsBase) - } - return height, nil - } - return bsHeight, nil -} - -func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { - if perPage < 1 { - panic(fmt.Sprintf("zero or negative perPage: %d", perPage)) - } - - if pagePtr == nil { // no page parameter - return 1, nil - } - - pages := ((totalCount - 1) / perPage) + 1 - if pages == 0 { - pages = 1 // one page (even if it's empty) - } - page := *pagePtr - if page <= 0 || page > pages { - return 1, fmt.Errorf("page should be within [1, %d] range, given %d", pages, page) - } - - return page, nil -} - -func validatePerPage(perPagePtr *int) int { - if perPagePtr == nil { // no per_page parameter - return defaultPerPage - } - - perPage := *perPagePtr - if perPage < 1 { - return defaultPerPage - } else if perPage > maxPerPage { - return maxPerPage - } - return perPage -} - -func validateSkipCount(page, perPage int) int { - skipCount := (page - 1) * perPage - if skipCount < 0 { - return 0 - } - return skipCount -} - -// filterMinMax returns error if either min or max are negative or min > max -// if 0, use blockstore base for min, latest block height for max -// enforce limit. -func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { - // filter negatives - if min < 0 || max < 0 { - return min, max, fmt.Errorf("heights must be non-negative") - } - - // adjust for default values - if min == 0 { - min = 1 - } - if max == 0 { - max = height - } - - // limit max to the height - max = tmmath.MinInt64(height, max) - - // limit min to the base - min = tmmath.MaxInt64(base, min) - - // limit min to within `limit` of max - // so the total number of blocks returned will be `limit` - min = tmmath.MaxInt64(min, max-limit+1) - - if min > max { - return min, max, fmt.Errorf("min height %d can't be greater than max height %d", min, max) - } - return min, max, nil -} - -func WaitForHeight(c Service, h int64, waiter client.Waiter) error { - if waiter == nil { - waiter = client.DefaultWaitStrategy - } - delta := int64(1) - for delta > 0 { - r := new(coretypes.ResultStatus) - if err := c.Status(nil, nil, r); err != nil { - return err - } - delta = h - r.SyncInfo.LatestBlockHeight - // wait for the time, or abort early - if err := waiter(delta); err != nil { - return err - } - } - return nil -} diff --git a/vm_/vm.go b/vm_/vm.go deleted file mode 100644 index 20077f996..000000000 --- a/vm_/vm.go +++ /dev/null @@ -1,700 +0,0 @@ -package vm - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "net/http" - "time" - - "github.com/ava-labs/avalanchego/api/metrics" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - 
"github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/components/chain" - "github.com/gorilla/rpc/v2" - "github.com/prometheus/client_golang/prometheus" - dbm "github.com/tendermint/tm-db" - - abciTypes "github.com/consideritdone/landslidecore/abci/types" - "github.com/consideritdone/landslidecore/config" - cs "github.com/consideritdone/landslidecore/consensus" - tmjson "github.com/consideritdone/landslidecore/libs/json" - "github.com/consideritdone/landslidecore/libs/log" - mempl "github.com/consideritdone/landslidecore/mempool" - "github.com/consideritdone/landslidecore/node" - tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" - "github.com/consideritdone/landslidecore/proxy" - rpccore "github.com/consideritdone/landslidecore/rpc/core" - rpcserver "github.com/consideritdone/landslidecore/rpc/jsonrpc/server" - sm "github.com/consideritdone/landslidecore/state" - "github.com/consideritdone/landslidecore/state/indexer" - blockidxkv "github.com/consideritdone/landslidecore/state/indexer/block/kv" - "github.com/consideritdone/landslidecore/state/txindex" - txidxkv "github.com/consideritdone/landslidecore/state/txindex/kv" - "github.com/consideritdone/landslidecore/store" - "github.com/consideritdone/landslidecore/types" -) - -var ( - _ block.ChainVM = &VM{} - - Version = &version.Semantic{ - Major: 0, - Minor: 1, - Patch: 1, - } -) - -const ( - Name = "landslide" - - decidedCacheSize = 100 - missingCacheSize = 50 - unverifiedCacheSize = 50 - - // genesisChunkSize is the maximum size, in bytes, of each - // chunk in the genesis structure for the chunked API - genesisChunkSize = 16 * 1024 * 1024 // 16 -) - -var ( - chainStateMetricsPrefix = "chain_state" - - lastAcceptedKey = []byte("last_accepted_key") - blockStoreDBPrefix = []byte("blockstore") - stateDBPrefix = []byte("state") - txIndexerDBPrefix = []byte("tx_index") - blockIndexerDBPrefix = []byte("block_events") - - proposerAddress = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} -) - -var ( - errInvalidBlock = errors.New("invalid block") - errNoPendingTxs = errors.New("there is no txs to include to block") -) - -type VM struct { - ctx *snow.Context - dbManager manager.Manager - - toEngine chan<- common.Message - - // *chain.State helps to implement the VM interface by wrapping blocks - // with an efficient caching layer. - *chain.State - - tmLogger log.Logger - - blockStoreDB dbm.DB - blockStore *store.BlockStore - - stateDB dbm.DB - stateStore sm.Store - tmState *sm.State - - mempool mempl.Mempool - - // Tendermint Application - app abciTypes.Application - - // Tendermint proxy app - proxyApp proxy.AppConns - - // EventBus is a common bus for all events going through the system. - eventBus *types.EventBus - - // [acceptedBlockDB] is the database to store the last accepted - // block. - acceptedBlockDB database.Database - - genesis *types.GenesisDoc - // cache of chunked genesis data. 
- genChunks []string - - // Metrics - multiGatherer metrics.MultiGatherer - - txIndexer txindex.TxIndexer - txIndexerDB dbm.DB - blockIndexer indexer.BlockIndexer - blockIndexerDB dbm.DB - indexerService *txindex.IndexerService - - clock mockable.Clock - - appCreator func(ids.ID) (abciTypes.Application, error) -} - -func NewVM(app abciTypes.Application) *VM { - return &VM{app: app, appCreator: nil} -} - -func NewVMWithAppCreator(creator func(chainID ids.ID) (abciTypes.Application, error)) *VM { - return &VM{app: nil, appCreator: creator} -} - -func (vm *VM) Initialize( - _ context.Context, - chainCtx *snow.Context, - dbManager manager.Manager, - genesisBytes []byte, - upgradeBytes []byte, - configBytes []byte, - toEngine chan<- common.Message, - fxs []*common.Fx, - appSender common.AppSender, -) error { - if vm.appCreator != nil { - app, err := vm.appCreator(chainCtx.ChainID) - if err != nil { - return err - } - vm.app = app - } - - vm.ctx = chainCtx - vm.tmLogger = log.NewTMLogger(vm.ctx.Log) - vm.dbManager = dbManager - - vm.toEngine = toEngine - - baseDB := dbManager.Current().Database - - vm.blockStoreDB = Database{prefixdb.NewNested(blockStoreDBPrefix, baseDB)} - vm.blockStore = store.NewBlockStore(vm.blockStoreDB) - - vm.stateDB = Database{prefixdb.NewNested(stateDBPrefix, baseDB)} - vm.stateStore = sm.NewStore(vm.stateDB) - - if err := vm.initGenesis(genesisBytes); err != nil { - return err - } - - if err := vm.initGenesisChunks(); err != nil { - return err - } - - state, err := vm.stateStore.LoadFromDBOrGenesisDoc(vm.genesis) - if err != nil { - return fmt.Errorf("failed to load tmState from genesis: %w ", err) - } - vm.tmState = &state - - // genesis only - if vm.tmState.LastBlockHeight == 0 { - // TODO use decoded/encoded genesis bytes - block, partSet := vm.tmState.MakeBlock(1, []types.Tx{genesisBytes}, nil, nil, nil) - vm.tmLogger.Info("init block", "b", block, "part set", partSet) - } - - //vm.genesisHash = vm.ethConfig.Genesis.ToBlock(nil).Hash() // must create genesis hash before [vm.readLastAccepted] - - // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). 
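A standalone sketch of the in-process ABCI wiring this step performs, assuming the fork keeps Tendermint's proxy package and the example counter app used elsewhere in this series; the wiring shown is illustrative of what node.CreateAndStartProxyAppConns sets up for vm.app:

    package main

    import (
        "fmt"

        "github.com/consideritdone/landslidecore/abci/example/counter"
        "github.com/consideritdone/landslidecore/proxy"
    )

    func main() {
        // Local (same-process) ABCI client over the example counter app, with
        // separate consensus, mempool and query connections.
        appConns := proxy.NewAppConns(proxy.NewLocalClientCreator(counter.NewApplication(true)))
        if err := appConns.Start(); err != nil {
            panic(err)
        }
        defer appConns.Stop()

        // Query connection: ask the app for its ABCI info.
        info, err := appConns.Query().InfoSync(proxy.RequestInfo)
        if err != nil {
            panic(err)
        }
        fmt.Println(info.Data)
    }
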
- proxyApp, err := node.CreateAndStartProxyAppConns(proxy.NewLocalClientCreator(vm.app), vm.tmLogger) - if err != nil { - return fmt.Errorf("failed to create and start proxy app: %w ", err) - } - vm.proxyApp = proxyApp - - // Create EventBus - eventBus, err := node.CreateAndStartEventBus(vm.tmLogger) - if err != nil { - return fmt.Errorf("failed to create and start event bus: %w ", err) - } - vm.eventBus = eventBus - - vm.txIndexerDB = Database{prefixdb.NewNested(txIndexerDBPrefix, baseDB)} - vm.txIndexer = txidxkv.NewTxIndex(vm.txIndexerDB) - vm.blockIndexerDB = Database{prefixdb.NewNested(blockIndexerDBPrefix, baseDB)} - vm.blockIndexer = blockidxkv.New(vm.blockIndexerDB) - vm.indexerService = txindex.NewIndexerService(vm.txIndexer, vm.blockIndexer, eventBus) - vm.indexerService.SetLogger(vm.tmLogger.With("module", "txindex")) - - if err := vm.indexerService.Start(); err != nil { - return err - } - - if err := vm.doHandshake(vm.genesis, vm.tmLogger.With("module", "consensus")); err != nil { - return err - } - - state, err = vm.stateStore.Load() - if err != nil { - return fmt.Errorf("failed to load tmState: %w ", err) - } - vm.tmState = &state - - genesisBlock, err := vm.buildGenesisBlock(genesisBytes) - if err != nil { - return fmt.Errorf("failed to build genesis block: %w ", err) - } - - vm.mempool = vm.createMempool() - - if err := vm.initializeMetrics(); err != nil { - return err - } - - if err := vm.initChainState(genesisBlock); err != nil { - return err - } - - return nil -} - -// builds genesis block if required -func (vm *VM) buildGenesisBlock(genesisData []byte) (*types.Block, error) { - if vm.tmState.LastBlockHeight != 0 { - return nil, nil - } - txs := types.Txs{types.Tx(genesisData)} - if len(txs) == 0 { - return nil, errNoPendingTxs - } - height := vm.tmState.LastBlockHeight + 1 - - commit := makeCommitMock(height, time.Now()) - genesisBlock, _ := vm.tmState.MakeBlock(height, txs, commit, nil, proposerAddress) - return genesisBlock, nil -} - -// Initializes Genesis if required -func (vm *VM) initGenesis(genesisData []byte) error { - // load genesis from database - genesis, err := node.LoadGenesisDoc(vm.stateDB) - // genesis not found in database - if err != nil { - if err == node.ErrNoGenesisDoc { - // get it from json - genesis, err = types.GenesisDocFromJSON(genesisData) - if err != nil { - return fmt.Errorf("failed to decode genesis bytes: %w ", err) - } - // save to database - err = node.SaveGenesisDoc(vm.stateDB, genesis) - if err != nil { - return fmt.Errorf("failed to save genesis data: %w ", err) - } - } else { - return err - } - } - - vm.genesis = genesis - return nil -} - -// InitGenesisChunks configures the environment -// and should be called on service startup. 
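A self-contained sketch of the chunking scheme initGenesisChunks below applies: the marshaled genesis document is split into fixed-size pieces and base64-encoded so the genesis_chunked API can serve it piecewise. The 16 MiB size matches the genesisChunkSize constant defined earlier; chunkGenesis is an illustrative helper, not part of the package:

    package main

    import (
        "encoding/base64"
        "fmt"
    )

    const chunkSize = 16 * 1024 * 1024 // mirrors genesisChunkSize (16 MiB)

    func chunkGenesis(data []byte) []string {
        var chunks []string
        for i := 0; i < len(data); i += chunkSize {
            end := i + chunkSize
            if end > len(data) {
                end = len(data)
            }
            chunks = append(chunks, base64.StdEncoding.EncodeToString(data[i:end]))
        }
        return chunks
    }

    func main() {
        chunks := chunkGenesis([]byte(`{"chain_id":"landslide-test"}`))
        fmt.Println(len(chunks), chunks[0])
    }
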
-func (vm *VM) initGenesisChunks() error { - if vm.genesis == nil { - return fmt.Errorf("empty genesis") - } - - data, err := tmjson.Marshal(vm.genesis) - if err != nil { - return err - } - - for i := 0; i < len(data); i += genesisChunkSize { - end := i + genesisChunkSize - - if end > len(data) { - end = len(data) - } - - vm.genChunks = append(vm.genChunks, base64.StdEncoding.EncodeToString(data[i:end])) - } - - return nil -} - -func (vm *VM) createMempool() *mempl.CListMempool { - cfg := config.DefaultMempoolConfig() - mempool := mempl.NewCListMempool( - cfg, - vm.proxyApp.Mempool(), - vm.tmState.LastBlockHeight, - vm, - mempl.WithMetrics(mempl.NopMetrics()), // TODO: use prometheus metrics based on config - mempl.WithPreCheck(sm.TxPreCheck(*vm.tmState)), - mempl.WithPostCheck(sm.TxPostCheck(*vm.tmState)), - ) - mempoolLogger := vm.tmLogger.With("module", "mempool") - mempool.SetLogger(mempoolLogger) - - return mempool -} - -// NotifyBlockReady tells the consensus engine that a new block -// is ready to be created -func (vm *VM) NotifyBlockReady() { - select { - case vm.toEngine <- common.PendingTxs: - vm.tmLogger.Debug("Notify consensys engine") - default: - vm.tmLogger.Error("Failed to push PendingTxs notification to the consensus engine.") - } -} - -func (vm *VM) doHandshake(genesis *types.GenesisDoc, consensusLogger log.Logger) error { - handshaker := cs.NewHandshaker(vm.stateStore, *vm.tmState, vm.blockStore, genesis) - handshaker.SetLogger(consensusLogger) - handshaker.SetEventBus(vm.eventBus) - if err := handshaker.Handshake(vm.proxyApp); err != nil { - return fmt.Errorf("error during handshake: %v", err) - } - return nil -} - -// readLastAccepted reads the last accepted hash from [acceptedBlockDB] and returns the -// last accepted block hash and height by reading directly from [vm.chaindb] instead of relying -// on [chain]. -// Note: assumes chaindb, ethConfig, and genesisHash have been initialized. -//func (vm *VM) readLastAccepted() (tmbytes.HexBytes, uint64, error) { -// // Attempt to load last accepted block to determine if it is necessary to -// // initialize state with the genesis block. 
-// lastAcceptedBytes, lastAcceptedErr := vm.acceptedBlockDB.Get(lastAcceptedKey) -// switch { -// case lastAcceptedErr == database.ErrNotFound: -// // If there is nothing in the database, return the genesis block hash and height -// return vm.genesisHash, 0, nil -// case lastAcceptedErr != nil: -// return common.Hash{}, 0, fmt.Errorf("failed to get last accepted block ID due to: %w", lastAcceptedErr) -// case len(lastAcceptedBytes) != common.HashLength: -// return common.Hash{}, 0, fmt.Errorf("last accepted bytes should have been length %d, but found %d", common.HashLength, len(lastAcceptedBytes)) -// default: -// lastAcceptedHash := common.BytesToHash(lastAcceptedBytes) -// height := rawdb.ReadHeaderNumber(vm.chaindb, lastAcceptedHash) -// if height == nil { -// return common.Hash{}, 0, fmt.Errorf("failed to retrieve header number of last accepted block: %s", lastAcceptedHash) -// } -// return lastAcceptedHash, *height, nil -// } -//} - -func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { - block, err := vm.newBlock(lastAcceptedBlock) - if err != nil { - return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) - } - block.status = choices.Accepted - - config := &chain.Config{ - DecidedCacheSize: decidedCacheSize, - MissingCacheSize: missingCacheSize, - UnverifiedCacheSize: unverifiedCacheSize, - //GetBlockIDAtHeight: vm.GetBlockIDAtHeight, - GetBlock: vm.getBlock, - UnmarshalBlock: vm.parseBlock, - BuildBlock: vm.buildBlock, - LastAcceptedBlock: block, - } - - // Register chain state metrics - chainStateRegisterer := prometheus.NewRegistry() - state, err := chain.NewMeteredState(chainStateRegisterer, config) - if err != nil { - return fmt.Errorf("could not create metered state: %w", err) - } - vm.State = state - - return vm.multiGatherer.Register(chainStateMetricsPrefix, chainStateRegisterer) -} - -func (vm *VM) initializeMetrics() error { - vm.multiGatherer = metrics.NewMultiGatherer() - - if err := vm.ctx.Metrics.Register(vm.multiGatherer); err != nil { - return err - } - - return nil -} - -// parseBlock parses [b] into a block to be wrapped by ChainState. -func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { - protoBlock := new(tmproto.Block) - err := protoBlock.Unmarshal(b) - if err != nil { - return nil, err - } - - tmBlock, err := types.BlockFromProto(protoBlock) - if err != nil { - return nil, err - } - - // Note: the status of block is set by ChainState - block, err := vm.newBlock(tmBlock) - if err != nil { - return nil, err - } - - return block, nil -} - -// getBlock attempts to retrieve block [id] from the VM to be wrapped -// by ChainState. -func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { - var hash []byte - copy(hash, id[:]) - tmBlock := vm.blockStore.LoadBlockByHash(hash) - // If [tmBlock] is nil, return [database.ErrNotFound] here - // so that the miss is considered cacheable. 
- if tmBlock == nil { - return nil, database.ErrNotFound - } - // Note: the status of block is set by ChainState - return vm.newBlock(tmBlock) -} - -func (vm *VM) applyBlock(block *Block) error { - vm.mempool.Lock() - defer vm.mempool.Unlock() - - state, err := vm.stateStore.Load() - if err != nil { - return err - } - - if err := validateBlock(state, block.tmBlock); err != nil { - return err - } - - abciResponses, err := execBlockOnProxyApp( - vm.tmLogger, - vm.proxyApp.Consensus(), - block.tmBlock, vm.stateStore, - state.InitialHeight, - ) - if err != nil { - return err - } - - // Save the results before we commit. - if err := vm.stateStore.SaveABCIResponses(block.tmBlock.Height, abciResponses); err != nil { - return err - } - - blockID := types.BlockID{ - Hash: block.tmBlock.Hash(), - PartSetHeader: block.tmBlock.MakePartSet(types.BlockPartSizeBytes).Header(), - } - - // Update the state with the block and responses. - state, err = updateState(state, blockID, &block.tmBlock.Header, abciResponses) - if err != nil { - return err - } - - // while mempool is Locked, flush to ensure all async requests have completed - // in the ABCI app before Commit. - if err := vm.mempool.FlushAppConn(); err != nil { - vm.tmLogger.Error("client error during mempool.FlushAppConn", "err", err) - return err - } - - // Commit block, get hash back - res, err := vm.proxyApp.Consensus().CommitSync() - if err != nil { - vm.tmLogger.Error("client error during proxyAppConn.CommitSync", "err", err) - return err - } - - // ResponseCommit has no error code - just data - vm.tmLogger.Info( - "committed state", - "height", block.Height, - "num_txs", len(block.tmBlock.Txs), - "app_hash", fmt.Sprintf("%X", res.Data), - ) - - deliverTxResponses := make([]*abciTypes.ResponseDeliverTx, len(block.tmBlock.Txs)) - for i := range block.tmBlock.Txs { - deliverTxResponses[i] = &abciTypes.ResponseDeliverTx{Code: abciTypes.CodeTypeOK} - } - - // Update mempool. 
- if err := vm.mempool.Update( - block.tmBlock.Height, - block.tmBlock.Txs, - deliverTxResponses, - TxPreCheck(state), - TxPostCheck(state), - ); err != nil { - return err - } - - vm.tmState.LastBlockHeight = block.tmBlock.Height - if err := vm.stateStore.Save(state); err != nil { - return err - } - vm.blockStore.SaveBlock(block.tmBlock, block.tmBlock.MakePartSet(types.BlockPartSizeBytes), block.tmBlock.LastCommit) - - fireEvents(vm.tmLogger, vm.eventBus, block.tmBlock, abciResponses) - return nil -} - -// buildBlock builds a block to be wrapped by ChainState -func (vm *VM) buildBlock(_ context.Context) (snowman.Block, error) { - txs := vm.mempool.ReapMaxBytesMaxGas(-1, -1) - if len(txs) == 0 { - return nil, errNoPendingTxs - } - height := vm.tmState.LastBlockHeight + 1 - - commit := makeCommitMock(height, time.Now()) - block, _ := vm.tmState.MakeBlock(height, txs, commit, nil, proposerAddress) - - // Note: the status of block is set by ChainState - blk, err := vm.newBlock(block) - blk.SetStatus(choices.Processing) - if err != nil { - return nil, err - } - vm.tmLogger.Debug(fmt.Sprintf("Built block %s", blk.ID())) - - return blk, nil -} - -func (vm *VM) AppGossip(_ context.Context, nodeID ids.NodeID, msg []byte) error { - return nil -} - -func (vm *VM) SetState(ctx context.Context, state snow.State) error { - return nil -} - -func (vm *VM) Shutdown(ctx context.Context) error { - // first stop the non-reactor services - if err := vm.eventBus.Stop(); err != nil { - return fmt.Errorf("Error closing eventBus: %w ", err) - } - if err := vm.indexerService.Stop(); err != nil { - return fmt.Errorf("Error closing indexerService: %w ", err) - } - //TODO: investigate wal configuration - // stop mempool WAL - //if vm.config.Mempool.WalEnabled() { - // n.mempool.CloseWAL() - //} - //if n.prometheusSrv != nil { - // if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { - // // Error from closing listeners, or context timeout: - // n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) - // } - //} - if err := vm.blockStore.Close(); err != nil { - return fmt.Errorf("Error closing blockStore: %w ", err) - } - if err := vm.stateStore.Close(); err != nil { - return fmt.Errorf("Error closing stateStore: %w ", err) - } - return nil - //timestampVM and deprecated landslide - //if vm.state == nil { - // return nil - //} - // - //return vm.state.Close() // close versionDB - - //coreth - //if vm.ctx == nil { - // return nil - //} - //vm.Network.Shutdown() - //if err := vm.StateSyncClient.Shutdown(); err != nil { - // log.Error("error stopping state syncer", "err", err) - //} - //close(vm.shutdownChan) - //vm.eth.Stop() - //vm.shutdownWg.Wait() - //return nil -} - -func (vm *VM) Version(ctx context.Context) (string, error) { - return Version.String(), nil -} - -func (vm *VM) CreateStaticHandlers(ctx context.Context) (map[string]*common.HTTPHandler, error) { - //TODO implement me - return nil, nil -} - -func (vm *VM) CreateHandlers(_ context.Context) (map[string]*common.HTTPHandler, error) { - mux := http.NewServeMux() - rpcLogger := vm.tmLogger.With("module", "rpc-server") - rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) - - server := rpc.NewServer() - server.RegisterCodec(json.NewCodec(), "application/json") - server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") - if err := server.RegisterService(NewService(vm), Name); err != nil { - return nil, err - } - - return map[string]*common.HTTPHandler{ - "/rpc": { - LockOptions: common.WriteLock, - Handler: server, 
- }, - }, nil -} - -func (vm *VM) ProxyApp() proxy.AppConns { - return vm.proxyApp -} - -func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { - //TODO implement me - return nil -} - -func (vm *VM) AppRequest(_ context.Context, nodeID ids.NodeID, requestID uint32, time time.Time, request []byte) error { - return nil -} - -// This VM doesn't (currently) have any app-specific messages -func (vm *VM) AppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { - return nil -} - -// This VM doesn't (currently) have any app-specific messages -func (vm *VM) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { - return nil -} - -func (vm *VM) CrossChainAppRequest(_ context.Context, _ ids.ID, _ uint32, deadline time.Time, request []byte) error { - return nil -} - -func (vm *VM) CrossChainAppRequestFailed(_ context.Context, _ ids.ID, _ uint32) error { - return nil -} - -func (vm *VM) CrossChainAppResponse(_ context.Context, _ ids.ID, _ uint32, response []byte) error { - return nil -} - -func (vm *VM) Connected(_ context.Context, id ids.NodeID, nodeVersion *version.Application) error { - return nil // noop -} - -func (vm *VM) Disconnected(_ context.Context, id ids.NodeID) error { - return nil // noop -} - -func (vm *VM) HealthCheck(ctx context.Context) (interface{}, error) { return nil, nil } From 9391978386cc398599c989348c493e84d8521de1 Mon Sep 17 00:00:00 2001 From: n0cte Date: Tue, 11 Jul 2023 15:41:16 +0400 Subject: [PATCH 04/14] correctly update state after block apply --- vm/funcs.go | 18 ----------- vm/service_test.go | 2 +- vm/vm.go | 78 +++++++++++++++++++++++++++------------------- 3 files changed, 47 insertions(+), 51 deletions(-) diff --git a/vm/funcs.go b/vm/funcs.go index c43c1d736..72f04a20e 100644 --- a/vm/funcs.go +++ b/vm/funcs.go @@ -206,24 +206,6 @@ func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { return types.NewResults(ar.DeliverTxs).Hash() } -func updateState( - st state.State, - blockID types.BlockID, - header *types.Header, - abciResponses *tmstate.ABCIResponses, -) (state.State, error) { - return state.State{ - Version: st.Version, - ChainID: st.ChainID, - InitialHeight: st.InitialHeight, - LastBlockHeight: header.Height, - LastBlockID: blockID, - LastBlockTime: header.Time, - LastResultsHash: ABCIResponsesResultsHash(abciResponses), - AppHash: nil, - }, nil -} - // TxPreCheck returns a function to filter transactions before processing. // The function limits the size of a transaction to the block's maximum data size. 
func TxPreCheck(state state.State) mempl.PreCheckFunc { diff --git a/vm/service_test.go b/vm/service_test.go index 639c0db06..6b1651c2d 100644 --- a/vm/service_test.go +++ b/vm/service_test.go @@ -21,7 +21,7 @@ func TestABCIService(t *testing.T) { assert.NoError(t, service.ABCIInfo(nil, nil, reply)) assert.Equal(t, uint64(1), reply.Response.AppVersion) assert.Equal(t, int64(1), reply.Response.LastBlockHeight) - assert.Equal(t, []uint8([]byte{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), reply.Response.LastBlockAppHash) + assert.Equal(t, []uint8([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), reply.Response.LastBlockAppHash) t.Logf("%+v", reply) }) diff --git a/vm/vm.go b/vm/vm.go index 8269d7043..08793f0bb 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -27,10 +27,11 @@ import ( abciTypes "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/consensus" + "github.com/consideritdone/landslidecore/crypto/tmhash" "github.com/consideritdone/landslidecore/libs/log" - "github.com/consideritdone/landslidecore/mempool" mempl "github.com/consideritdone/landslidecore/mempool" "github.com/consideritdone/landslidecore/node" + tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" "github.com/consideritdone/landslidecore/proxy" rpccore "github.com/consideritdone/landslidecore/rpc/core" @@ -97,7 +98,7 @@ type ( state state.State genesis *types.GenesisDoc - mempool *mempool.CListMempool + mempool *mempl.CListMempool eventBus *types.EventBus txIndexer txindex.TxIndexer @@ -374,9 +375,9 @@ func (vm *VM) Initialize( vm.app.Mempool(), vm.state.LastBlockHeight, vm, - mempool.WithMetrics(mempool.NopMetrics()), - mempool.WithPreCheck(state.TxPreCheck(vm.state)), - mempool.WithPostCheck(state.TxPostCheck(vm.state)), + mempl.WithMetrics(mempl.NopMetrics()), + mempl.WithPreCheck(state.TxPreCheck(vm.state)), + mempl.WithPostCheck(state.TxPostCheck(vm.state)), ) vm.mempool.SetLogger(vm.log.With("module", "mempool")) vm.mempool.EnableTxsAvailable() @@ -388,6 +389,13 @@ func (vm *VM) Initialize( if vm.state.LastBlockHeight == 0 { block, _ := vm.state.MakeBlock(1, types.Txs{types.Tx(genesisBytes)}, makeCommitMock(1, time.Now()), nil, proposerAddress) + block.LastBlockID = types.BlockID{ + Hash: tmhash.Sum([]byte{}), + PartSetHeader: types.PartSetHeader{ + Total: 0, + Hash: tmhash.Sum([]byte{}), + }, + } if err := NewBlock(vm, block, choices.Processing).Accept(ctx); err != nil { return err } @@ -587,25 +595,22 @@ func (vm *VM) ParseBlock(ctx context.Context, blockBytes []byte) (snowman.Block, // returned. 
func (vm *VM) BuildBlock(context.Context) (snowman.Block, error) { vm.log.Debug("build block") + txs := vm.mempool.ReapMaxBytesMaxGas(-1, -1) if len(txs) == 0 { return nil, errNoPendingTxs } - height := vm.state.LastBlockHeight + 1 - commit := makeCommitMock(height, time.Now()) - block, _ := vm.state.MakeBlock(height, txs, commit, nil, proposerAddress) + state := vm.state.Copy() - prev := vm.blockStore.LoadBlock(height - 1) - block.LastBlockID = types.BlockID{ - Hash: prev.Hash(), - PartSetHeader: prev.LastBlockID.PartSetHeader, - } + commit := makeCommitMock(state.LastBlockHeight+1, time.Now()) + block, _ := vm.state.MakeBlock(state.LastBlockHeight+1, txs, commit, nil, proposerAddress) + block.LastBlockID = state.LastBlockID blk := NewBlock(vm, block, choices.Processing) vm.verifiedBlocks[blk.ID()] = blk - vm.log.Debug("build block", "height", blk.Height(), "id", blk.ID()) + vm.log.Debug("build block", "id", blk.ID(), "height", blk.Height(), "txs", len(block.Txs)) return blk, nil } @@ -631,18 +636,28 @@ func (vm *VM) applyBlock(block *Block) error { vm.mempool.Lock() defer vm.mempool.Unlock() - state, err := vm.stateStore.Load() - if err != nil { - return err - } + state := vm.state.Copy() - if err := validateBlock(state, block.Block); err != nil { + err := validateBlock(state, block.Block) + if err != nil { return err } - abciResponses, err := execBlockOnProxyApp(vm.log, vm.app.Consensus(), block.Block, vm.stateStore, state.InitialHeight) - if err != nil { - return err + abciResponses := new(tmstate.ABCIResponses) + if state.LastBlockHeight > 0 { + abciResponses, err = execBlockOnProxyApp(vm.log, vm.app.Consensus(), block.Block, vm.stateStore, state.InitialHeight) + if err != nil { + return err + } + } else { + abciResponses.DeliverTxs = []*abciTypes.ResponseDeliverTx{ + &abciTypes.ResponseDeliverTx{ + Code: abciTypes.CodeTypeOK, + Data: block.Txs[0], + }, + } + abciResponses.BeginBlock = new(abciTypes.ResponseBeginBlock) + abciResponses.EndBlock = new(abciTypes.ResponseEndBlock) } // Save the results before we commit. @@ -655,12 +670,6 @@ func (vm *VM) applyBlock(block *Block) error { PartSetHeader: block.Block.MakePartSet(types.BlockPartSizeBytes).Header(), } - // Update the state with the block and responses. - state, err = updateState(state, blockID, &block.Block.Header, abciResponses) - if err != nil { - return err - } - // while mempool is Locked, flush to ensure all async requests have completed // in the ABCI app before Commit. if err := vm.mempool.FlushAppConn(); err != nil { @@ -675,6 +684,13 @@ func (vm *VM) applyBlock(block *Block) error { return err } + // Update the state with the block and responses. 
+ state.LastBlockHeight = block.Block.Height + state.LastBlockID = blockID + state.LastBlockTime = block.Time + state.LastResultsHash = types.NewResults(abciResponses.DeliverTxs).Hash() + state.AppHash = res.Data + // ResponseCommit has no error code - just data vm.log.Info( "committed state", @@ -699,14 +715,12 @@ func (vm *VM) applyBlock(block *Block) error { return err } - vm.state.LastBlockHeight = block.Block.Height - vm.state.LastBlockID = blockID - vm.state.LastBlockTime = block.Time if err := vm.stateStore.Save(state); err != nil { return err } - vm.blockStore.SaveBlock(block.Block, block.Block.MakePartSet(types.BlockPartSizeBytes), block.Block.LastCommit) + vm.state = state + vm.blockStore.SaveBlock(block.Block, block.Block.MakePartSet(types.BlockPartSizeBytes), block.Block.LastCommit) fireEvents(vm.log, vm.eventBus, block.Block, abciResponses) return nil } From 7f8c3e9a788d1a7232913dc3b9567e9653c164d5 Mon Sep 17 00:00:00 2001 From: n0cte Date: Wed, 12 Jul 2023 13:47:47 +0400 Subject: [PATCH 05/14] check block existance --- vm/vm.go | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/vm/vm.go b/vm/vm.go index 08793f0bb..68e8c1457 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -560,6 +560,9 @@ func (vm *VM) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) return b, nil } b := vm.blockStore.LoadBlockByHash(blkID[:]) + if b == nil { + return nil, errInvalidBlock + } return NewBlock(vm, b, choices.Accepted), nil } @@ -593,7 +596,7 @@ func (vm *VM) ParseBlock(ctx context.Context, blockBytes []byte) (snowman.Block, // // If the VM doesn't want to issue a new block, an error should be // returned. -func (vm *VM) BuildBlock(context.Context) (snowman.Block, error) { +func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { vm.log.Debug("build block") txs := vm.mempool.ReapMaxBytesMaxGas(-1, -1) @@ -601,11 +604,23 @@ func (vm *VM) BuildBlock(context.Context) (snowman.Block, error) { return nil, errNoPendingTxs } - state := vm.state.Copy() + var preferredBlock *types.Block + if b, ok := vm.verifiedBlocks[vm.preferred]; ok { + preferredBlock = b.Block + } else { + preferredBlock := vm.blockStore.LoadBlockByHash(vm.preferred[:]) + if preferredBlock == nil { + return nil, errInvalidBlock + } + } + preferredHeight := preferredBlock.Height - commit := makeCommitMock(state.LastBlockHeight+1, time.Now()) - block, _ := vm.state.MakeBlock(state.LastBlockHeight+1, txs, commit, nil, proposerAddress) - block.LastBlockID = state.LastBlockID + commit := makeCommitMock(preferredHeight+1, time.Now()) + block, _ := vm.state.MakeBlock(preferredHeight+1, txs, commit, nil, proposerAddress) + block.LastBlockID = types.BlockID{ + Hash: preferredBlock.Hash(), + PartSetHeader: preferredBlock.MakePartSet(types.BlockPartSizeBytes).Header(), + } blk := NewBlock(vm, block, choices.Processing) vm.verifiedBlocks[blk.ID()] = blk From 72765c21a87e552977b2fd549b328d5bcda76648 Mon Sep 17 00:00:00 2001 From: n0cte Date: Wed, 12 Jul 2023 17:34:55 +0400 Subject: [PATCH 06/14] add status serialization into block bytes --- vm/block.go | 11 ++++------ vm/vm.go | 58 +++++++++++++++++++---------------------------------- 2 files changed, 25 insertions(+), 44 deletions(-) diff --git a/vm/block.go b/vm/block.go index 407d86a97..500d20cc3 100644 --- a/vm/block.go +++ b/vm/block.go @@ -32,9 +32,7 @@ func NewBlock(vm *VM, block *types.Block, st choices.Status) *Block { // binary representation of this element. An element should return the same // IDs upon repeated calls. 
func (block *Block) ID() ids.ID { - var id ids.ID - copy(id[:], block.Hash()) - return id + return ids.ID(block.Hash()) } // Accept this element. @@ -43,6 +41,7 @@ func (block *Block) ID() ids.ID { func (block *Block) Accept(context.Context) error { block.vm.log.Debug("try to accept block", "block", block.ID()) block.st = choices.Accepted + delete(block.vm.verifiedBlocks, block.ID()) return block.vm.applyBlock(block) } @@ -70,9 +69,7 @@ func (block *Block) Status() choices.Status { // Parent returns the ID of this block's parent. func (block *Block) Parent() ids.ID { - var id ids.ID - copy(id[:], block.LastBlockID.Hash.Bytes()) - return id + return ids.ID(block.LastBlockID.Hash) } // Verify that the state transition this block would make if accepted is @@ -100,7 +97,7 @@ func (block *Block) Bytes() []byte { if err != nil { panic(fmt.Sprintf("can't serialize block: %s", err)) } - return data + return append([]byte{uint8(block.st)}, data...) } // Height returns the height of this block in the chain. diff --git a/vm/vm.go b/vm/vm.go index 68e8c1457..ff7f9f064 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -402,38 +402,9 @@ func (vm *VM) Initialize( } vm.log.Info("vm initialization completed") - return nil + return vm.SetPreference(ctx, ids.ID(vm.state.LastBlockID.Hash)) } -// func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { -// block, err := vm.newBlock(lastAcceptedBlock) -// if err != nil { -// return fmt.Errorf("failed to create block wrapper for the last accepted block: %w", err) -// } -// block.status = choices.Accepted -// -// config := &chain.Config{ -// DecidedCacheSize: decidedCacheSize, -// MissingCacheSize: missingCacheSize, -// UnverifiedCacheSize: unverifiedCacheSize, -// //GetBlockIDAtHeight: vm.GetBlockIDAtHeight, -// GetBlock: vm.getBlock, -// UnmarshalBlock: vm.parseBlock, -// BuildBlock: vm.buildBlock, -// LastAcceptedBlock: block, -// } -// -// // Register chain state metrics -// chainStateRegisterer := prometheus.NewRegistry() -// state, err := chain.NewMeteredState(chainStateRegisterer, config) -// if err != nil { -// return fmt.Errorf("could not create metered state: %w", err) -// } -// vm.State = state -// -// return vm.multiGatherer.Register(chainStateMetricsPrefix, chainStateRegisterer) -// } - func (vm *VM) NotifyBlockReady() { select { case vm.toEngine <- common.PendingTxs: @@ -557,12 +528,14 @@ func (vm *VM) CreateHandlers(context.Context) (map[string]*common.HTTPHandler, e func (vm *VM) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { vm.log.Debug("get block", "blkID", blkID.String()) if b, ok := vm.verifiedBlocks[blkID]; ok { + vm.log.Debug("get block", "status", b.Status()) return b, nil } b := vm.blockStore.LoadBlockByHash(blkID[:]) if b == nil { return nil, errInvalidBlock } + vm.log.Debug("get block", "status", choices.Accepted) return NewBlock(vm, b, choices.Accepted), nil } @@ -576,7 +549,7 @@ func (vm *VM) ParseBlock(ctx context.Context, blockBytes []byte) (snowman.Block, vm.log.Debug("parse block") protoBlock := new(tmproto.Block) - if err := protoBlock.Unmarshal(blockBytes); err != nil { + if err := protoBlock.Unmarshal(blockBytes[1:]); err != nil { vm.log.Error("can't parse block", "err", err) return nil, err } @@ -587,9 +560,13 @@ func (vm *VM) ParseBlock(ctx context.Context, blockBytes []byte) (snowman.Block, return nil, err } - vm.log.Debug("parsed block", "id", ids.ID(block.Hash())) + blk := NewBlock(vm, block, choices.Status(uint32(blockBytes[0]))) + vm.log.Debug("parsed block", "id", blk.ID(), "status", 
blk.Status().String()) + if _, ok := vm.verifiedBlocks[blk.ID()]; !ok { + vm.verifiedBlocks[blk.ID()] = blk + } - return NewBlock(vm, block, choices.Processing), nil + return blk, nil } // Attempt to create a new block from data contained in the VM. @@ -604,19 +581,23 @@ func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { return nil, errNoPendingTxs } + state := vm.state.Copy() + var preferredBlock *types.Block if b, ok := vm.verifiedBlocks[vm.preferred]; ok { + vm.log.Debug("load preferred block from cache", "id", vm.preferred.String()) preferredBlock = b.Block } else { - preferredBlock := vm.blockStore.LoadBlockByHash(vm.preferred[:]) + vm.log.Debug("load preferred block from blockStore", "id", vm.preferred.String()) + preferredBlock = vm.blockStore.LoadBlockByHash(vm.preferred[:]) if preferredBlock == nil { return nil, errInvalidBlock } } - preferredHeight := preferredBlock.Height + preferredHeight := preferredBlock.Header.Height commit := makeCommitMock(preferredHeight+1, time.Now()) - block, _ := vm.state.MakeBlock(preferredHeight+1, txs, commit, nil, proposerAddress) + block, _ := state.MakeBlock(preferredHeight+1, txs, commit, nil, proposerAddress) block.LastBlockID = types.BlockID{ Hash: preferredBlock.Hash(), PartSetHeader: preferredBlock.MakePartSet(types.BlockPartSizeBytes).Header(), @@ -644,7 +625,10 @@ func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { // a definitionally accepted block, the Genesis block, that will be // returned. func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { - return ids.ID(vm.state.LastBlockID.Hash), nil + if vm.preferred == ids.Empty { + return ids.ID(vm.state.LastBlockID.Hash), nil + } + return vm.preferred, nil } func (vm *VM) applyBlock(block *Block) error { From 2f6032ac9b8188ec9b47a9becadeb9d65ae9fe3d Mon Sep 17 00:00:00 2001 From: n0cte Date: Thu, 13 Jul 2023 11:29:35 +0400 Subject: [PATCH 07/14] rollback block changes --- types/block.go | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/types/block.go b/types/block.go index 84d55205b..19f3a4ee6 100644 --- a/types/block.go +++ b/types/block.go @@ -68,16 +68,16 @@ func (b *Block) ValidateBasic() error { if b.LastCommit == nil { return errors.New("nil LastCommit") } - //if err := b.LastCommit.ValidateBasic(); err != nil { - // return fmt.Errorf("wrong LastCommit: %v", err) - //} + if err := b.LastCommit.ValidateBasic(); err != nil { + return fmt.Errorf("wrong LastCommit: %v", err) + } - //if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - // return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", - // b.LastCommit.Hash(), - // b.LastCommitHash, - // ) - //} + if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { + return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", + b.LastCommit.Hash(), + b.LastCommitHash, + ) + } // NOTE: b.Data.Txs may be nil, but b.Data.Hash() still works fine. if !bytes.Equal(b.DataHash, b.Data.Hash()) { @@ -89,18 +89,18 @@ func (b *Block) ValidateBasic() error { } // NOTE: b.Evidence.Evidence may be nil, but we're just looping. 
- //for i, ev := range b.Evidence.Evidence { - // if err := ev.ValidateBasic(); err != nil { - // return fmt.Errorf("invalid evidence (#%d): %v", i, err) - // } - //} + for i, ev := range b.Evidence.Evidence { + if err := ev.ValidateBasic(); err != nil { + return fmt.Errorf("invalid evidence (#%d): %v", i, err) + } + } - //if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - // return fmt.Errorf("wrong Header.EvidenceHash. Expected %v, got %v", - // b.EvidenceHash, - // b.Evidence.Hash(), - // ) - //} + if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { + return fmt.Errorf("wrong Header.EvidenceHash. Expected %v, got %v", + b.EvidenceHash, + b.Evidence.Hash(), + ) + } return nil } @@ -972,11 +972,11 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { } sigs := make([]CommitSig, len(cp.Signatures)) - //for i := range cp.Signatures { - // if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { - // return nil, err - // } - //} + for i := range cp.Signatures { + if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { + return nil, err + } + } commit.Signatures = sigs commit.Height = cp.Height From 9485c92c469ae32c1cf25d09c4283c7047e1f706 Mon Sep 17 00:00:00 2001 From: n0cte Date: Thu, 13 Jul 2023 11:44:03 +0400 Subject: [PATCH 08/14] off block signs checking --- types/block.go | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/types/block.go b/types/block.go index 19f3a4ee6..84d55205b 100644 --- a/types/block.go +++ b/types/block.go @@ -68,16 +68,16 @@ func (b *Block) ValidateBasic() error { if b.LastCommit == nil { return errors.New("nil LastCommit") } - if err := b.LastCommit.ValidateBasic(); err != nil { - return fmt.Errorf("wrong LastCommit: %v", err) - } + //if err := b.LastCommit.ValidateBasic(); err != nil { + // return fmt.Errorf("wrong LastCommit: %v", err) + //} - if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", - b.LastCommit.Hash(), - b.LastCommitHash, - ) - } + //if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { + // return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", + // b.LastCommit.Hash(), + // b.LastCommitHash, + // ) + //} // NOTE: b.Data.Txs may be nil, but b.Data.Hash() still works fine. if !bytes.Equal(b.DataHash, b.Data.Hash()) { @@ -89,18 +89,18 @@ func (b *Block) ValidateBasic() error { } // NOTE: b.Evidence.Evidence may be nil, but we're just looping. - for i, ev := range b.Evidence.Evidence { - if err := ev.ValidateBasic(); err != nil { - return fmt.Errorf("invalid evidence (#%d): %v", i, err) - } - } + //for i, ev := range b.Evidence.Evidence { + // if err := ev.ValidateBasic(); err != nil { + // return fmt.Errorf("invalid evidence (#%d): %v", i, err) + // } + //} - if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - return fmt.Errorf("wrong Header.EvidenceHash. Expected %v, got %v", - b.EvidenceHash, - b.Evidence.Hash(), - ) - } + //if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { + // return fmt.Errorf("wrong Header.EvidenceHash. 
Expected %v, got %v", + // b.EvidenceHash, + // b.Evidence.Hash(), + // ) + //} return nil } @@ -972,11 +972,11 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { } sigs := make([]CommitSig, len(cp.Signatures)) - for i := range cp.Signatures { - if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { - return nil, err - } - } + //for i := range cp.Signatures { + // if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { + // return nil, err + // } + //} commit.Signatures = sigs commit.Height = cp.Height From d0ae8d51ef4e390f1ce343fd7ca276a60907e6c2 Mon Sep 17 00:00:00 2001 From: n0cte Date: Thu, 13 Jul 2023 13:17:37 +0400 Subject: [PATCH 09/14] change last block reading --- vm/vm.go | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/vm/vm.go b/vm/vm.go index ff7f9f064..223c2c34c 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -402,7 +402,7 @@ func (vm *VM) Initialize( } vm.log.Info("vm initialization completed") - return vm.SetPreference(ctx, ids.ID(vm.state.LastBlockID.Hash)) + return nil } func (vm *VM) NotifyBlockReady() { @@ -584,15 +584,19 @@ func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { state := vm.state.Copy() var preferredBlock *types.Block - if b, ok := vm.verifiedBlocks[vm.preferred]; ok { - vm.log.Debug("load preferred block from cache", "id", vm.preferred.String()) - preferredBlock = b.Block - } else { - vm.log.Debug("load preferred block from blockStore", "id", vm.preferred.String()) - preferredBlock = vm.blockStore.LoadBlockByHash(vm.preferred[:]) - if preferredBlock == nil { - return nil, errInvalidBlock + if vm.preferred != ids.Empty { + if b, ok := vm.verifiedBlocks[vm.preferred]; ok { + vm.log.Debug("load preferred block from cache", "id", vm.preferred.String()) + preferredBlock = b.Block + } else { + vm.log.Debug("load preferred block from blockStore", "id", vm.preferred.String()) + preferredBlock = vm.blockStore.LoadBlockByHash(vm.preferred[:]) + if preferredBlock == nil { + return nil, errInvalidBlock + } } + } else { + preferredBlock = vm.blockStore.LoadBlockByHash(state.LastBlockID.Hash) } preferredHeight := preferredBlock.Header.Height From 2a04162b9567bf22eca9693844c260670f712ee4 Mon Sep 17 00:00:00 2001 From: n0cte Date: Thu, 13 Jul 2023 14:35:06 +0400 Subject: [PATCH 10/14] revert block changes --- types/block.go | 50 +++++++++++++++++++++++++------------------------- vm/funcs.go | 33 +++++++++++++++++---------------- vm/vm.go | 4 ++-- 3 files changed, 44 insertions(+), 43 deletions(-) diff --git a/types/block.go b/types/block.go index 84d55205b..19f3a4ee6 100644 --- a/types/block.go +++ b/types/block.go @@ -68,16 +68,16 @@ func (b *Block) ValidateBasic() error { if b.LastCommit == nil { return errors.New("nil LastCommit") } - //if err := b.LastCommit.ValidateBasic(); err != nil { - // return fmt.Errorf("wrong LastCommit: %v", err) - //} + if err := b.LastCommit.ValidateBasic(); err != nil { + return fmt.Errorf("wrong LastCommit: %v", err) + } - //if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - // return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", - // b.LastCommit.Hash(), - // b.LastCommitHash, - // ) - //} + if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { + return fmt.Errorf("wrong Header.LastCommitHash. Expected %v, got %v", + b.LastCommit.Hash(), + b.LastCommitHash, + ) + } // NOTE: b.Data.Txs may be nil, but b.Data.Hash() still works fine. 
if !bytes.Equal(b.DataHash, b.Data.Hash()) { @@ -89,18 +89,18 @@ func (b *Block) ValidateBasic() error { } // NOTE: b.Evidence.Evidence may be nil, but we're just looping. - //for i, ev := range b.Evidence.Evidence { - // if err := ev.ValidateBasic(); err != nil { - // return fmt.Errorf("invalid evidence (#%d): %v", i, err) - // } - //} + for i, ev := range b.Evidence.Evidence { + if err := ev.ValidateBasic(); err != nil { + return fmt.Errorf("invalid evidence (#%d): %v", i, err) + } + } - //if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - // return fmt.Errorf("wrong Header.EvidenceHash. Expected %v, got %v", - // b.EvidenceHash, - // b.Evidence.Hash(), - // ) - //} + if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { + return fmt.Errorf("wrong Header.EvidenceHash. Expected %v, got %v", + b.EvidenceHash, + b.Evidence.Hash(), + ) + } return nil } @@ -972,11 +972,11 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { } sigs := make([]CommitSig, len(cp.Signatures)) - //for i := range cp.Signatures { - // if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { - // return nil, err - // } - //} + for i := range cp.Signatures { + if err := sigs[i].FromProto(cp.Signatures[i]); err != nil { + return nil, err + } + } commit.Signatures = sigs commit.Height = cp.Height diff --git a/vm/funcs.go b/vm/funcs.go index 72f04a20e..b48a27057 100644 --- a/vm/funcs.go +++ b/vm/funcs.go @@ -32,23 +32,24 @@ func NewLocalGenesisDocProvider(data []byte) node.GenesisDocProvider { } } -func makeCommitMock(height int64, timestamp time.Time) *types.Commit { - var commitSig []types.CommitSig = nil - if height != 1 { - commitSig = []types.CommitSig{{Timestamp: time.Now()}} - } - return types.NewCommit( - height, - 0, - types.BlockID{ - Hash: []byte(""), - PartSetHeader: types.PartSetHeader{ - Hash: []byte(""), - Total: 1, - }, +func makeCommit(height int64, timestamp time.Time) *types.Commit { + commitSig := []types.CommitSig(nil) + if height > 1 { + commitSig = []types.CommitSig{{ + BlockIDFlag: types.BlockIDFlagNil, + Timestamp: time.Now(), + ValidatorAddress: proposerAddress, + Signature: []byte{0x0}, + }} + } + blockID := types.BlockID{ + Hash: []byte(""), + PartSetHeader: types.PartSetHeader{ + Hash: []byte(""), + Total: 1, }, - commitSig, - ) + } + return types.NewCommit(height, 0, blockID, commitSig) } func validateBlock(state state.State, block *types.Block) error { diff --git a/vm/vm.go b/vm/vm.go index 223c2c34c..9c55c7ff5 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -388,7 +388,7 @@ func (vm *VM) Initialize( } if vm.state.LastBlockHeight == 0 { - block, _ := vm.state.MakeBlock(1, types.Txs{types.Tx(genesisBytes)}, makeCommitMock(1, time.Now()), nil, proposerAddress) + block, _ := vm.state.MakeBlock(1, types.Txs{types.Tx(genesisBytes)}, makeCommit(1, time.Now()), nil, proposerAddress) block.LastBlockID = types.BlockID{ Hash: tmhash.Sum([]byte{}), PartSetHeader: types.PartSetHeader{ @@ -600,7 +600,7 @@ func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { } preferredHeight := preferredBlock.Header.Height - commit := makeCommitMock(preferredHeight+1, time.Now()) + commit := makeCommit(preferredHeight+1, time.Now()) block, _ := state.MakeBlock(preferredHeight+1, txs, commit, nil, proposerAddress) block.LastBlockID = types.BlockID{ Hash: preferredBlock.Hash(), From 44a1f06c9c46d158e952cda1217695ce9f2db610 Mon Sep 17 00:00:00 2001 From: n0cte Date: Mon, 17 Jul 2023 11:49:48 +0400 Subject: [PATCH 11/14] fix generating first block --- vm/vm.go | 21 ++++----------------- 1 file 
changed, 4 insertions(+), 17 deletions(-) diff --git a/vm/vm.go b/vm/vm.go index 9c55c7ff5..3e5b9140b 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -31,7 +31,6 @@ import ( "github.com/consideritdone/landslidecore/libs/log" mempl "github.com/consideritdone/landslidecore/mempool" "github.com/consideritdone/landslidecore/node" - tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" "github.com/consideritdone/landslidecore/proxy" rpccore "github.com/consideritdone/landslidecore/rpc/core" @@ -388,7 +387,7 @@ func (vm *VM) Initialize( } if vm.state.LastBlockHeight == 0 { - block, _ := vm.state.MakeBlock(1, types.Txs{types.Tx(genesisBytes)}, makeCommit(1, time.Now()), nil, proposerAddress) + block, _ := vm.state.MakeBlock(1, types.Txs{}, makeCommit(1, time.Now()), nil, proposerAddress) block.LastBlockID = types.BlockID{ Hash: tmhash.Sum([]byte{}), PartSetHeader: types.PartSetHeader{ @@ -646,21 +645,9 @@ func (vm *VM) applyBlock(block *Block) error { return err } - abciResponses := new(tmstate.ABCIResponses) - if state.LastBlockHeight > 0 { - abciResponses, err = execBlockOnProxyApp(vm.log, vm.app.Consensus(), block.Block, vm.stateStore, state.InitialHeight) - if err != nil { - return err - } - } else { - abciResponses.DeliverTxs = []*abciTypes.ResponseDeliverTx{ - &abciTypes.ResponseDeliverTx{ - Code: abciTypes.CodeTypeOK, - Data: block.Txs[0], - }, - } - abciResponses.BeginBlock = new(abciTypes.ResponseBeginBlock) - abciResponses.EndBlock = new(abciTypes.ResponseEndBlock) + abciResponses, err := execBlockOnProxyApp(vm.log, vm.app.Consensus(), block.Block, vm.stateStore, state.InitialHeight) + if err != nil { + return err } // Save the results before we commit. From 4d7c9a8e1e8c0091c45b828a93d847bafeb89ba2 Mon Sep 17 00:00:00 2001 From: n0cte Date: Mon, 17 Jul 2023 12:07:03 +0400 Subject: [PATCH 12/14] update go version --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1da0ea079..5fcdbc216 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: "1.19" + go-version: "1.20" - uses: actions/checkout@v3 - run: go mod download shell: bash @@ -27,7 +27,7 @@ jobs: steps: - uses: actions/setup-go@v4 with: - go-version: "1.19" + go-version: "1.20" - uses: actions/checkout@v3 - name: test VM run: go test -v ./vm/... 
From a8b21c5cf8a7f13947f256cc36902f9ad3d90ddb Mon Sep 17 00:00:00 2001 From: n0cte Date: Tue, 18 Jul 2023 17:10:26 +0400 Subject: [PATCH 13/14] add tendermint like rpc interface --- rpc/jsonrpc/server/http_json_handler.go | 17 +- rpc/jsonrpc/server/rpc_func.go | 2 +- vm/funcs.go | 5 +- vm/service.go | 782 +++++++++++++----------- vm/service_test.go | 393 +++++++++--- vm/vm.go | 35 +- vm/vm_test.go | 19 +- 7 files changed, 771 insertions(+), 482 deletions(-) diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index b51f1f231..930e8c43d 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -17,7 +17,7 @@ import ( // HTTP + JSON handler // jsonrpc calls grab the given method's function info and runs reflect.Call -func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { +func MakeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { b, err := ioutil.ReadAll(r.Body) if err != nil { @@ -67,13 +67,14 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han ) continue } - if len(r.URL.Path) > 1 { - responses = append( - responses, - types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), - ) - continue - } + // TODO: need to rever this change + //if len(r.URL.Path) > 1 { + // responses = append( + // responses, + // types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), + // ) + // continue + //} rpcFunc, ok := funcMap[request.Method] if !ok || rpcFunc.ws { responses = append(responses, types.RPCMethodNotFoundError(request.ID)) diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index 9f39c3664..03925c0c4 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -20,7 +20,7 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo } // JSONRPC endpoints - mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) + mux.HandleFunc("/", handleInvalidJSONRPCPaths(MakeJSONRPCHandler(funcMap, logger))) } // Function introspection diff --git a/vm/funcs.go b/vm/funcs.go index b48a27057..5f34d6dc5 100644 --- a/vm/funcs.go +++ b/vm/funcs.go @@ -14,7 +14,6 @@ import ( tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" "github.com/consideritdone/landslidecore/proxy" "github.com/consideritdone/landslidecore/rpc/client" - coretypes "github.com/consideritdone/landslidecore/rpc/core/types" "github.com/consideritdone/landslidecore/state" "github.com/consideritdone/landslidecore/store" "github.com/consideritdone/landslidecore/types" @@ -371,8 +370,8 @@ func WaitForHeight(c Service, h int64, waiter client.Waiter) error { } delta := int64(1) for delta > 0 { - r := new(coretypes.ResultStatus) - if err := c.Status(nil, nil, r); err != nil { + r, err := c.Status(nil) + if err != nil { return err } delta = h - r.SyncInfo.LatestBlockHeight diff --git a/vm/service.go b/vm/service.go index aca8dc3e6..de91d4531 100644 --- a/vm/service.go +++ b/vm/service.go @@ -4,22 +4,31 @@ import ( "context" "errors" "fmt" - "net/http" "sort" "time" abci "github.com/consideritdone/landslidecore/abci/types" tmbytes "github.com/consideritdone/landslidecore/libs/bytes" tmmath "github.com/consideritdone/landslidecore/libs/math" + tmpubsub "github.com/consideritdone/landslidecore/libs/pubsub" tmquery 
"github.com/consideritdone/landslidecore/libs/pubsub/query" mempl "github.com/consideritdone/landslidecore/mempool" "github.com/consideritdone/landslidecore/p2p" "github.com/consideritdone/landslidecore/proxy" "github.com/consideritdone/landslidecore/rpc/core" ctypes "github.com/consideritdone/landslidecore/rpc/core/types" + rpcserver "github.com/consideritdone/landslidecore/rpc/jsonrpc/server" + rpctypes "github.com/consideritdone/landslidecore/rpc/jsonrpc/types" + blockidxnull "github.com/consideritdone/landslidecore/state/indexer/block/null" + "github.com/consideritdone/landslidecore/state/txindex/null" "github.com/consideritdone/landslidecore/types" ) +var ( + SubscribeTimeout = 5 * time.Second + _ Service = (*LocalService)(nil) +) + type ( LocalService struct { vm *VM @@ -27,196 +36,261 @@ type ( Service interface { ABCIService - HistoryService - NetworkService - SignService - StatusService - MempoolService - } - - ABCIQueryArgs struct { - Path string `json:"path"` - Data tmbytes.HexBytes `json:"data"` - } - - ABCIQueryOptions struct { - Height int64 `json:"height"` - Prove bool `json:"prove"` - } - - ABCIQueryWithOptionsArgs struct { - Path string `json:"path"` - Data tmbytes.HexBytes `json:"data"` - Opts ABCIQueryOptions `json:"opts"` - } - - BroadcastTxArgs struct { - Tx types.Tx `json:"tx"` + EventsService + HistoryClient + NetworkClient + SignClient + StatusClient + MempoolClient } ABCIService interface { // Reading from abci app - ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error - ABCIQuery(_ *http.Request, args *ABCIQueryArgs, reply *ctypes.ResultABCIQuery) error - ABCIQueryWithOptions(_ *http.Request, args *ABCIQueryWithOptionsArgs, reply *ctypes.ResultABCIQuery) error + ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) + ABCIQuery(ctx *rpctypes.Context, path string, data tmbytes.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) // Writing to abci app - BroadcastTxCommit(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTxCommit) error - BroadcastTxAsync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error - BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error + BroadcastTxCommit(*rpctypes.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(*rpctypes.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxSync(*rpctypes.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) } - BlockHeightArgs struct { - Height *int64 `json:"height"` + EventsService interface { + Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) + Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) + UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) } - BlockHashArgs struct { - Hash []byte `json:"hash"` + HistoryClient interface { + Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) + GenesisChunked(*rpctypes.Context, uint) (*ctypes.ResultGenesisChunk, error) + BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) } - CommitArgs struct { - Height *int64 `json:"height"` + MempoolClient interface { + UnconfirmedTxs(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) + NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) + CheckTx(*rpctypes.Context, types.Tx) (*ctypes.ResultCheckTx, error) } - ValidatorsArgs struct { - Height *int64 
`json:"height"` - Page *int `json:"page"` - PerPage *int `json:"perPage"` + NetworkClient interface { + NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) + DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) + ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) + ConsensusParams(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) + Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) } - TxArgs struct { - Hash tmbytes.HexBytes `json:"hash"` - Prove bool `json:"prove"` - } + SignClient interface { + Block(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) + BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) + BlockResults(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) + Commit(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) + Validators(ctx *rpctypes.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) + Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) - TxSearchArgs struct { - Query string `json:"query"` - Prove bool `json:"prove"` - Page *int `json:"page"` - PerPage *int `json:"perPage"` - OrderBy string `json:"orderBy"` - } - - BlockSearchArgs struct { - Query string `json:"query"` - Page *int `json:"page"` - PerPage *int `json:"perPage"` - OrderBy string `json:"orderBy"` - } + TxSearch(ctx *rpctypes.Context, query string, prove bool, + page, perPage *int, orderBy string) (*ctypes.ResultTxSearch, error) - SignService interface { - Block(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlock) error - BlockByHash(_ *http.Request, args *BlockHashArgs, reply *ctypes.ResultBlock) error - BlockResults(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlockResults) error - Commit(_ *http.Request, args *CommitArgs, reply *ctypes.ResultCommit) error - Validators(_ *http.Request, args *ValidatorsArgs, reply *ctypes.ResultValidators) error - Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error - TxSearch(_ *http.Request, args *TxSearchArgs, reply *ctypes.ResultTxSearch) error - BlockSearch(_ *http.Request, args *BlockSearchArgs, reply *ctypes.ResultBlockSearch) error + BlockSearch(ctx *rpctypes.Context, query string, + page, perPage *int, orderBy string) (*ctypes.ResultBlockSearch, error) } - BlockchainInfoArgs struct { - MinHeight int64 `json:"minHeight"` - MaxHeight int64 `json:"maxHeight"` + StatusClient interface { + Status(*rpctypes.Context) (*ctypes.ResultStatus, error) } +) - GenesisChunkedArgs struct { - Chunk uint `json:"chunk"` - } +func NewService(vm *VM) *LocalService { + return &LocalService{vm} +} - HistoryService interface { - BlockchainInfo(_ *http.Request, args *BlockchainInfoArgs, reply *ctypes.ResultBlockchainInfo) error - Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error - GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error +func NewServiceAsRPCRoutes(vm *VM) map[string]*rpcserver.RPCFunc { + s := NewService(vm) + return map[string]*rpcserver.RPCFunc{ + // subscribe/unsubscribe are reserved for websocket events. 
+ "subscribe": rpcserver.NewWSRPCFunc(s.Subscribe, "query"), + "unsubscribe": rpcserver.NewWSRPCFunc(s.Unsubscribe, "query"), + "unsubscribe_all": rpcserver.NewWSRPCFunc(s.UnsubscribeAll, ""), + + // info API + "health": rpcserver.NewRPCFunc(s.Health, ""), + "status": rpcserver.NewRPCFunc(s.Status, ""), + "net_info": rpcserver.NewRPCFunc(s.NetInfo, ""), + "blockchain": rpcserver.NewRPCFunc(s.BlockchainInfo, "minHeight,maxHeight"), + "genesis": rpcserver.NewRPCFunc(s.Genesis, ""), + "genesis_chunked": rpcserver.NewRPCFunc(s.GenesisChunked, "chunk"), + "block": rpcserver.NewRPCFunc(s.Block, "height"), + "block_by_hash": rpcserver.NewRPCFunc(s.BlockByHash, "hash"), + "block_results": rpcserver.NewRPCFunc(s.BlockResults, "height"), + "commit": rpcserver.NewRPCFunc(s.Commit, "height"), + "check_tx": rpcserver.NewRPCFunc(s.CheckTx, "tx"), + "tx": rpcserver.NewRPCFunc(s.Tx, "hash,prove"), + "tx_search": rpcserver.NewRPCFunc(s.TxSearch, "query,prove,page,per_page,order_by"), + "block_search": rpcserver.NewRPCFunc(s.BlockSearch, "query,page,per_page,order_by"), + "validators": rpcserver.NewRPCFunc(s.Validators, "height,page,per_page"), + "dump_consensus_state": rpcserver.NewRPCFunc(s.DumpConsensusState, ""), + "consensus_state": rpcserver.NewRPCFunc(s.ConsensusState, ""), + "consensus_params": rpcserver.NewRPCFunc(s.ConsensusParams, "height"), + "unconfirmed_txs": rpcserver.NewRPCFunc(s.UnconfirmedTxs, "limit"), + "num_unconfirmed_txs": rpcserver.NewRPCFunc(s.NumUnconfirmedTxs, ""), + + // tx broadcast API + "broadcast_tx_commit": rpcserver.NewRPCFunc(s.BroadcastTxCommit, "tx"), + "broadcast_tx_sync": rpcserver.NewRPCFunc(s.BroadcastTxSync, "tx"), + "broadcast_tx_async": rpcserver.NewRPCFunc(s.BroadcastTxAsync, "tx"), + + // abci API + "abci_query": rpcserver.NewRPCFunc(s.ABCIQuery, "path,data,height,prove"), + "abci_info": rpcserver.NewRPCFunc(s.ABCIInfo, ""), } +} - StatusService interface { - Status(_ *http.Request, _ *struct{}, reply *ctypes.ResultStatus) error - } +func (s *LocalService) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { + addr := ctx.RemoteAddr() - ConsensusParamsArgs struct { - Height *int64 `json:"height"` + if s.vm.eventBus.NumClients() >= s.vm.rpcConfig.MaxSubscriptionClients { + return nil, fmt.Errorf("max_subscription_clients %d reached", s.vm.rpcConfig.MaxSubscriptionClients) + } else if s.vm.eventBus.NumClientSubscriptions(addr) >= s.vm.rpcConfig.MaxSubscriptionsPerClient { + return nil, fmt.Errorf("max_subscriptions_per_client %d reached", s.vm.rpcConfig.MaxSubscriptionsPerClient) } - NetworkService interface { - NetInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultNetInfo) error - DumpConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultDumpConsensusState) error - ConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultConsensusState) error - ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error - Health(_ *http.Request, _ *struct{}, reply *ctypes.ResultHealth) error - } + s.vm.log.Info("Subscribe to query", "remote", addr, "query", query) - UnconfirmedTxsArgs struct { - Limit *int `json:"limit"` + q, err := tmquery.New(query) + if err != nil { + return nil, fmt.Errorf("failed to parse query: %w", err) } - CheckTxArgs struct { - Tx []byte `json:"tx"` - } + subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) + defer cancel() - MempoolService interface { - UnconfirmedTxs(_ *http.Request, args *UnconfirmedTxsArgs, reply 
*ctypes.ResultUnconfirmedTxs) error - NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply *ctypes.ResultUnconfirmedTxs) error - CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error - } -) + sub, err := s.vm.eventBus.Subscribe(subCtx, addr, q, s.vm.rpcConfig.SubscriptionBufferSize) + if err != nil { + return nil, err + } + + closeIfSlow := s.vm.rpcConfig.CloseOnSlowClient + + // TODO: inspired by Ilnur: usage of ctx.JSONReq.ID may cause situation when user or server try to create multiple subscriptions with the same id. + // Solution: return error code with the error sescription when this situation happens + // Capture the current ID, since it can change in the future. + subscriptionID := ctx.JSONReq.ID + go func() { + for { + select { + case msg := <-sub.Out(): + var ( + resultEvent = &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} + resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent) + ) + writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err = ctx.WSConn.WriteRPCResponse(writeCtx, resp); err != nil { + s.vm.log.Info("Can't write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", err) + + if closeIfSlow { + var ( + err = errors.New("subscription was cancelled (reason: slow client)") + resp = rpctypes.RPCServerError(subscriptionID, err) + ) + if !ctx.WSConn.TryWriteRPCResponse(resp) { + s.vm.log.Info("Can't write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", err) + } + return + } + } + case <-sub.Cancelled(): + if sub.Err() != tmpubsub.ErrUnsubscribed { + var reason string + if sub.Err() == nil { + reason = "Tendermint exited" + } else { + reason = sub.Err().Error() + } + resp := rpctypes.RPCServerError(subscriptionID, err) + if !ctx.WSConn.TryWriteRPCResponse(resp) { + s.vm.log.Info("Can't write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", + fmt.Errorf("subscription was cancelled (reason: %s)", reason)) + } + } + return + } + } + }() -var ( - DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false} -) + return &ctypes.ResultSubscribe{}, nil +} -func NewService(vm *VM) Service { - return &LocalService{vm} +func (s *LocalService) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { + addr := ctx.RemoteAddr() + s.vm.log.Info("Unsubscribe from query", "remote", addr, "query", query) + q, err := tmquery.New(query) + if err != nil { + return nil, fmt.Errorf("failed to parse query: %w", err) + } + err = s.vm.eventBus.Unsubscribe(context.Background(), addr, q) + if err != nil { + return nil, err + } + return &ctypes.ResultUnsubscribe{}, nil } -func (s *LocalService) ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error { - resInfo, err := s.vm.app.Query().InfoSync(proxy.RequestInfo) +func (s *LocalService) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { + addr := ctx.RemoteAddr() + s.vm.log.Info("Unsubscribe from all", "remote", addr) + err := s.vm.eventBus.UnsubscribeAll(context.Background(), addr) if err != nil { - return err + return nil, err } - reply.Response = *resInfo - return nil + return &ctypes.ResultUnsubscribe{}, nil } -func (s *LocalService) ABCIQuery(req *http.Request, args *ABCIQueryArgs, reply *ctypes.ResultABCIQuery) error { - return s.ABCIQueryWithOptions(req, &ABCIQueryWithOptionsArgs{args.Path, args.Data, DefaultABCIQueryOptions}, reply) +func (s *LocalService) ABCIInfo(ctx 
*rpctypes.Context) (*ctypes.ResultABCIInfo, error) { + resInfo, err := s.vm.app.Query().InfoSync(proxy.RequestInfo) + if err != nil || resInfo == nil { + return nil, err + } + return &ctypes.ResultABCIInfo{Response: *resInfo}, nil } -func (s *LocalService) ABCIQueryWithOptions( - _ *http.Request, - args *ABCIQueryWithOptionsArgs, - reply *ctypes.ResultABCIQuery, -) error { +// TODO: attention! Different signatures in RPC interfaces +func (s *LocalService) ABCIQuery( + ctx *rpctypes.Context, + path string, + data tmbytes.HexBytes, + height int64, + prove bool, +) (*ctypes.ResultABCIQuery, error) { resQuery, err := s.vm.app.Query().QuerySync(abci.RequestQuery{ - Path: args.Path, - Data: args.Data, - Height: args.Opts.Height, - Prove: args.Opts.Prove, + Path: path, + Data: data, + Height: height, + Prove: prove, }) - if err != nil { - return err + if err != nil || resQuery == nil { + return nil, err } - reply.Response = *resQuery - return nil + + return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } -func (s *LocalService) BroadcastTxCommit( - _ *http.Request, - args *BroadcastTxArgs, - reply *ctypes.ResultBroadcastTxCommit, -) error { +func (s *LocalService) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { subscriber := "" // Subscribe to tx being committed in block. - subCtx, cancel := context.WithTimeout(context.Background(), core.SubscribeTimeout) + subCtx, cancel := context.WithTimeout(ctx.Context(), core.SubscribeTimeout) defer cancel() - q := types.EventQueryTxFor(args.Tx) + q := types.EventQueryTxFor(tx) deliverTxSub, err := s.vm.eventBus.Subscribe(subCtx, subscriber, q) if err != nil { err = fmt.Errorf("failed to subscribe to tx: %w", err) s.vm.log.Error("Error on broadcast_tx_commit", "err", err) - return err + return nil, err } defer func() { @@ -227,35 +301,33 @@ func (s *LocalService) BroadcastTxCommit( // Broadcast tx and wait for CheckTx result checkTxResCh := make(chan *abci.Response, 1) - err = s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { + err = s.vm.mempool.CheckTx(tx, func(res *abci.Response) { checkTxResCh <- res }, mempl.TxInfo{}) if err != nil { s.vm.log.Error("Error on broadcastTxCommit", "err", err) - return fmt.Errorf("error on broadcastTxCommit: %v", err) + return nil, fmt.Errorf("error on broadcastTxCommit: %v", err) } checkTxResMsg := <-checkTxResCh checkTxRes := checkTxResMsg.GetCheckTx() if checkTxRes.Code != abci.CodeTypeOK { - *reply = ctypes.ResultBroadcastTxCommit{ + return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, DeliverTx: abci.ResponseDeliverTx{}, - Hash: args.Tx.Hash(), - } - return nil + Hash: tx.Hash(), + }, nil } // Wait for the tx to be included in a block or timeout. select { case msg := <-deliverTxSub.Out(): // The tx was included in a block. 
deliverTxRes := msg.Data().(types.EventDataTx) - *reply = ctypes.ResultBroadcastTxCommit{ + return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, DeliverTx: deliverTxRes.Result, - Hash: args.Tx.Hash(), + Hash: tx.Hash(), Height: deliverTxRes.Height, - } - return nil + }, nil case <-deliverTxSub.Cancelled(): var reason string if deliverTxSub.Err() == nil { @@ -265,194 +337,189 @@ func (s *LocalService) BroadcastTxCommit( } err = fmt.Errorf("deliverTxSub was cancelled (reason: %s)", reason) s.vm.log.Error("Error on broadcastTxCommit", "err", err) - return err + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxRes, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: tx.Hash(), + }, err // TODO: use config for timeout case <-time.After(10 * time.Second): err = errors.New("timed out waiting for tx to be included in a block") s.vm.log.Error("Error on broadcastTxCommit", "err", err) - return err + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxRes, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: tx.Hash(), + }, err } } -func (s *LocalService) BroadcastTxAsync( - _ *http.Request, - args *BroadcastTxArgs, - reply *ctypes.ResultBroadcastTx, -) error { - err := s.vm.mempool.CheckTx(args.Tx, nil, mempl.TxInfo{}) +func (s *LocalService) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + err := s.vm.mempool.CheckTx(tx, nil, mempl.TxInfo{}) if err != nil { - return err + return nil, err } - reply.Hash = args.Tx.Hash() - return nil + return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil } -func (s *LocalService) BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error { +func (s *LocalService) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) - err := s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { + err := s.vm.mempool.CheckTx(tx, func(res *abci.Response) { s.vm.log.With("module", "service").Debug("handled response from checkTx") resCh <- res }, mempl.TxInfo{}) if err != nil { - return err + return nil, err } res := <-resCh r := res.GetCheckTx() - - reply.Code = r.Code - reply.Data = r.Data - reply.Log = r.Log - reply.Codespace = r.Codespace - reply.Hash = args.Tx.Hash() - - return nil + return &ctypes.ResultBroadcastTx{ + Code: r.Code, + Data: r.Data, + Log: r.Log, + Codespace: r.Codespace, + Hash: tx.Hash(), + }, nil } -func (s *LocalService) Block(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlock) error { - height, err := getHeight(s.vm.blockStore, args.Height) +func (s *LocalService) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { + height, err := getHeight(s.vm.blockStore, heightPtr) if err != nil { - return err + return nil, err } + block := s.vm.blockStore.LoadBlock(height) blockMeta := s.vm.blockStore.LoadBlockMeta(height) - - if blockMeta != nil { - reply.BlockID = blockMeta.BlockID + if blockMeta == nil { + return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: block}, nil } - reply.Block = block - return nil + return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } -func (s *LocalService) BlockByHash(_ *http.Request, args *BlockHashArgs, reply *ctypes.ResultBlock) error { - block := s.vm.blockStore.LoadBlockByHash(args.Hash) +func (s *LocalService) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { + block := s.vm.blockStore.LoadBlockByHash(hash) if block == nil { - reply.BlockID = types.BlockID{} - 
reply.Block = nil - return nil + return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } + // If block is not nil, then blockMeta can't be nil. blockMeta := s.vm.blockStore.LoadBlockMeta(block.Height) - reply.BlockID = blockMeta.BlockID - reply.Block = block - return nil + return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } -func (s *LocalService) BlockResults(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlockResults) error { - height, err := getHeight(s.vm.blockStore, args.Height) +func (s *LocalService) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { + height, err := getHeight(s.vm.blockStore, heightPtr) if err != nil { - return err + return nil, err } results, err := s.vm.stateStore.LoadABCIResponses(height) if err != nil { - return err + return nil, err } - reply.Height = height - reply.TxsResults = results.DeliverTxs - reply.BeginBlockEvents = results.BeginBlock.Events - reply.EndBlockEvents = results.EndBlock.Events - reply.ValidatorUpdates = results.EndBlock.ValidatorUpdates - reply.ConsensusParamUpdates = results.EndBlock.ConsensusParamUpdates - return nil + return &ctypes.ResultBlockResults{ + Height: height, + TxsResults: results.DeliverTxs, + BeginBlockEvents: results.BeginBlock.Events, + EndBlockEvents: results.EndBlock.Events, + ValidatorUpdates: results.EndBlock.ValidatorUpdates, + ConsensusParamUpdates: results.EndBlock.ConsensusParamUpdates, + }, nil } -func (s *LocalService) Commit(_ *http.Request, args *CommitArgs, reply *ctypes.ResultCommit) error { - height, err := getHeight(s.vm.blockStore, args.Height) +func (s *LocalService) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { + height, err := getHeight(s.vm.blockStore, heightPtr) if err != nil { - return err + return nil, err } blockMeta := s.vm.blockStore.LoadBlockMeta(height) if blockMeta == nil { - return nil + return nil, nil } - header := blockMeta.Header - commit := s.vm.blockStore.LoadBlockCommit(height) - res := ctypes.NewResultCommit(&header, commit, !(height == s.vm.blockStore.Height())) - reply.SignedHeader = res.SignedHeader - reply.CanonicalCommit = res.CanonicalCommit - return nil + // Return the canonical commit (comes from the block at height+1) + commit := s.vm.blockStore.LoadBlockCommit(height) + return ctypes.NewResultCommit(&header, commit, true), nil } -func (s *LocalService) Validators(_ *http.Request, args *ValidatorsArgs, reply *ctypes.ResultValidators) error { - height, err := getHeight(s.vm.blockStore, args.Height) +func (s *LocalService) Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { + height, err := getHeight(s.vm.blockStore, heightPtr) if err != nil { - return err + return nil, err } validators, err := s.vm.stateStore.LoadValidators(height) if err != nil { - return err + return nil, err } totalCount := len(validators.Validators) - perPage := validatePerPage(args.PerPage) - page, err := validatePage(args.Page, perPage, totalCount) + perPage := validatePerPage(perPagePtr) + page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { - return err + return nil, err } skipCount := validateSkipCount(page, perPage) - reply.BlockHeight = height - reply.Validators = validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - reply.Count = len(reply.Validators) - reply.Total = totalCount - return nil + v := validators.Validators[skipCount : 
skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] + + return &ctypes.ResultValidators{ + BlockHeight: height, + Validators: v, + Count: len(v), + Total: totalCount}, nil } -func (s *LocalService) Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error { - s.vm.log.Debug("query tx", "hash", args.Hash) - r, err := s.vm.txIndexer.Get(args.Hash) +func (s *LocalService) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + if _, ok := s.vm.txIndexer.(*null.TxIndex); ok { + return nil, fmt.Errorf("transaction indexing is disabled") + } + + r, err := s.vm.txIndexer.Get(hash) if err != nil { - return err + return nil, err } - s.vm.log.Debug("query tx", "r", args.Hash) if r == nil { - return fmt.Errorf("tx (%X) not found", args.Hash) + return nil, fmt.Errorf("tx (%X) not found", hash) } height := r.Height index := r.Index var proof types.TxProof - if args.Prove { + if prove { block := s.vm.blockStore.LoadBlock(height) proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines } - reply.Hash = args.Hash - reply.Height = height - reply.Index = index - reply.TxResult = r.Result - reply.Tx = r.Tx - reply.Proof = proof - return nil + return &ctypes.ResultTx{ + Hash: hash, + Height: height, + Index: index, + TxResult: r.Result, + Tx: r.Tx, + Proof: proof, + }, nil } -func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ctypes.ResultTxSearch) error { - q, err := tmquery.New(args.Query) +func (s *LocalService) TxSearch(ctx *rpctypes.Context, query string, prove bool, pagePtr, perPagePtr *int, orderBy string) (*ctypes.ResultTxSearch, error) { + // if index is disabled, return error + q, err := tmquery.New(query) if err != nil { - return err - } - - var ctx context.Context - if req != nil { - ctx = req.Context() - } else { - ctx = context.Background() + return nil, err } - results, err := s.vm.txIndexer.Search(ctx, q) + results, err := s.vm.txIndexer.Search(ctx.Context(), q) if err != nil { - return err + return nil, err } // sort results (must be done before pagination) - switch args.OrderBy { + switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { if results[i].Height == results[j].Height { @@ -468,16 +535,16 @@ func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ct return results[i].Height < results[j].Height }) default: - return errors.New("expected order_by to be either `asc` or `desc` or empty") + return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } // paginate results totalCount := len(results) - perPage := validatePerPage(args.PerPage) + perPage := validatePerPage(perPagePtr) - page, err := validatePage(args.Page, perPage, totalCount) + page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { - return err + return nil, err } skipCount := validateSkipCount(page, perPage) @@ -488,7 +555,7 @@ func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ct r := results[i] var proof types.TxProof - if args.Prove { + if prove { block := s.vm.blockStore.LoadBlock(r.Height) proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines } @@ -503,31 +570,35 @@ func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ct }) } - reply.Txs = apiResults - reply.TotalCount = totalCount - return nil + return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } -func (s *LocalService) BlockSearch(req *http.Request, args *BlockSearchArgs, reply *ctypes.ResultBlockSearch) error 
{ - q, err := tmquery.New(args.Query) - if err != nil { - return err +// BlockSearch searches for a paginated set of blocks matching BeginBlock and +// EndBlock event search criteria. +func (s *LocalService) BlockSearch( + ctx *rpctypes.Context, + query string, + pagePtr, perPagePtr *int, + orderBy string, +) (*ctypes.ResultBlockSearch, error) { + + // skip if block indexing is disabled + if _, ok := s.vm.blockIndexer.(*blockidxnull.BlockerIndexer); ok { + return nil, errors.New("block indexing is disabled") } - var ctx context.Context - if req != nil { - ctx = req.Context() - } else { - ctx = context.Background() + q, err := tmquery.New(query) + if err != nil { + return nil, err } - results, err := s.vm.blockIndexer.Search(ctx, q) + results, err := s.vm.blockIndexer.Search(ctx.Context(), q) if err != nil { - return err + return nil, err } // sort results (must be done before pagination) - switch args.OrderBy { + switch orderBy { case "desc", "": sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) @@ -535,16 +606,16 @@ func (s *LocalService) BlockSearch(req *http.Request, args *BlockSearchArgs, rep sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) default: - return errors.New("expected order_by to be either `asc` or `desc` or empty") + return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } // paginate results totalCount := len(results) - perPage := validatePerPage(args.PerPage) + perPage := validatePerPage(perPagePtr) - page, err := validatePage(args.Page, perPage, totalCount) + page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { - return err + return nil, err } skipCount := validateSkipCount(page, perPage) @@ -564,68 +635,66 @@ func (s *LocalService) BlockSearch(req *http.Request, args *BlockSearchArgs, rep } } - reply.Blocks = apiResults - reply.TotalCount = totalCount - return nil + return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil } -func (s *LocalService) BlockchainInfo( - _ *http.Request, - args *BlockchainInfoArgs, - reply *ctypes.ResultBlockchainInfo, -) error { +func (s *LocalService) BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { // maximum 20 block metas const limit int64 = 20 var err error - args.MinHeight, args.MaxHeight, err = filterMinMax( + minHeight, maxHeight, err = filterMinMax( s.vm.blockStore.Base(), s.vm.blockStore.Height(), - args.MinHeight, - args.MaxHeight, + minHeight, + maxHeight, limit) if err != nil { - return err + return nil, err } - s.vm.log.Debug("BlockchainInfoHandler", "maxHeight", args.MaxHeight, "minHeight", args.MinHeight) + s.vm.log.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight) var blockMetas []*types.BlockMeta - for height := args.MaxHeight; height >= args.MinHeight; height-- { + for height := maxHeight; height >= minHeight; height-- { blockMeta := s.vm.blockStore.LoadBlockMeta(height) blockMetas = append(blockMetas, blockMeta) } - reply.LastHeight = s.vm.blockStore.Height() - reply.BlockMetas = blockMetas - return nil + return &ctypes.ResultBlockchainInfo{ + LastHeight: s.vm.blockStore.Height(), + BlockMetas: blockMetas}, nil } -func (s *LocalService) Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error { - reply.Genesis = s.vm.genesis - return nil +func (s *LocalService) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { + //if len(s.vm.genChunks) > 1 { + // return nil, errors.New("genesis 
response is large, please use the genesis_chunked API instead") + //} + + return &ctypes.ResultGenesis{Genesis: s.vm.genesis}, nil } -func (s *LocalService) GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error { - //if s.vm.genChunks == nil { - // return fmt.Errorf("service configuration error, genesis chunks are not initialized") - //} - // - //if len(s.vm.genChunks) == 0 { - // return fmt.Errorf("service configuration error, there are no chunks") - //} - // - //id := int(args.Chunk) - // - //if id > len(s.vm.genChunks)-1 { - // return fmt.Errorf("there are %d chunks, %d is invalid", len(s.vm.genChunks)-1, id) - //} - // - //reply.TotalChunks = len(s.vm.genChunks) - //reply.ChunkNumber = id - //reply.Data = s.vm.genChunks[id] - return nil +func (s *LocalService) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { + if s.vm.genChunks == nil { + return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") + } + + if len(s.vm.genChunks) == 0 { + return nil, fmt.Errorf("service configuration error, there are no chunks") + } + + id := int(chunk) + + if id > len(s.vm.genChunks)-1 { + return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(s.vm.genChunks)-1, id) + } + + return &ctypes.ResultGenesisChunk{ + TotalChunks: len(s.vm.genChunks), + ChunkNumber: id, + Data: s.vm.genChunks[id], + }, nil } -func (s *LocalService) Status(_ *http.Request, _ *struct{}, reply *ctypes.ResultStatus) error { +func (s *LocalService) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash tmbytes.HexBytes @@ -656,70 +725,79 @@ func (s *LocalService) Status(_ *http.Request, _ *struct{}, reply *ctypes.Result } } - reply.NodeInfo = p2p.DefaultNodeInfo{ - DefaultNodeID: p2p.ID(s.vm.chainCtx.NodeID.String()), - Network: fmt.Sprintf("%d", s.vm.chainCtx.NetworkID), - } - reply.SyncInfo = ctypes.SyncInfo{ - LatestBlockHash: latestBlockHash, - LatestAppHash: latestAppHash, - LatestBlockHeight: latestHeight, - LatestBlockTime: time.Unix(0, latestBlockTimeNano), - EarliestBlockHash: earliestBlockHash, - EarliestAppHash: earliestAppHash, - EarliestBlockHeight: earliestBlockHeight, - EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), - } - return nil + result := &ctypes.ResultStatus{ + NodeInfo: p2p.DefaultNodeInfo{ + DefaultNodeID: p2p.ID(s.vm.chainCtx.NodeID.String()), + Network: fmt.Sprintf("%d", s.vm.chainCtx.NetworkID), + }, + SyncInfo: ctypes.SyncInfo{ + LatestBlockHash: latestBlockHash, + LatestAppHash: latestAppHash, + LatestBlockHeight: latestHeight, + LatestBlockTime: time.Unix(0, latestBlockTimeNano), + EarliestBlockHash: earliestBlockHash, + EarliestAppHash: earliestAppHash, + EarliestBlockHeight: earliestBlockHeight, + EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), + CatchingUp: false, + }, + ValidatorInfo: ctypes.ValidatorInfo{ + Address: proposerPubKey.Address(), + PubKey: proposerPubKey, + VotingPower: 0, + }, + } + + return result, nil } -// ToDo: no peers, because it's vm -func (s *LocalService) NetInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultNetInfo) error { - return nil +// ToDo: no peers, no network from tendermint side +func (s *LocalService) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { + return &ctypes.ResultNetInfo{}, nil } // ToDo: we doesn't have consensusState -func (s *LocalService) DumpConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultDumpConsensusState) error { - 
return nil +func (s *LocalService) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { + return &ctypes.ResultDumpConsensusState{}, nil } // ToDo: we doesn't have consensusState -func (s *LocalService) ConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultConsensusState) error { - return nil +func (s *LocalService) ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { + return &ctypes.ResultConsensusState{}, nil } -func (s *LocalService) ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error { - reply.BlockHeight = s.vm.blockStore.Height() - reply.ConsensusParams = *s.vm.genesis.ConsensusParams - return nil +func (s *LocalService) ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { + return &ctypes.ResultConsensusParams{ + BlockHeight: s.vm.blockStore.Height(), + ConsensusParams: *s.vm.genesis.ConsensusParams, + }, nil } -func (s *LocalService) Health(_ *http.Request, _ *struct{}, reply *ctypes.ResultHealth) error { - *reply = ctypes.ResultHealth{} - return nil +func (s *LocalService) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { + return &ctypes.ResultHealth{}, nil } -func (s *LocalService) UnconfirmedTxs(_ *http.Request, args *UnconfirmedTxsArgs, reply *ctypes.ResultUnconfirmedTxs) error { - limit := validatePerPage(args.Limit) +func (s *LocalService) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { + limit := validatePerPage(limitPtr) txs := s.vm.mempool.ReapMaxTxs(limit) - reply.Count = len(txs) - reply.Total = s.vm.mempool.Size() - reply.Txs = txs - return nil + return &ctypes.ResultUnconfirmedTxs{ + Count: len(txs), + Total: s.vm.mempool.Size(), + Txs: txs, + }, nil } -func (s *LocalService) NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply *ctypes.ResultUnconfirmedTxs) error { - reply.Count = s.vm.mempool.Size() - reply.Total = s.vm.mempool.Size() - reply.TotalBytes = s.vm.mempool.TxsBytes() - return nil +func (s *LocalService) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { + return &ctypes.ResultUnconfirmedTxs{ + Count: s.vm.mempool.Size(), + Total: s.vm.mempool.Size(), + TotalBytes: s.vm.mempool.TxsBytes()}, nil } -func (s *LocalService) CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error { - res, err := s.vm.app.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: args.Tx}) +func (s *LocalService) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { + res, err := s.vm.app.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { - return err + return nil, err } - reply.ResponseCheckTx = *res - return nil + return &ctypes.ResultCheckTx{ResponseCheckTx: *res}, nil } diff --git a/vm/service_test.go b/vm/service_test.go index 6b1651c2d..50896bbfb 100644 --- a/vm/service_test.go +++ b/vm/service_test.go @@ -2,23 +2,59 @@ package vm import ( "context" + "encoding/base64" + "errors" "fmt" + "strings" "testing" "time" - atypes "github.com/consideritdone/landslidecore/abci/types" + "github.com/ava-labs/avalanchego/snow/engine/common" + tmjson "github.com/consideritdone/landslidecore/libs/json" ctypes "github.com/consideritdone/landslidecore/rpc/core/types" + rpctypes "github.com/consideritdone/landslidecore/rpc/jsonrpc/types" + "github.com/consideritdone/landslidecore/types" + "golang.org/x/sync/errgroup" + + atypes "github.com/consideritdone/landslidecore/abci/types" 
"github.com/davecgh/go-spew/spew" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func broadcastTx(t *testing.T, v *VM, msgs chan common.Message, tx []byte) (*ctypes.ResultBroadcastTxCommit, error) { + var result *ctypes.ResultBroadcastTxCommit + wg := new(errgroup.Group) + wg.Go(func() error { + select { + case <-msgs: + t.Logf("found new txs in engine") + block, err := v.BuildBlock(context.Background()) + if err != nil { + return err + } + return block.Accept(context.Background()) + case <-time.After(time.Minute): + return errors.New("timeout. no txs") + } + }) + wg.Go(func() error { + var err error + result, err = NewService(v).BroadcastTxCommit(&rpctypes.Context{}, tx) + return err + }) + if err := wg.Wait(); err != nil { + return nil, err + } + return result, nil +} + func TestABCIService(t *testing.T) { vm, service, _ := mustNewKVTestVm(t) t.Run("ABCIInfo", func(t *testing.T) { - reply := new(ctypes.ResultABCIInfo) - assert.NoError(t, service.ABCIInfo(nil, nil, reply)) + reply, err := service.ABCIInfo(&rpctypes.Context{}) + require.NoError(t, err) assert.Equal(t, uint64(1), reply.Response.AppVersion) assert.Equal(t, int64(1), reply.Response.LastBlockHeight) assert.Equal(t, []uint8([]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}), reply.Response.LastBlockAppHash) @@ -28,8 +64,8 @@ func TestABCIService(t *testing.T) { t.Run("ABCIQuery", func(t *testing.T) { k, v, tx := MakeTxKV() - replyBroadcast := new(ctypes.ResultBroadcastTx) - require.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{tx}, replyBroadcast)) + _, err := service.BroadcastTxSync(&rpctypes.Context{}, tx) + require.NoError(t, err) blk, err := vm.BuildBlock(context.Background()) require.NoError(t, err) @@ -38,10 +74,9 @@ func TestABCIService(t *testing.T) { err = blk.Accept(context.Background()) require.NoError(t, err) - res := new(ctypes.ResultABCIQuery) - err = service.ABCIQuery(nil, &ABCIQueryArgs{Path: "/key", Data: k}, res) - if assert.Nil(t, err) && assert.True(t, res.Response.IsOK()) { - assert.EqualValues(t, v, res.Response.Value) + reply, err := service.ABCIQuery(&rpctypes.Context{}, "/key", k, 0, false) + if assert.Nil(t, err) && assert.True(t, reply.Response.IsOK()) { + assert.EqualValues(t, v, reply.Response.Value) } spew.Dump(vm.mempool.Size()) }) @@ -69,8 +104,8 @@ func TestABCIService(t *testing.T) { }(ctx) _, _, tx := MakeTxKV() - reply := new(ctypes.ResultBroadcastTxCommit) - assert.NoError(t, service.BroadcastTxCommit(nil, &BroadcastTxArgs{tx}, reply)) + reply, err := service.BroadcastTxCommit(&rpctypes.Context{}, tx) + assert.NoError(t, err) assert.True(t, reply.CheckTx.IsOK()) assert.True(t, reply.DeliverTx.IsOK()) assert.Equal(t, 0, vm.mempool.Size()) @@ -82,8 +117,8 @@ func TestABCIService(t *testing.T) { initMempoolSize := vm.mempool.Size() _, _, tx := MakeTxKV() - reply := new(ctypes.ResultBroadcastTx) - assert.NoError(t, service.BroadcastTxAsync(nil, &BroadcastTxArgs{tx}, reply)) + reply, err := service.BroadcastTxAsync(&rpctypes.Context{}, tx) + assert.NoError(t, err) assert.NotNil(t, reply.Hash) assert.Equal(t, initMempoolSize+1, vm.mempool.Size()) assert.EqualValues(t, tx, vm.mempool.ReapMaxTxs(-1)[0]) @@ -95,19 +130,80 @@ func TestABCIService(t *testing.T) { initMempoolSize := vm.mempool.Size() _, _, tx := MakeTxKV() - reply := new(ctypes.ResultBroadcastTx) - assert.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{Tx: tx}, reply)) + reply, err := service.BroadcastTxSync(&rpctypes.Context{}, tx) + assert.NoError(t, err) assert.Equal(t, reply.Code, 
atypes.CodeTypeOK) assert.Equal(t, initMempoolSize+1, vm.mempool.Size()) assert.EqualValues(t, tx, vm.mempool.ReapMaxTxs(-1)[0]) }) } +func TestEventService(t *testing.T) { + _, service, _ := mustNewCounterTestVm(t) + + // subscribe to new blocks and make sure height increments by 1 + t.Run("Subscribe", func(t *testing.T) { + events := []string{ + types.QueryForEvent(types.EventNewBlock).String(), + types.QueryForEvent(types.EventNewBlockHeader).String(), + types.QueryForEvent(types.EventValidBlock).String(), + } + + for i, event := range events { + _, err := service.Subscribe(&rpctypes.Context{JSONReq: &rpctypes.RPCRequest{ID: rpctypes.JSONRPCIntID(i)}}, event) + require.NoError(t, err) + } + t.Cleanup(func() { + if _, err := service.UnsubscribeAll(&rpctypes.Context{}); err != nil { + t.Error(err) + } + }) + }) + + t.Run("Unsubscribe", func(t *testing.T) { + events := []string{ + types.QueryForEvent(types.EventNewBlock).String(), + types.QueryForEvent(types.EventNewBlockHeader).String(), + types.QueryForEvent(types.EventValidBlock).String(), + } + + for i, event := range events { + _, err := service.Subscribe(&rpctypes.Context{JSONReq: &rpctypes.RPCRequest{ID: rpctypes.JSONRPCIntID(i)}}, event) + require.NoError(t, err) + _, err = service.Unsubscribe(&rpctypes.Context{}, event) + require.NoError(t, err) + } + //TODO: investigate the need to use Cleanup with UnsubscribeAll + //t.Cleanup(func() { + // if _, err := service.UnsubscribeAll(&rpctypes.Context{}); err != nil { + // t.Error(err) + // } + //}) + }) + + t.Run("UnsubscribeAll", func(t *testing.T) { + events := []string{ + types.QueryForEvent(types.EventNewBlock).String(), + types.QueryForEvent(types.EventNewBlockHeader).String(), + types.QueryForEvent(types.EventValidBlock).String(), + } + + for i, event := range events { + _, err := service.Subscribe(&rpctypes.Context{JSONReq: &rpctypes.RPCRequest{ID: rpctypes.JSONRPCIntID(i)}}, event) + require.NoError(t, err) + } + _, err := service.UnsubscribeAll(&rpctypes.Context{}) + if err != nil { + t.Error(err) + } + }) +} + func TestHistoryService(t *testing.T) { vm, service, _ := mustNewCounterTestVm(t) - txReply := new(ctypes.ResultBroadcastTx) - assert.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{Tx: []byte{0x00}}, txReply)) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, []byte{0x00}) + assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) blk, err := vm.BuildBlock(context.Background()) @@ -115,44 +211,63 @@ func TestHistoryService(t *testing.T) { assert.NotNil(t, blk) assert.NoError(t, blk.Accept(context.Background())) - t.Run("BlockchainInfo", func(t *testing.T) { - reply := new(ctypes.ResultBlockchainInfo) - assert.NoError(t, service.BlockchainInfo(nil, &BlockchainInfoArgs{1, 100}, reply)) - assert.Equal(t, int64(2), reply.LastHeight) - }) - t.Run("Genesis", func(t *testing.T) { - reply := new(ctypes.ResultGenesis) - assert.NoError(t, service.Genesis(nil, nil, reply)) + reply, err := service.Genesis(&rpctypes.Context{}) + assert.NoError(t, err) assert.Equal(t, vm.genesis, reply.Genesis) }) + + t.Run("GenesisChunked", func(t *testing.T) { + first, err := service.GenesisChunked(&rpctypes.Context{}, 0) + require.NoError(t, err) + + decoded := make([]string, 0, first.TotalChunks) + for i := 0; i < first.TotalChunks; i++ { + chunk, err := service.GenesisChunked(&rpctypes.Context{}, uint(i)) + require.NoError(t, err) + data, err := base64.StdEncoding.DecodeString(chunk.Data) + require.NoError(t, err) + decoded = append(decoded, string(data)) + + 
} + doc := []byte(strings.Join(decoded, "")) + + var out types.GenesisDoc + require.NoError(t, tmjson.Unmarshal(doc, &out), "first: %+v, doc: %s", first, string(doc)) + }) + + t.Run("BlockchainInfo", func(t *testing.T) { + reply, err := service.BlockchainInfo(&rpctypes.Context{}, 1, 100) + assert.NoError(t, err) + assert.Equal(t, int64(2), reply.LastHeight) + }) } func TestNetworkService(t *testing.T) { vm, service, _ := mustNewCounterTestVm(t) t.Run("NetInfo", func(t *testing.T) { - reply := new(ctypes.ResultNetInfo) - assert.NoError(t, service.NetInfo(nil, nil, reply)) + _, err := service.NetInfo(&rpctypes.Context{}) + assert.NoError(t, err) }) t.Run("DumpConsensusState", func(t *testing.T) { - reply := new(ctypes.ResultDumpConsensusState) - assert.NoError(t, service.DumpConsensusState(nil, nil, reply)) + _, err := service.DumpConsensusState(&rpctypes.Context{}) + assert.NoError(t, err) }) t.Run("ConsensusState", func(t *testing.T) { - reply := new(ctypes.ResultConsensusState) - assert.NoError(t, service.ConsensusState(nil, nil, reply)) + _, err := service.ConsensusState(&rpctypes.Context{}) + assert.NoError(t, err) }) t.Run("ConsensusParams", func(t *testing.T) { - reply := new(ctypes.ResultConsensusParams) - assert.NoError(t, service.ConsensusParams(nil, nil, reply)) + reply, err := service.ConsensusParams(&rpctypes.Context{}, nil) + assert.NoError(t, err) assert.Equal(t, int64(1), reply.BlockHeight) - txReply := new(ctypes.ResultBroadcastTx) - assert.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{Tx: []byte{0x00}}, txReply)) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, []byte{0x00}) + assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) blk, err := vm.BuildBlock(context.Background()) @@ -160,27 +275,28 @@ func TestNetworkService(t *testing.T) { assert.NotNil(t, blk) assert.NoError(t, blk.Accept(context.Background())) - assert.NoError(t, service.ConsensusParams(nil, nil, reply)) - assert.Equal(t, int64(2), reply.BlockHeight) + reply2, err := service.ConsensusParams(&rpctypes.Context{}, nil) + assert.NoError(t, err) + assert.Equal(t, int64(2), reply2.BlockHeight) }) t.Run("Health", func(t *testing.T) { - reply := new(ctypes.ResultHealth) - assert.NoError(t, service.Health(nil, nil, reply)) + _, err := service.Health(&rpctypes.Context{}) + assert.NoError(t, err) }) } func TestSignService(t *testing.T) { _, _, tx := MakeTxKV() - vm, service, _ := mustNewKVTestVm(t) + tx2 := []byte{0x02} + tx3 := []byte{0x03} + vm, service, msgs := mustNewKVTestVm(t) blk0, err := vm.BuildBlock(context.Background()) assert.ErrorIs(t, err, errNoPendingTxs, "expecting error no txs") assert.Nil(t, blk0) - txArg := &BroadcastTxArgs{tx} - txReply := new(ctypes.ResultBroadcastTx) - err = service.BroadcastTxSync(nil, txArg, txReply) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, tx) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) @@ -192,40 +308,39 @@ func TestSignService(t *testing.T) { height1 := int64(blk1.Height()) t.Run("Block", func(t *testing.T) { - replyWithoutHeight := new(ctypes.ResultBlock) - assert.NoError(t, service.Block(nil, &BlockHeightArgs{&height1}, replyWithoutHeight)) + replyWithoutHeight, err := service.Block(&rpctypes.Context{}, &height1) + assert.NoError(t, err) if assert.NotNil(t, replyWithoutHeight.Block) { assert.EqualValues(t, height1, replyWithoutHeight.Block.Height) } - reply := new(ctypes.ResultBlock) - assert.NoError(t, service.Block(nil, &BlockHeightArgs{Height: &height1}, reply)) + reply, err := 
service.Block(&rpctypes.Context{}, &height1) + assert.NoError(t, err) if assert.NotNil(t, reply.Block) { assert.EqualValues(t, height1, reply.Block.Height) } }) t.Run("BlockByHash", func(t *testing.T) { - replyWithoutHash := new(ctypes.ResultBlock) - assert.NoError(t, service.BlockByHash(nil, &BlockHashArgs{}, replyWithoutHash)) + replyWithoutHash, err := service.BlockByHash(&rpctypes.Context{}, []byte{}) + assert.NoError(t, err) assert.Nil(t, replyWithoutHash.Block) - reply := new(ctypes.ResultBlock) hash := blk1.ID() - - assert.NoError(t, service.BlockByHash(nil, &BlockHashArgs{Hash: hash[:]}, reply)) + reply, err := service.BlockByHash(&rpctypes.Context{}, hash[:]) + assert.NoError(t, err) if assert.NotNil(t, reply.Block) { assert.EqualValues(t, hash[:], reply.Block.Hash().Bytes()) } }) t.Run("BlockResults", func(t *testing.T) { - replyWithoutHeight := new(ctypes.ResultBlockResults) - assert.NoError(t, service.BlockResults(nil, &BlockHeightArgs{}, replyWithoutHeight)) + replyWithoutHeight, err := service.BlockResults(&rpctypes.Context{}, nil) + assert.NoError(t, err) assert.Equal(t, height1, replyWithoutHeight.Height) - reply := new(ctypes.ResultBlockResults) - assert.NoError(t, service.BlockResults(nil, &BlockHeightArgs{Height: &height1}, reply)) + reply, err := service.BlockResults(&rpctypes.Context{}, &height1) + assert.NoError(t, err) if assert.NotNil(t, reply.TxsResults) { assert.Equal(t, height1, reply.Height) } @@ -234,21 +349,133 @@ func TestSignService(t *testing.T) { t.Run("Tx", func(t *testing.T) { time.Sleep(2 * time.Second) - reply := new(ctypes.ResultTx) - assert.NoError(t, service.Tx(nil, &TxArgs{Hash: txReply.Hash.Bytes()}, reply)) + reply, err := service.Tx(&rpctypes.Context{}, txReply.Hash.Bytes(), false) + assert.NoError(t, err) assert.EqualValues(t, txReply.Hash, reply.Hash) assert.EqualValues(t, tx, reply.Tx) }) t.Run("TxSearch", func(t *testing.T) { - reply := new(ctypes.ResultTxSearch) - assert.NoError(t, service.TxSearch(nil, &TxSearchArgs{Query: fmt.Sprintf("tx.hash='%s'", txReply.Hash)}, reply)) + txReply2, err := service.BroadcastTxAsync(&rpctypes.Context{}, tx2) + assert.NoError(t, err) + assert.Equal(t, atypes.CodeTypeOK, txReply2.Code) + + blk2, err := vm.BuildBlock(context.Background()) + require.NoError(t, err) + assert.NotNil(t, blk2) + assert.NoError(t, blk2.Accept(context.Background())) + + time.Sleep(time.Second) + + reply, err := service.TxSearch(&rpctypes.Context{}, fmt.Sprintf("tx.hash='%s'", txReply2.Hash), false, nil, nil, "asc") + assert.NoError(t, err) assert.True(t, len(reply.Txs) > 0) + + // TODO: need to fix + // reply2, err := service.TxSearch(&rpctypes.Context{}, fmt.Sprintf("tx.height=%d", blk2.Height()), false, nil, nil, "desc") + // assert.NoError(t, err) + // assert.True(t, len(reply2.Txs) > 0) + }) + + //TODO: Check logic of test + t.Run("Commit", func(t *testing.T) { + txReply, err := service.BroadcastTxAsync(&rpctypes.Context{}, tx3) + require.NoError(t, err) + assert.Equal(t, atypes.CodeTypeOK, txReply.Code) + + assert, require := assert.New(t), require.New(t) + + // get an offset of height to avoid racing and guessing + s, err := service.Status(&rpctypes.Context{}) + require.NoError(err) + // sh is start height or status height + sh := s.SyncInfo.LatestBlockHeight + + // look for the future + h := sh + 20 + _, err = service.Block(&rpctypes.Context{}, &h) + require.Error(err) // no block yet + + // write something + k, v, tx := MakeTxKV() + bres, err := broadcastTx(t, vm, msgs, tx) + require.NoError(err) + 
require.True(bres.DeliverTx.IsOK()) + time.Sleep(2 * time.Second) + + txh := bres.Height + apph := txh + + // wait before querying + err = WaitForHeight(service, apph, nil) + require.NoError(err) + + qres, err := service.ABCIQuery(&rpctypes.Context{}, "/key", k, 0, false) + require.NoError(err) + if assert.True(qres.Response.IsOK()) { + assert.Equal(k, qres.Response.Key) + assert.EqualValues(v, qres.Response.Value) + } + + // make sure we can lookup the tx with proof + ptx, err := service.Tx(&rpctypes.Context{}, bres.Hash, true) + require.NoError(err) + assert.EqualValues(txh, ptx.Height) + assert.EqualValues(tx, ptx.Tx) + + // and we can even check the block is added + block, err := service.Block(&rpctypes.Context{}, &apph) + require.NoError(err) + appHash := block.Block.Header.AppHash + assert.True(len(appHash) > 0) + assert.EqualValues(apph, block.Block.Header.Height) + + blockByHash, err := service.BlockByHash(&rpctypes.Context{}, block.BlockID.Hash) + require.NoError(err) + require.Equal(block, blockByHash) + + // now check the results + blockResults, err := service.BlockResults(&rpctypes.Context{}, &txh) + require.Nil(err, "%+v", err) + assert.Equal(txh, blockResults.Height) + if assert.Equal(2, len(blockResults.TxsResults)) { + // check success code + assert.EqualValues(0, blockResults.TxsResults[0].Code) + } + + // check blockchain info, now that we know there is info + info, err := service.BlockchainInfo(&rpctypes.Context{}, apph, apph) + require.NoError(err) + assert.True(info.LastHeight >= apph) + if assert.Equal(1, len(info.BlockMetas)) { + lastMeta := info.BlockMetas[0] + assert.EqualValues(apph, lastMeta.Header.Height) + blockData := block.Block + assert.Equal(blockData.Header.AppHash, lastMeta.Header.AppHash) + assert.Equal(block.BlockID, lastMeta.BlockID) + } + + // and get the corresponding commit with the same apphash + commit, err := service.Commit(&rpctypes.Context{}, &apph) + require.NoError(err) + assert.NotNil(commit) + assert.Equal(appHash, commit.Header.AppHash) + + // compare the commits (note Commit(2) has commit from Block(3)) + h = apph - 1 + commit2, err := service.Commit(&rpctypes.Context{}, &h) + require.NoError(err) + assert.Equal(block.Block.LastCommitHash, commit2.Commit.Hash()) + + // and we got a proof that works! 
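+		// prove=true asks the query to return a proof alongside the value; the
+		// assertions below only check that the proven query still succeeds.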
+ pres, err := service.ABCIQuery(&rpctypes.Context{}, "/key", k, 0, true) + require.NoError(err) + assert.True(pres.Response.IsOK()) }) t.Run("BlockSearch", func(t *testing.T) { - reply := new(ctypes.ResultBlockSearch) - assert.NoError(t, service.BlockSearch(nil, &BlockSearchArgs{Query: "block.height=2"}, reply)) + reply, err := service.BlockSearch(&rpctypes.Context{}, "block.height=2", nil, nil, "desc") + assert.NoError(t, err) assert.True(t, len(reply.Blocks) > 0) }) } @@ -260,17 +487,13 @@ func TestStatusService(t *testing.T) { assert.ErrorIs(t, err, errNoPendingTxs, "expecting error no txs") assert.Nil(t, blk0) - txArg := &BroadcastTxArgs{ - Tx: []byte{0x01}, - } - txReply := &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, txArg, txReply) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, []byte{0x01}) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) t.Run("Status", func(t *testing.T) { - reply1 := new(ctypes.ResultStatus) - assert.NoError(t, service.Status(nil, nil, reply1)) + reply1, err := service.Status(&rpctypes.Context{}) + assert.NoError(t, err) assert.Equal(t, int64(1), reply1.SyncInfo.LatestBlockHeight) blk, err := vm.BuildBlock(context.Background()) @@ -278,8 +501,8 @@ func TestStatusService(t *testing.T) { assert.NotNil(t, blk) assert.NoError(t, blk.Accept(context.Background())) - reply2 := new(ctypes.ResultStatus) - assert.NoError(t, service.Status(nil, nil, reply2)) + reply2, err := service.Status(&rpctypes.Context{}) + assert.NoError(t, err) assert.Equal(t, int64(2), reply2.SyncInfo.LatestBlockHeight) }) } @@ -291,32 +514,31 @@ func TestMempoolService(t *testing.T) { assert.ErrorIs(t, err, errNoPendingTxs, "expecting error no txs") assert.Nil(t, blk0) - txArg := &BroadcastTxArgs{ - Tx: []byte{0x01}, - } - txReply := &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, txArg, txReply) + tx := []byte{0x01} + expectedTx := types.Tx(tx) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, []byte{0x01}) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) t.Run("UnconfirmedTxs", func(t *testing.T) { limit := 100 - reply := new(ctypes.ResultUnconfirmedTxs) - assert.NoError(t, service.UnconfirmedTxs(nil, &UnconfirmedTxsArgs{Limit: &limit}, reply)) + reply, err := service.UnconfirmedTxs(&rpctypes.Context{}, &limit) + assert.NoError(t, err) assert.True(t, len(reply.Txs) == 1) - assert.Equal(t, reply.Txs[0], txArg.Tx) + assert.Equal(t, expectedTx, reply.Txs[0]) }) t.Run("NumUnconfirmedTxs", func(t *testing.T) { - reply := new(ctypes.ResultUnconfirmedTxs) - assert.NoError(t, service.NumUnconfirmedTxs(nil, nil, reply)) + reply, err := service.NumUnconfirmedTxs(&rpctypes.Context{}) + assert.NoError(t, err) assert.Equal(t, reply.Count, 1) assert.Equal(t, reply.Total, 1) }) t.Run("CheckTx", func(t *testing.T) { - reply1 := new(ctypes.ResultCheckTx) - assert.NoError(t, service.CheckTx(nil, &CheckTxArgs{Tx: txArg.Tx}, reply1)) + reply1, err := service.CheckTx(&rpctypes.Context{}, tx) + assert.NoError(t, err) + t.Logf("%v\n", reply1) // ToDo: check reply1 blk, err := vm.BuildBlock(context.Background()) @@ -324,8 +546,9 @@ func TestMempoolService(t *testing.T) { assert.NotNil(t, blk) assert.NoError(t, blk.Accept(context.Background())) - reply2 := new(ctypes.ResultCheckTx) - assert.NoError(t, service.CheckTx(nil, &CheckTxArgs{Tx: txArg.Tx}, reply2)) + reply2, err := service.CheckTx(&rpctypes.Context{}, tx) + assert.NoError(t, err) // ToDo: check reply2 + t.Logf("%v\n", reply2) }) } diff --git 
a/vm/vm.go b/vm/vm.go index 3e5b9140b..366117152 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -2,13 +2,11 @@ package vm import ( "context" + "encoding/base64" "errors" "fmt" - "net/http" "time" - "github.com/gorilla/rpc/v2" - "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/database/manager" @@ -21,19 +19,18 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/version" abciTypes "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/consensus" + "github.com/consideritdone/landslidecore/crypto/secp256k1" "github.com/consideritdone/landslidecore/crypto/tmhash" "github.com/consideritdone/landslidecore/libs/log" mempl "github.com/consideritdone/landslidecore/mempool" "github.com/consideritdone/landslidecore/node" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" "github.com/consideritdone/landslidecore/proxy" - rpccore "github.com/consideritdone/landslidecore/rpc/core" rpcserver "github.com/consideritdone/landslidecore/rpc/jsonrpc/server" "github.com/consideritdone/landslidecore/state" "github.com/consideritdone/landslidecore/state/indexer" @@ -76,6 +73,7 @@ var ( dbPrefixBlockIndexer = []byte("block-indexer") proposerAddress = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + proposerPubKey = secp256k1.PubKey{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} errInvalidBlock = errors.New("invalid block") errNoPendingTxs = errors.New("there is no txs to include to block") @@ -88,6 +86,8 @@ type ( appCreator AppCreator app proxy.AppConns + rpcConfig *config.RPCConfig + log log.Logger chainCtx *snow.Context toEngine chan<- common.Message @@ -96,6 +96,7 @@ type ( stateStore state.Store state state.State genesis *types.GenesisDoc + genChunks []string mempool *mempl.CListMempool eventBus *types.EventBus @@ -308,6 +309,7 @@ func (vm *VM) Initialize( vm.toEngine = toEngine vm.log = log.NewTMLogger(vm.chainCtx.Log).With("module", "vm") vm.verifiedBlocks = make(map[ids.ID]*Block) + vm.rpcConfig = config.DefaultRPCConfig() db := dbManager.Current().Database @@ -329,6 +331,13 @@ func (vm *VM) Initialize( if err != nil { return nil } + for i := 0; i < len(genesisBytes); i += genesisChunkSize { + end := i + genesisChunkSize + if end > len(genesisBytes) { + end = len(genesisBytes) + } + vm.genChunks = append(vm.genChunks, base64.StdEncoding.EncodeToString(genesisBytes[i:end])) + } vm.app, err = node.CreateAndStartProxyAppConns(proxy.NewLocalClientCreator(app), vm.log) if err != nil { @@ -496,21 +505,13 @@ func (vm *VM) CreateStaticHandlers(context.Context) (map[string]*common.HTTPHand // it have an extension called `accounts`, where clients could get // information about their accounts. 
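// The "/rpc" endpoint below is served by Tendermint's JSON-RPC handler
// (rpcserver.MakeJSONRPCHandler) over the route table from NewServiceAsRPCRoutes(vm),
// rather than a gorilla/rpc server with registered JSON codecs.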
func (vm *VM) CreateHandlers(context.Context) (map[string]*common.HTTPHandler, error) { - mux := http.NewServeMux() - rpcLogger := vm.log.With("module", "rpc-server") - rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) - - server := rpc.NewServer() - server.RegisterCodec(json.NewCodec(), "application/json") - server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") - if err := server.RegisterService(NewService(vm), Name); err != nil { - return nil, err - } - return map[string]*common.HTTPHandler{ "/rpc": { LockOptions: common.WriteLock, - Handler: server, + Handler: rpcserver.MakeJSONRPCHandler( + NewServiceAsRPCRoutes(vm), + vm.log.With("module", "rpc"), + ), }, }, nil } diff --git a/vm/vm_test.go b/vm/vm_test.go index 212eb7513..06b92ba91 100644 --- a/vm/vm_test.go +++ b/vm/vm_test.go @@ -21,7 +21,6 @@ import ( "github.com/consideritdone/landslidecore/abci/example/counter" atypes "github.com/consideritdone/landslidecore/abci/types" tmrand "github.com/consideritdone/landslidecore/libs/rand" - ctypes "github.com/consideritdone/landslidecore/rpc/core/types" ) var ( @@ -99,11 +98,7 @@ func TestInitVm(t *testing.T) { assert.Nil(t, blk0) // submit first tx (0x00) - args := &BroadcastTxArgs{ - Tx: []byte{0x00}, - } - reply := &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, args, reply) + reply, err := service.BroadcastTxSync(nil, []byte{0x00}) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, reply.Code) @@ -128,20 +123,12 @@ func TestInitVm(t *testing.T) { t.Logf("TM Block Tx count: %d", len(tmBlk1.Data.Txs)) // submit second tx (0x01) - args = &BroadcastTxArgs{ - Tx: []byte{0x01}, - } - reply = &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, args, reply) + reply, err = service.BroadcastTxSync(nil, []byte{0x01}) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, reply.Code) // submit 3rd tx (0x02) - args = &BroadcastTxArgs{ - Tx: []byte{0x02}, - } - reply = &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, args, reply) + reply, err = service.BroadcastTxSync(nil, []byte{0x02}) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, reply.Code) From 118d1aae5b47f6c325e2b411942e7033918857a2 Mon Sep 17 00:00:00 2001 From: Ilnur Date: Mon, 15 Jan 2024 17:48:06 +0400 Subject: [PATCH 14/14] change NodeInfo filling (#79) --- vm/service.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/vm/service.go b/vm/service.go index de91d4531..6cdcb4944 100644 --- a/vm/service.go +++ b/vm/service.go @@ -727,8 +727,13 @@ func (s *LocalService) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, erro result := &ctypes.ResultStatus{ NodeInfo: p2p.DefaultNodeInfo{ - DefaultNodeID: p2p.ID(s.vm.chainCtx.NodeID.String()), + DefaultNodeID: p2p.ID(fmt.Sprintf("%x", s.vm.chainCtx.NodeID.Bytes())), + ListenAddr: fmt.Sprintf("/ext/bc/%s/rpc", s.vm.chainCtx.ChainID.String()), Network: fmt.Sprintf("%d", s.vm.chainCtx.NetworkID), + // TODO: correct data + Channels: []byte("channels"), + // TODO: correct data + Moniker: "moniker", }, SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash,