diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 7a4da6484..0af9ed990 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -5,9 +5,9 @@ import ( "encoding/binary" "encoding/json" "fmt" + "github.com/ava-labs/avalanchego/database/memdb" - dbm "github.com/tendermint/tm-db" - + "github.com/ava-labs/avalanchego/database" "github.com/consideritdone/landslidecore/abci/example/code" "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/version" @@ -21,16 +21,16 @@ var ( ) type State struct { - db dbm.DB + db database.Database Size int64 `json:"size"` Height int64 `json:"height"` AppHash []byte `json:"app_hash"` } -func loadState(db dbm.DB) State { +func loadState(database database.Database) State { var state State - state.db = db - stateBytes, err := db.Get(stateKey) + state.db = database + stateBytes, err := database.Get(stateKey) if err != nil { panic(err) } @@ -49,7 +49,7 @@ func saveState(state State) { if err != nil { panic(err) } - err = state.db.Set(stateKey, stateBytes) + err = state.db.Put(stateKey, stateBytes) if err != nil { panic(err) } @@ -71,7 +71,7 @@ type Application struct { } func NewApplication() *Application { - state := loadState(dbm.NewMemDB()) + state := loadState(memdb.New()) return &Application{state: state} } @@ -95,7 +95,7 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli key, value = req.Tx, req.Tx } - err := app.state.db.Set(prefixKey(key), value) + err := app.state.db.Put(prefixKey(key), value) if err != nil { panic(err) } diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index fdfc71784..d02900612 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -4,11 +4,12 @@ import ( "bytes" "encoding/base64" "fmt" + "github.com/ava-labs/avalanchego/database/leveldb" + 
"github.com/ava-labs/avalanchego/utils/logging" + "github.com/prometheus/client_golang/prometheus" "strconv" "strings" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/abci/example/code" "github.com/consideritdone/landslidecore/abci/types" cryptoenc "github.com/consideritdone/landslidecore/crypto/encoding" @@ -37,7 +38,8 @@ type PersistentKVStoreApplication struct { func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication { name := "kvstore" - db, err := dbm.NewGoLevelDB(name, dbDir) + logger := logging.NewLogger(name) + db, err := leveldb.New(dbDir, []byte{}, logger, name, prometheus.NewRegistry()) if err != nil { panic(err) } @@ -174,11 +176,9 @@ func (app *PersistentKVStoreApplication) ApplySnapshotChunk( // update validators func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) { - itr, err := app.app.state.db.Iterator(nil, nil) - if err != nil { - panic(err) - } - for ; itr.Valid(); itr.Next() { + itr := app.app.state.db.NewIterator() + defer itr.Release() + for itr.Next() { if isValidatorTx(itr.Key()) { validator := new(types.ValidatorUpdate) err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator) @@ -188,7 +188,7 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida validators = append(validators, *validator) } } - if err = itr.Error(); err != nil { + if err := itr.Error(); err != nil { panic(err) } return @@ -273,7 +273,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Error encoding validator: %v", err)} } - if err = app.app.state.db.Set(key, value.Bytes()); err != nil { + if err = app.app.state.db.Put(key, value.Bytes()); err != nil { panic(err) } app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index ad7f1602b..6efce6aab 100644 ---
a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -2,6 +2,7 @@ package v0 import ( "fmt" + "github.com/ava-labs/avalanchego/database/memdb" "os" "sort" "testing" @@ -10,8 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" cfg "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/libs/log" @@ -68,8 +67,8 @@ func newBlockchainReactor( panic(fmt.Errorf("error start app: %w", err)) } - blockDB := dbm.NewMemDB() - stateDB := dbm.NewMemDB() + blockDB := memdb.New() + stateDB := memdb.New() stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(blockDB) @@ -82,7 +81,7 @@ func newBlockchainReactor( // NOTE we have to create and commit the blocks first because // pool.height is determined from the store. fastSync := true - db := dbm.NewMemDB() + db := memdb.New() stateStore = sm.NewStore(db) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index ccba8f003..573902ab0 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -2,6 +2,7 @@ package v1 import ( "fmt" + "github.com/ava-labs/avalanchego/database/memdb" "os" "sort" "sync" @@ -11,8 +12,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" cfg "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/libs/log" @@ -100,8 +99,8 @@ func newBlockchainReactor( panic(fmt.Errorf("error start app: %w", err)) } - blockDB := dbm.NewMemDB() - stateDB := dbm.NewMemDB() + blockDB := memdb.New() + stateDB := memdb.New() stateStore := sm.NewStore(stateDB) blockStore := 
store.NewBlockStore(blockDB) @@ -114,7 +113,7 @@ func newBlockchainReactor( // NOTE we have to create and commit the blocks first because // pool.height is determined from the store. fastSync := true - db := dbm.NewMemDB() + db := memdb.New() stateStore = sm.NewStore(db) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index 5218e71e5..c54c1a080 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -9,10 +9,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/behaviour" bc "github.com/consideritdone/landslidecore/blockchain" @@ -28,6 +24,8 @@ import ( "github.com/consideritdone/landslidecore/store" "github.com/consideritdone/landslidecore/types" tmtime "github.com/consideritdone/landslidecore/types/time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type mockPeer struct { @@ -158,7 +156,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor { if err != nil { panic(fmt.Errorf("error start app: %w", err)) } - db := dbm.NewMemDB() + db := memdb.New() stateStore := sm.NewStore(db) appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) if err = stateStore.Save(state); err != nil { @@ -502,15 +500,15 @@ func newReactorStore( panic(fmt.Errorf("error start app: %w", err)) } - stateDB := dbm.NewMemDB() - blockStore := store.NewBlockStore(dbm.NewMemDB()) + stateDB := memdb.New() + blockStore := store.NewBlockStore(memdb.New()) stateStore := sm.NewStore(stateDB) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } - 
db := dbm.NewMemDB() + db := memdb.New() stateStore = sm.NewStore(db) blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go index f3221dbae..1dbe27326 100644 --- a/cmd/tendermint/commands/light.go +++ b/cmd/tendermint/commands/light.go @@ -5,6 +5,10 @@ import ( "context" "errors" "fmt" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/leveldb" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/prometheus/client_golang/prometheus" "net/http" "os" "path/filepath" @@ -13,8 +17,6 @@ import ( "github.com/spf13/cobra" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/libs/log" tmmath "github.com/consideritdone/landslidecore/libs/math" tmos "github.com/consideritdone/landslidecore/libs/os" @@ -119,7 +121,9 @@ func runProxy(cmd *cobra.Command, args []string) error { witnessesAddrs = strings.Split(witnessAddrsJoined, ",") } - db, err := dbm.NewGoLevelDB("light-client-db", home) + dbName := "light-client-db" + dbLogger := logging.NewLogger(dbName) + db, err := leveldb.New(home, []byte{}, dbLogger, dbName, prometheus.NewRegistry()) if err != nil { return fmt.Errorf("can't create a db: %w", err) } @@ -231,7 +235,7 @@ func runProxy(cmd *cobra.Command, args []string) error { return nil } -func checkForExistingProviders(db dbm.DB) (string, []string, error) { +func checkForExistingProviders(db database.Database) (string, []string, error) { primaryBytes, err := db.Get(primaryKey) if err != nil { return "", []string{""}, err @@ -244,12 +248,12 @@ func checkForExistingProviders(db dbm.DB) (string, []string, error) { return string(primaryBytes), witnessesAddrs, nil } -func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error { - err := db.Set(primaryKey, []byte(primaryAddr)) +func saveProviders(db database.Database, primaryAddr, 
witnessesAddrs string) error { + err := db.Put(primaryKey, []byte(primaryAddr)) if err != nil { return fmt.Errorf("failed to save primary provider: %w", err) } - err = db.Set(witnessesKey, []byte(witnessesAddrs)) + err = db.Put(witnessesKey, []byte(witnessesAddrs)) if err != nil { return fmt.Errorf("failed to save witness providers: %w", err) } diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go index cf371e927..a2d1af110 100644 --- a/cmd/tendermint/commands/rollback.go +++ b/cmd/tendermint/commands/rollback.go @@ -2,16 +2,18 @@ package commands import ( "fmt" + "github.com/ava-labs/avalanchego/database" "github.com/spf13/cobra" - dbm "github.com/tendermint/tm-db" - cfg "github.com/consideritdone/landslidecore/config" + landslidedb "github.com/consideritdone/landslidecore/database" "github.com/consideritdone/landslidecore/state" "github.com/consideritdone/landslidecore/store" ) +type dbCreator func(name string, dir string) (database.Database, error) + var RollbackStateCmd = &cobra.Command{ Use: "rollback", Short: "rollback tendermint state by one height", @@ -53,20 +55,21 @@ func RollbackState(config *cfg.Config) (int64, []byte, error) { } func loadStateAndBlockStore(config *cfg.Config) (*store.BlockStore, state.Store, error) { - dbType := dbm.BackendType(config.DBBackend) - - // Get BlockStore - blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir()) + blockStoreDBName := "blockstore" + blockStoreDB, err := landslidedb.NewDB(blockStoreDBName, config.DBBackend, config.DBDir()) if err != nil { return nil, nil, err } - blockStore := store.NewBlockStore(blockStoreDB) - - // Get StateStore - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateDBName := "state" + stateDB, err := landslidedb.NewDB(stateDBName, config.DBBackend, config.DBDir()) if err != nil { return nil, nil, err } + + // Get BlockStore + blockStore := store.NewBlockStore(blockStoreDB) + + // Get StateStore stateStore := 
state.NewStore(stateDB) return blockStore, stateStore, nil diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 6003f7770..7de2293de 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -12,8 +12,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - abcicli "github.com/consideritdone/landslidecore/abci/client" abci "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/evidence" @@ -45,7 +43,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for i := 0; i < nValidators; i++ { logger := consensusLogger().With("test", "byzantine", "validator", i) - stateDB := dbm.NewMemDB() // each state needs its own db + stateDB := memdb.New() // each state needs its own db stateStore := sm.NewStore(stateDB) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) @@ -55,7 +53,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) - blockDB := dbm.NewMemDB() + blockDB := memdb.New() blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus @@ -71,7 +69,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { } // Make a full instance of the evidence pool - evidenceDB := dbm.NewMemDB() + evidenceDB := memdb.New() evpool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) require.NoError(t, err) evpool.SetLogger(logger.With("module", "evidence")) diff --git a/consensus/common_test.go b/consensus/common_test.go index 2eb53c9eb..d0cb5887f 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "github.com/ava-labs/avalanchego/database" "io/ioutil" "os" "path/filepath" @@ -17,8 +18,6 @@ import ( "path" - dbm "github.com/tendermint/tm-db" - abcicli 
"github.com/consideritdone/landslidecore/abci/client" "github.com/consideritdone/landslidecore/abci/example/counter" "github.com/consideritdone/landslidecore/abci/example/kvstore" @@ -374,7 +373,7 @@ func newStateWithConfig( pv types.PrivValidator, app abci.Application, ) *State { - blockDB := dbm.NewMemDB() + blockDB := memdb.New() return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB) } @@ -383,7 +382,7 @@ func newStateWithConfigAndBlockStore( state sm.State, pv types.PrivValidator, app abci.Application, - blockDB dbm.DB, + blockDB database.Database, ) *State { // Get BlockStore blockStore := store.NewBlockStore(blockDB) @@ -693,7 +692,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou logger := consensusLogger() configRootDirs := make([]string, 0, nValidators) for i := 0; i < nValidators; i++ { - stateDB := dbm.NewMemDB() // each state needs its own db + stateDB := memdb.New() // each state needs its own db stateStore := sm.NewStore(stateDB) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) @@ -731,7 +730,7 @@ func randConsensusNetWithPeers( var peer0Config *cfg.Config configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { - stateDB := dbm.NewMemDB() // each state needs its own db + stateDB := memdb.New() // each state needs its own db stateStore := sm.NewStore(stateDB) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index a70899b0f..c2be7e337 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/abci/example/code" abci "github.com/consideritdone/landslidecore/abci/types" mempl 
"github.com/consideritdone/landslidecore/mempool" @@ -112,7 +110,7 @@ func deliverTxsRange(cs *State, start, end int) { func TestMempoolTxConcurrentWithCommit(t *testing.T) { state, privVals := randGenesisState(1, false, 10) - blockDB := dbm.NewMemDB() + blockDB := memdb.New() stateStore := sm.NewStore(blockDB) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB) err := stateStore.Save(state) @@ -137,7 +135,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { func TestMempoolRmBadTx(t *testing.T) { state, privVals := randGenesisState(1, false, 10) app := NewCounterApplication() - blockDB := dbm.NewMemDB() + blockDB := memdb.New() stateStore := sm.NewStore(blockDB) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) err := stateStore.Save(state) diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index dc4b7139c..e294916de 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -15,8 +15,6 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - abcicli "github.com/consideritdone/landslidecore/abci/client" "github.com/consideritdone/landslidecore/abci/example/kvstore" abci "github.com/consideritdone/landslidecore/abci/types" @@ -135,7 +133,7 @@ func TestReactorWithEvidence(t *testing.T) { css := make([]*State, nValidators) logger := consensusLogger() for i := 0; i < nValidators; i++ { - stateDB := dbm.NewMemDB() // each state needs its own db + stateDB := memdb.New() // each state needs its own db stateStore := sm.NewStore(stateDB) state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) @@ -149,7 +147,7 @@ func TestReactorWithEvidence(t *testing.T) { // duplicate code from: // css[i] = newStateWithConfig(thisConfig, state, privVals[i], app) - blockDB := dbm.NewMemDB() + blockDB := memdb.New() blockStore := 
store.NewBlockStore(blockDB) // one for mempool, one for consensus diff --git a/consensus/replay_file.go b/consensus/replay_file.go index bd514348e..e478a4961 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -5,13 +5,12 @@ import ( "context" "errors" "fmt" + "github.com/consideritdone/landslidecore/database" "io" "os" "strconv" "strings" - dbm "github.com/tendermint/tm-db" - cfg "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/libs/log" tmos "github.com/consideritdone/landslidecore/libs/os" @@ -284,16 +283,15 @@ func (pb *playback) replayConsoleLoop() int { // convenience for replay mode func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State { - dbType := dbm.BackendType(config.DBBackend) // Get BlockStore - blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir()) + blockStoreDB, err := database.NewDB("blockstore", config.DBBackend, config.DBDir()) if err != nil { tmos.Exit(err.Error()) } blockStore := store.NewBlockStore(blockStoreDB) // Get State - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateDB, err := database.NewDB("state", config.DBBackend, config.DBDir()) if err != nil { tmos.Exit(err.Error()) } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 4b5fe8a82..5ace778eb 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -4,6 +4,8 @@ import ( "bytes" "context" "fmt" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "io" "io/ioutil" "os" @@ -13,11 +15,6 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/abci/example/kvstore" abci "github.com/consideritdone/landslidecore/abci/types" cfg "github.com/consideritdone/landslidecore/config" @@ -32,6 +29,9 @@ import ( 
"github.com/consideritdone/landslidecore/proxy" sm "github.com/consideritdone/landslidecore/state" "github.com/consideritdone/landslidecore/types" + "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMain(m *testing.M) { @@ -65,7 +65,7 @@ func TestMain(m *testing.M) { // wal writer when we need to, instead of with every message. func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, - lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { + lastBlockHeight int64, blockDB database.Database, stateStore sm.Store) { logger := log.TestingLogger() state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile()) privValidator := loadPrivValidator(consensusReplayConfig) @@ -123,14 +123,14 @@ func sendTxs(ctx context.Context, cs *State) { func TestWALCrash(t *testing.T) { testCases := []struct { name string - initFn func(dbm.DB, *State, context.Context) + initFn func(database.Database, *State, context.Context) heightToStop int64 }{ {"empty block", - func(stateDB dbm.DB, cs *State, ctx context.Context) {}, + func(stateDB database.Database, cs *State, ctx context.Context) {}, 1}, {"many non-empty blocks", - func(stateDB dbm.DB, cs *State, ctx context.Context) { + func(stateDB database.Database, cs *State, ctx context.Context) { go sendTxs(ctx, cs) }, 3}, @@ -146,7 +146,7 @@ func TestWALCrash(t *testing.T) { } func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config, - initFn func(dbm.DB, *State, context.Context), heightToStop int64) { + initFn func(database.Database, *State, context.Context), heightToStop int64) { walPanicked := make(chan error) crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} @@ -157,7 +157,7 @@ LOOP: // create consensus state from a clean slate logger := log.NewNopLogger() - blockDB := dbm.NewMemDB() + blockDB := memdb.New() stateDB := blockDB stateStore := sm.NewStore(stateDB) state, 
err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) @@ -654,12 +654,12 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin var chain []*types.Block var commits []*types.Commit var store *mockBlockStore - var stateDB dbm.DB + var stateDB database.Database var genesisState sm.State if testValidatorsChange { testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) defer os.RemoveAll(testConfig.RootDir) - stateDB = dbm.NewMemDB() + stateDB = memdb.New() genesisState = sim.GenesisState config = sim.Config @@ -711,7 +711,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin // run nBlocks against a new client to build up the app state. // use a throwaway tendermint state proxyApp := proxy.NewAppConns(clientCreator2) - stateDB1 := dbm.NewMemDB() + stateDB1 := memdb.New() stateStore := sm.NewStore(stateDB1) err := stateStore.Save(genesisState) require.NoError(t, err) @@ -1146,8 +1146,8 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { func stateAndStore( config *cfg.Config, pubKey crypto.PubKey, - appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { - stateDB := dbm.NewMemDB() + appVersion uint64) (database.Database, sm.State, *mockBlockStore) { + stateDB := memdb.New() stateStore := sm.NewStore(stateDB) state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) state.Version.Consensus.App = appVersion diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index a213d110f..0148b9ed6 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -4,13 +4,12 @@ import ( "bufio" "bytes" "fmt" + "github.com/ava-labs/avalanchego/database/memdb" "io" "path/filepath" "testing" "time" - db "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/abci/example/kvstore" cfg "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/libs/log" @@ -45,7 +44,7 @@ func WALGenerateNBlocks(t *testing.T, wr 
io.Writer, numBlocks int) (err error) { if err != nil { return fmt.Errorf("failed to read genesis file: %w", err) } - blockStoreDB := db.NewMemDB() + blockStoreDB := memdb.New() stateDB := blockStoreDB stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) diff --git a/database/database.go b/database/database.go new file mode 100644 index 000000000..1ef506207 --- /dev/null +++ b/database/database.go @@ -0,0 +1,60 @@ +package database + +import ( + "fmt" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/leveldb" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/prometheus/client_golang/prometheus" +) + +// These are valid backend types. +const ( + // GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most + // popular implementation) + // - pure go + // - stable + GoLevelDBBackend string = "goleveldb" + // CLevelDBBackend represents cleveldb (uses levigo wrapper) + // - fast + // - requires gcc + // - use cleveldb build tag (go build -tags cleveldb) + CLevelDBBackend string = "cleveldb" + // MemDBBackend represents in-memory key value store, which is mostly used + // for testing. + MemDBBackend string = "memdb" + // BoltDBBackend represents bolt (uses etcd's fork of bolt - + // github.com/etcd-io/bbolt) + // - EXPERIMENTAL + // - may be faster is some use-cases (random reads - indexer) + // - use boltdb build tag (go build -tags boltdb) + BoltDBBackend string = "boltdb" + // RocksDBBackend represents rocksdb (uses github.com/tecbot/gorocksdb) + // - EXPERIMENTAL + // - requires gcc + // - use rocksdb build tag (go build -tags rocksdb) + RocksDBBackend string = "rocksdb" + + BadgerDBBackend string = "badgerdb" +) + +// NewDB creates a new database of type backend with the given name.
+func NewDB(name string, backendType, dir string) (database.Database, error) { + var err error + var db database.Database + switch backendType { + case GoLevelDBBackend: + logger := logging.NewLogger(name) + db, err = leveldb.New(dir, []byte{}, logger, name, prometheus.NewRegistry()) + if err != nil { + return nil, err + } + case MemDBBackend: + db = memdb.New() + default: + // A nil database would panic at first use; fail fast instead. + return nil, fmt.Errorf("unsupported db backend: %q", backendType) + } + return db, nil +} diff --git a/evidence/pool.go b/evidence/pool.go index 02acef99c..9494b0408 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -4,19 +4,18 @@ import ( "bytes" "errors" "fmt" + "github.com/ava-labs/avalanchego/database" "sync" "sync/atomic" "time" - "github.com/gogo/protobuf/proto" - gogotypes "github.com/gogo/protobuf/types" - dbm "github.com/tendermint/tm-db" - clist "github.com/consideritdone/landslidecore/libs/clist" "github.com/consideritdone/landslidecore/libs/log" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" sm "github.com/consideritdone/landslidecore/state" "github.com/consideritdone/landslidecore/types" + "github.com/gogo/protobuf/proto" + gogotypes "github.com/gogo/protobuf/types" ) const ( @@ -28,7 +27,7 @@ const ( type Pool struct { logger log.Logger - evidenceStore dbm.DB + evidenceStore database.Database evidenceList *clist.CList // concurrent linked-list of evidence evidenceSize uint32 // amount of pending evidence @@ -51,7 +50,7 @@ type Pool struct { // NewPool creates an evidence pool. If using an existing evidence store, // it will add all pending evidence to the concurrent list.
-func NewPool(evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore) (*Pool, error) { +func NewPool(evidenceDB database.Database, stateDB sm.Store, blockStore BlockStore) (*Pool, error) { state, err := stateDB.Load() if err != nil { @@ -305,7 +304,7 @@ func (evpool *Pool) addPendingEvidence(ev types.Evidence) error { key := keyPending(ev) - err = evpool.evidenceStore.Set(key, evBytes) + err = evpool.evidenceStore.Put(key, evBytes) if err != nil { return fmt.Errorf("can't persist evidence: %w", err) } @@ -344,7 +343,7 @@ continue } - if err := evpool.evidenceStore.Set(key, evBytes); err != nil { + if err := evpool.evidenceStore.Put(key, evBytes); err != nil { evpool.logger.Error("Unable to save committed evidence", "err", err, "key(height/hash)", key) } } @@ -365,12 +364,9 @@ evList tmproto.EvidenceList // used for calculating the bytes size ) - iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{prefixKey}) - if err != nil { - return nil, totalSize, fmt.Errorf("database error: %v", err) - } - defer iter.Close() - for ; iter.Valid(); iter.Next() { + iter := evpool.evidenceStore.NewIteratorWithPrefix([]byte{prefixKey}) + defer iter.Release() + for iter.Next() { var evpb tmproto.Evidence err := evpb.Unmarshal(iter.Value()) if err != nil { @@ -401,14 +397,10 @@ } func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) { - iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{baseKeyPending}) - if err != nil { - evpool.logger.Error("Unable to iterate over pending evidence", "err", err) - return evpool.State().LastBlockHeight, evpool.State().LastBlockTime - } - defer iter.Close() + iter := evpool.evidenceStore.NewIteratorWithPrefix([]byte{baseKeyPending}) + 
defer iter.Release() blockEvidenceMap := make(map[string]struct{}) - for ; iter.Valid(); iter.Next() { + for iter.Next() { ev, err := bytesToEv(iter.Value()) if err != nil { evpool.logger.Error("Error in transition evidence from protobuf", "err", err) diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 634b578c5..7cfaa7682 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -1,6 +1,8 @@ package evidence_test import ( + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "os" "testing" "time" @@ -9,8 +11,6 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/evidence" "github.com/consideritdone/landslidecore/evidence/mocks" "github.com/consideritdone/landslidecore/libs/log" @@ -40,7 +40,7 @@ func TestEvidencePoolBasic(t *testing.T) { var ( height = int64(1) stateStore = &smmocks.Store{} - evidenceDB = dbm.NewMemDB() + evidenceDB = memdb.New() blockStore = &mocks.BlockStore{} ) @@ -100,7 +100,7 @@ func TestAddExpiredEvidence(t *testing.T) { val = types.NewMockPV() height = int64(30) stateStore = initializeValidatorState(val, height) - evidenceDB = dbm.NewMemDB() + evidenceDB = memdb.New() blockStore = &mocks.BlockStore{} expiredEvidenceTime = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) expiredHeight = int64(2) @@ -261,7 +261,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header}) blockStore.On("LoadBlockCommit", height).Return(trusted.Commit) blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) - pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + pool, err := evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) pool.SetLogger(log.TestingLogger()) @@ -302,11 +302,11 @@ func TestRecoverPendingEvidence(t *testing.T) { height := int64(10) val := types.NewMockPV() valAddress := 
val.PrivKey.PubKey().Address() - evidenceDB := dbm.NewMemDB() + evidenceDB := memdb.New() stateStore := initializeValidatorState(val, height) state, err := stateStore.Load() require.NoError(t, err) - blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress) + blockStore := initializeBlockStore(memdb.New(), state, valAddress) // create previous pool and populate it pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) require.NoError(t, err) @@ -347,7 +347,7 @@ func TestRecoverPendingEvidence(t *testing.T) { } func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store { - stateDB := dbm.NewMemDB() + stateDB := memdb.New() stateStore := sm.NewStore(stateDB) state := sm.State{ ChainID: evidenceChainID, @@ -398,7 +398,7 @@ func initializeValidatorState(privVal types.PrivValidator, height int64) sm.Stor // initializeBlockStore creates a block storage and populates it w/ a dummy // block at +height+. -func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) *store.BlockStore { +func initializeBlockStore(db database.Database, state sm.State, valAddr []byte) *store.BlockStore { blockStore := store.NewBlockStore(db) for i := int64(1); i <= state.LastBlockHeight; i++ { @@ -430,10 +430,10 @@ func makeCommit(height int64, valAddr []byte) *types.Commit { func defaultTestPool(height int64) (*evidence.Pool, types.MockPV) { val := types.NewMockPV() valAddress := val.PrivKey.PubKey().Address() - evidenceDB := dbm.NewMemDB() + evidenceDB := memdb.New() stateStore := initializeValidatorState(val, height) state, _ := stateStore.Load() - blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress) + blockStore := initializeBlockStore(memdb.New(), state, valAddress) pool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) if err != nil { panic("test evidence pool could not be created") diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index f90ed2ea1..0c1b5b93f 100644 --- 
a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -13,8 +13,6 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - cfg "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/crypto" "github.com/consideritdone/landslidecore/crypto/tmhash" @@ -189,7 +187,7 @@ func TestReactorsGossipNoCommittedEvidence(t *testing.T) { func TestReactorBroadcastEvidenceMemoryLeak(t *testing.T) { evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) - evidenceDB := dbm.NewMemDB() + evidenceDB := memdb.New() blockStore := &mocks.BlockStore{} blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( &types.BlockMeta{Header: types.Header{Time: evidenceTime}}, @@ -246,7 +244,7 @@ func makeAndConnectReactorsAndPools(config *cfg.Config, stateStores []sm.Store) evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) for i := 0; i < N; i++ { - evidenceDB := dbm.NewMemDB() + evidenceDB := memdb.New() blockStore := &mocks.BlockStore{} blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( &types.BlockMeta{Header: types.Header{Time: evidenceTime}}, diff --git a/evidence/verify_test.go b/evidence/verify_test.go index 28fe20f65..3036a1c40 100644 --- a/evidence/verify_test.go +++ b/evidence/verify_test.go @@ -8,8 +8,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/crypto" "github.com/consideritdone/landslidecore/crypto/tmhash" "github.com/consideritdone/landslidecore/evidence" @@ -90,7 +88,7 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header}) blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) blockStore.On("LoadBlockCommit", height).Return(trusted.Commit) - pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, 
blockStore) + pool, err := evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) pool.SetLogger(log.TestingLogger()) @@ -113,20 +111,20 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { // duplicate evidence should be rejected evList = types.EvidenceList{ev, ev} - pool, err = evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + pool, err = evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) assert.Error(t, pool.CheckEvidence(evList)) // If evidence is submitted with an altered timestamp it should return an error ev.Timestamp = defaultEvidenceTime.Add(1 * time.Minute) - pool, err = evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + pool, err = evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) assert.Error(t, pool.AddEvidence(ev)) ev.Timestamp = defaultEvidenceTime // Evidence submitted with a different validator power should fail ev.TotalVotingPower = 1 - pool, err = evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + pool, err = evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) assert.Error(t, pool.AddEvidence(ev)) ev.TotalVotingPower = common.ValidatorSet.TotalVotingPower() @@ -167,7 +165,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) blockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit) blockStore.On("Height").Return(nodeHeight) - pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + pool, err := evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) // check that the evidence pool correctly verifies the evidence @@ -185,7 +183,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { oldBlockStore.On("Height").Return(nodeHeight) require.Equal(t, defaultEvidenceTime, oldBlockStore.LoadBlockMeta(nodeHeight).Header.Time) - pool, err = evidence.NewPool(dbm.NewMemDB(), stateStore, oldBlockStore) + 
pool, err = evidence.NewPool(memdb.New(), stateStore, oldBlockStore) require.NoError(t, err) assert.Error(t, pool.CheckEvidence(types.EvidenceList{ev})) } @@ -263,7 +261,7 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) - pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + pool, err := evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) pool.SetLogger(log.TestingLogger()) @@ -338,7 +336,7 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) - pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + pool, err := evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) pool.SetLogger(log.TestingLogger()) @@ -431,7 +429,7 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { blockStore := &mocks.BlockStore{} blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}) - pool, err := evidence.NewPool(dbm.NewMemDB(), stateStore, blockStore) + pool, err := evidence.NewPool(memdb.New(), stateStore, blockStore) require.NoError(t, err) evList := types.EvidenceList{goodEv} diff --git a/go.mod b/go.mod index 52b9c6ffd..530ebc9af 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,6 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 github.com/google/orderedcode v0.0.1 - github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.5.0 github.com/gtank/merlin v0.1.1 github.com/lib/pq v1.10.4 @@ -67,6 +66,7 @@ require ( github.com/golang/mock v1.6.0 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.2 // indirect + github.com/gorilla/rpc v1.2.0 // 
indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect github.com/gtank/ristretto255 v0.1.2 // indirect diff --git a/light/client_benchmark_test.go b/light/client_benchmark_test.go index be42389a3..950ebe660 100644 --- a/light/client_benchmark_test.go +++ b/light/client_benchmark_test.go @@ -5,8 +5,6 @@ import ( "testing" "time" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/libs/log" "github.com/consideritdone/landslidecore/light" "github.com/consideritdone/landslidecore/light/provider" @@ -37,7 +35,7 @@ func BenchmarkSequence(b *testing.B) { }, benchmarkFullNode, []provider.Provider{benchmarkFullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.SequentialVerification(), ) @@ -65,7 +63,7 @@ func BenchmarkBisection(b *testing.B) { }, benchmarkFullNode, []provider.Provider{benchmarkFullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) if err != nil { @@ -93,7 +91,7 @@ func BenchmarkBackwards(b *testing.B) { }, benchmarkFullNode, []provider.Provider{benchmarkFullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) if err != nil { diff --git a/light/client_test.go b/light/client_test.go index fc45b5d30..84cc167a9 100644 --- a/light/client_test.go +++ b/light/client_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/libs/log" "github.com/consideritdone/landslidecore/light" "github.com/consideritdone/landslidecore/light/provider" @@ -232,7 +230,7 @@ func TestClient_SequentialVerification(t *testing.T) { tc.otherHeaders, tc.vals, )}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.SequentialVerification(), 
light.Logger(log.TestingLogger()), ) @@ -357,7 +355,7 @@ func TestClient_SkippingVerification(t *testing.T) { tc.otherHeaders, tc.vals, )}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.SkippingVerification(light.DefaultTrustLevel), light.Logger(log.TestingLogger()), ) @@ -395,7 +393,7 @@ func TestClientLargeBisectionVerification(t *testing.T) { }, veryLargeFullNode, []provider.Provider{veryLargeFullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.SkippingVerification(light.DefaultTrustLevel), ) require.NoError(t, err) @@ -417,7 +415,7 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) { }, fullNode, []provider.Provider{fullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.SkippingVerification(light.DefaultTrustLevel), ) require.NoError(t, err) @@ -441,7 +439,7 @@ func TestClient_Cleanup(t *testing.T) { trustOptions, fullNode, []provider.Provider{fullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -461,7 +459,7 @@ func TestClient_Cleanup(t *testing.T) { func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { // 1. options.Hash == trustedHeader.Hash { - trustedStore := dbs.New(dbm.NewMemDB(), chainID) + trustedStore := dbs.New(memdb.New(), chainID) err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) @@ -485,7 +483,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { // 2. options.Hash != trustedHeader.Hash { - trustedStore := dbs.New(dbm.NewMemDB(), chainID) + trustedStore := dbs.New(memdb.New(), chainID) err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) @@ -530,7 +528,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { // 1. 
options.Hash == trustedHeader.Hash { - trustedStore := dbs.New(dbm.NewMemDB(), chainID) + trustedStore := dbs.New(memdb.New(), chainID) err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) @@ -560,7 +558,7 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { // 2. options.Hash != trustedHeader.Hash // This could happen if previous provider was lying to us. { - trustedStore := dbs.New(dbm.NewMemDB(), chainID) + trustedStore := dbs.New(memdb.New(), chainID) err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) @@ -607,7 +605,7 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { // 1. options.Hash == trustedHeader.Hash { // load the first three headers into the trusted store - trustedStore := dbs.New(dbm.NewMemDB(), chainID) + trustedStore := dbs.New(memdb.New(), chainID) err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) @@ -645,7 +643,7 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { // 2. options.Hash != trustedHeader.Hash // This could happen if previous provider was lying to us. 
{ - trustedStore := dbs.New(dbm.NewMemDB(), chainID) + trustedStore := dbs.New(memdb.New(), chainID) err := trustedStore.SaveLightBlock(l1) require.NoError(t, err) @@ -705,7 +703,7 @@ func TestClient_Update(t *testing.T) { trustOptions, fullNode, []provider.Provider{fullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -726,7 +724,7 @@ func TestClient_Concurrency(t *testing.T) { trustOptions, fullNode, []provider.Provider{fullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -767,7 +765,7 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { trustOptions, deadNode, []provider.Provider{fullNode, fullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.MaxRetryAttempts(1), ) @@ -793,7 +791,7 @@ func TestClient_BackwardsVerification(t *testing.T) { }, largeFullNode, []provider.Provider{largeFullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -875,7 +873,7 @@ func TestClient_BackwardsVerification(t *testing.T) { }, tc.provider, []provider.Provider{tc.provider}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) require.NoError(t, err, idx) @@ -888,7 +886,7 @@ func TestClient_BackwardsVerification(t *testing.T) { func TestClient_NewClientFromTrustedStore(t *testing.T) { // 1) Initiate DB and fill with a "trusted" header - db := dbs.New(dbm.NewMemDB(), chainID) + db := dbs.New(memdb.New(), chainID) err := db.SaveLightBlock(l1) require.NoError(t, err) @@ -945,7 +943,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { trustOptions, fullNode, []provider.Provider{badProvider1, badProvider2}, - dbs.New(dbm.NewMemDB(), chainID), + 
dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.MaxRetryAttempts(1), ) @@ -995,7 +993,7 @@ func TestClient_TrustedValidatorSet(t *testing.T) { trustOptions, fullNode, []provider.Provider{badValSetNode, fullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -1013,7 +1011,7 @@ func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { trustOptions, fullNode, []provider.Provider{fullNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.PruningSize(1), ) @@ -1086,7 +1084,7 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) { trustOptions, badNode, []provider.Provider{badNode, badNode}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.MaxRetryAttempts(1), ) require.NoError(t, err) @@ -1119,7 +1117,7 @@ func TestClientHandlesContexts(t *testing.T) { }, p, []provider.Provider{p, p}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), ) require.Error(t, ctxTimeOut.Err()) require.Error(t, err) @@ -1136,7 +1134,7 @@ func TestClientHandlesContexts(t *testing.T) { }, p, []provider.Provider{p, p}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), ) require.NoError(t, err) diff --git a/light/detector_test.go b/light/detector_test.go index dc0a23f09..9fc710730 100644 --- a/light/detector_test.go +++ b/light/detector_test.go @@ -7,8 +7,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/libs/log" "github.com/consideritdone/landslidecore/light" "github.com/consideritdone/landslidecore/light/provider" @@ -54,7 +52,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { }, primary, []provider.Provider{witness}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), 
light.MaxRetryAttempts(1), ) @@ -136,7 +134,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { }, primary, []provider.Provider{witness}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.MaxRetryAttempts(1), verificationOption, @@ -226,7 +224,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { }, primary, []provider.Provider{witness, accomplice}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.MaxClockDrift(1*time.Second), light.MaxBlockLag(1*time.Second), @@ -293,7 +291,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { }, primary, []provider.Provider{laggingWitness, accomplice}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.MaxClockDrift(1*time.Second), light.MaxBlockLag(1*time.Second), @@ -324,7 +322,7 @@ func TestClientDivergentTraces1(t *testing.T) { }, primary, []provider.Provider{witness}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.MaxRetryAttempts(1), ) @@ -348,7 +346,7 @@ func TestClientDivergentTraces2(t *testing.T) { }, primary, []provider.Provider{deadNode, deadNode, primary}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.MaxRetryAttempts(1), ) @@ -383,7 +381,7 @@ func TestClientDivergentTraces3(t *testing.T) { }, primary, []provider.Provider{witness}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), light.MaxRetryAttempts(1), ) @@ -420,7 +418,7 @@ func TestClientDivergentTraces4(t *testing.T) { }, primary, []provider.Provider{witness}, - dbs.New(dbm.NewMemDB(), chainID), + dbs.New(memdb.New(), chainID), light.Logger(log.TestingLogger()), ) require.NoError(t, err) diff --git a/light/example_test.go b/light/example_test.go index 
debff1d83..07119cc19 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -3,14 +3,15 @@ package light_test import ( "context" "fmt" + "github.com/ava-labs/avalanchego/database/leveldb" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/prometheus/client_golang/prometheus" "io/ioutil" stdlog "log" "os" "testing" "time" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/abci/example/kvstore" "github.com/consideritdone/landslidecore/libs/log" "github.com/consideritdone/landslidecore/light" @@ -46,7 +47,9 @@ func ExampleClient_Update() { stdlog.Fatal(err) } - db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + name := "light-client-db" + logger := logging.NewLogger(name) + db, err := leveldb.New(dbDir, []byte{}, logger, name, prometheus.NewRegistry()) if err != nil { stdlog.Fatal(err) } @@ -114,7 +117,9 @@ func ExampleClient_VerifyLightBlockAtHeight() { stdlog.Fatal(err) } - db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + name := "light-client-db" + logger := logging.NewLogger(name) + db, err := leveldb.New(dbDir, []byte{}, logger, name, prometheus.NewRegistry()) if err != nil { stdlog.Fatal(err) } diff --git a/light/store/db/db.go b/light/store/db/db.go index 8810f2f8d..30d685d18 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -3,11 +3,10 @@ package db import ( "encoding/binary" "fmt" + "github.com/ava-labs/avalanchego/database" "regexp" "strconv" - dbm "github.com/tendermint/tm-db" - tmsync "github.com/consideritdone/landslidecore/libs/sync" "github.com/consideritdone/landslidecore/light/store" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" @@ -19,7 +18,7 @@ var ( ) type dbs struct { - db dbm.DB + db database.Database prefix string mtx tmsync.RWMutex @@ -28,7 +27,7 @@ type dbs struct { // New returns a Store that wraps any DB (with an optional prefix in case you // want to use one DB with many light clients). 
-func New(db dbm.DB, prefix string) store.Store { +func New(db database.Database, prefix string) store.Store { size := uint16(0) bz, err := db.Get(sizeKey) diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go index d1da41de0..44a39327d 100644 --- a/light/store/db/db_test.go +++ b/light/store/db/db_test.go @@ -8,8 +8,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/crypto" "github.com/consideritdone/landslidecore/crypto/tmhash" tmrand "github.com/consideritdone/landslidecore/libs/rand" @@ -19,7 +17,7 @@ import ( ) func TestLast_FirstLightBlockHeight(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "TestLast_FirstLightBlockHeight") + dbStore := New(memdb.New(), "TestLast_FirstLightBlockHeight") // Empty store height, err := dbStore.LastLightBlockHeight() @@ -44,7 +42,7 @@ func TestLast_FirstLightBlockHeight(t *testing.T) { } func Test_SaveLightBlock(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_SaveLightBlockAndValidatorSet") + dbStore := New(memdb.New(), "Test_SaveLightBlockAndValidatorSet") // Empty store h, err := dbStore.LightBlock(1) @@ -74,7 +72,7 @@ func Test_SaveLightBlock(t *testing.T) { } func Test_LightBlockBefore(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_LightBlockBefore") + dbStore := New(memdb.New(), "Test_LightBlockBefore") assert.Panics(t, func() { _, _ = dbStore.LightBlockBefore(0) @@ -92,7 +90,7 @@ func Test_LightBlockBefore(t *testing.T) { } func Test_Prune(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_Prune") + dbStore := New(memdb.New(), "Test_Prune") // Empty store assert.EqualValues(t, 0, dbStore.Size()) @@ -129,7 +127,7 @@ func Test_Prune(t *testing.T) { } func Test_Concurrency(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_Prune") + dbStore := New(memdb.New(), "Test_Prune") var wg sync.WaitGroup for i := 1; i <= 100; i++ { diff --git a/node/node.go b/node/node.go index 
fae024ae1..daef926a5 100644 --- a/node/node.go +++ b/node/node.go @@ -5,16 +5,14 @@ import ( "context" "errors" "fmt" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" + landslidedb "github.com/consideritdone/landslidecore/database" "net" "net/http" "strings" "time" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/rs/cors" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" bcv0 "github.com/consideritdone/landslidecore/blockchain/v0" bcv1 "github.com/consideritdone/landslidecore/blockchain/v1" @@ -49,6 +47,9 @@ import ( "github.com/consideritdone/landslidecore/types" tmtime "github.com/consideritdone/landslidecore/types/time" "github.com/consideritdone/landslidecore/version" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/rs/cors" _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port @@ -66,13 +67,12 @@ type DBContext struct { } // DBProvider takes a DBContext and returns an instantiated DB. -type DBProvider func(*DBContext) (dbm.DB, error) +type DBProvider func(*DBContext) (database.Database, error) // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the ctx.Config. -func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { - dbType := dbm.BackendType(ctx.Config.DBBackend) - return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) +func DefaultDBProvider(ctx *DBContext) (database.Database, error) { + return landslidedb.NewDB(ctx.ID, ctx.Config.DBBackend, ctx.Config.DBDir()) } // GenesisDocProvider returns a GenesisDoc. 
@@ -229,8 +229,8 @@ type Node struct { prometheusSrv *http.Server } -func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { - var blockStoreDB dbm.DB +func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB database.Database, err error) { + var blockStoreDB database.Database blockStoreDB, err = dbProvider(&DBContext{"blockstore", config}) if err != nil { return @@ -284,7 +284,7 @@ func createAndStartIndexerService( } txIndexer = kv.NewTxIndex(store) - blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))) + blockIndexer = blockidxkv.New(prefixdb.New([]byte("block_events"), store)) case "psql": if config.TxIndex.PsqlConn == "" { @@ -386,7 +386,7 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, } func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, - stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { + stateDB database.Database, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { evidenceDB, err := dbProvider(&DBContext{"evidence", config}) if err != nil { @@ -1362,7 +1362,7 @@ var ( // database, or creates one using the given genesisDocProvider. On success this also // returns the genesis doc loaded through the given provider. 
func LoadStateFromDBOrGenesisDocProvider( - stateDB dbm.DB, + stateDB database.Database, genesisDocProvider GenesisDocProvider, ) (sm.State, *types.GenesisDoc, error) { // Get genesis doc @@ -1387,7 +1387,7 @@ func LoadStateFromDBOrGenesisDocProvider( } // panics if failed to unmarshal bytes -func LoadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { +func LoadGenesisDoc(db database.Database) (*types.GenesisDoc, error) { b, err := db.Get(genesisDocKey) if err != nil { panic(err) @@ -1404,12 +1404,12 @@ func LoadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { } // panics if failed to marshal the given genesis document -func SaveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error { +func SaveGenesisDoc(db database.Database, genDoc *types.GenesisDoc) error { b, err := tmjson.Marshal(genDoc) if err != nil { return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err) } - if err := db.SetSync(genesisDocKey, b); err != nil { + if err := db.Put(genesisDocKey, b); err != nil { return err } diff --git a/node/node_test.go b/node/node_test.go index 69f1358ff..560ad801b 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -3,6 +3,7 @@ package node import ( "context" "fmt" + "github.com/ava-labs/avalanchego/database" "net" "os" "syscall" @@ -12,8 +13,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/abci/example/kvstore" cfg "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/crypto/ed25519" @@ -254,8 +253,8 @@ func TestCreateProposalBlock(t *testing.T) { mempool.SetLogger(logger) // Make EvidencePool - evidenceDB := dbm.NewMemDB() - blockStore := store.NewBlockStore(dbm.NewMemDB()) + evidenceDB := memdb.New() + blockStore := store.NewBlockStore(memdb.New()) evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) require.NoError(t, err) evidencePool.SetLogger(logger) @@ -420,7 
+419,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { assert.Contains(t, channels, cr.Channels[0].ID) } -func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { +func state(nVals int, height int64) (sm.State, database.Database, []types.PrivValidator) { privVals := make([]types.PrivValidator, nVals) vals := make([]types.GenesisValidator, nVals) for i := 0; i < nVals; i++ { @@ -440,7 +439,7 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { }) // save validators to db for 2 heights - stateDB := dbm.NewMemDB() + stateDB := memdb.New() stateStore := sm.NewStore(stateDB) if err := stateStore.Save(s); err != nil { panic(err) diff --git a/p2p/trust/store.go b/p2p/trust/store.go index 3516c5398..8d74cf4e5 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -6,10 +6,9 @@ package trust import ( "encoding/json" "fmt" + "github.com/ava-labs/avalanchego/database" "time" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/libs/service" tmsync "github.com/consideritdone/landslidecore/libs/sync" ) @@ -29,7 +28,7 @@ type MetricStore struct { mtx tmsync.Mutex // The db where peer trust metric history data will be stored - db dbm.DB + db database.Database // This configuration will be used when creating new TrustMetrics config MetricConfig @@ -38,7 +37,7 @@ type MetricStore struct { // NewTrustMetricStore returns a store that saves data to the DB // and uses the config when creating new trust metrics. 
// Use Start to to initialize the trust metric store -func NewTrustMetricStore(db dbm.DB, tmc MetricConfig) *MetricStore { +func NewTrustMetricStore(db database.Database, tmc MetricConfig) *MetricStore { tms := &MetricStore{ peerMetrics: make(map[string]*Metric), db: db, @@ -199,7 +198,7 @@ func (tms *MetricStore) saveToDB() { tms.Logger.Error("Failed to encode the TrustHistory", "err", err) return } - if err := tms.db.SetSync(trustMetricKey, bytes); err != nil { + if err := tms.db.Put(trustMetricKey, bytes); err != nil { tms.Logger.Error("failed to flush data to disk", "error", err) } } diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go index 25b66874b..24a3b75df 100644 --- a/p2p/trust/store_test.go +++ b/p2p/trust/store_test.go @@ -5,15 +5,14 @@ package trust import ( "fmt" + "github.com/consideritdone/landslidecore/database" "io/ioutil" "os" "testing" + "github.com/consideritdone/landslidecore/libs/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - - "github.com/consideritdone/landslidecore/libs/log" ) func TestTrustMetricStoreSaveLoad(t *testing.T) { @@ -21,7 +20,7 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { require.NoError(t, err) defer os.Remove(dir) - historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir) + historyDB, err := database.NewDB("trusthistory", "goleveldb", dir) require.NoError(t, err) // 0 peers saved @@ -84,7 +83,7 @@ func TestTrustMetricStoreSaveLoad(t *testing.T) { } func TestTrustMetricStoreConfig(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") + historyDB, err := database.NewDB("", "memdb", "") require.NoError(t, err) config := MetricConfig{ @@ -109,7 +108,7 @@ func TestTrustMetricStoreConfig(t *testing.T) { } func TestTrustMetricStoreLookup(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") + historyDB, err := database.NewDB("", "memdb", "") require.NoError(t, err) store := NewTrustMetricStore(historyDB, DefaultConfig()) 
@@ -132,7 +131,7 @@ func TestTrustMetricStoreLookup(t *testing.T) { } func TestTrustMetricStorePeerScore(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") + historyDB, err := database.NewDB("", "memdb", "") require.NoError(t, err) store := NewTrustMetricStore(historyDB, DefaultConfig()) diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index cf9513907..f33b8b151 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -7,8 +7,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" ctypes "github.com/consideritdone/landslidecore/rpc/core/types" @@ -81,7 +79,7 @@ func TestBlockResults(t *testing.T) { } env = &Environment{} - env.StateStore = sm.NewStore(dbm.NewMemDB()) + env.StateStore = sm.NewStore(memdb.New()) err := env.StateStore.SaveABCIResponses(100, results) require.NoError(t, err) env.BlockStore = mockBlockStore{height: 100} diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index b51f1f231..35dd1db54 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -17,7 +17,7 @@ import ( // HTTP + JSON handler // jsonrpc calls grab the given method's function info and runs reflect.Call -func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { +func MakeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { b, err := ioutil.ReadAll(r.Body) if err != nil { @@ -67,13 +67,14 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han ) continue } - if len(r.URL.Path) > 1 { - responses = append( - responses, - types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), - ) - continue 
- } + //TODO + //if len(r.URL.Path) > 1 { + // responses = append( + // responses, + // types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), + // ) + // continue + //} rpcFunc, ok := funcMap[request.Method] if !ok || rpcFunc.ws { responses = append(responses, types.RPCMethodNotFoundError(request.ID)) diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index 9f39c3664..03925c0c4 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -20,7 +20,7 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger lo } // JSONRPC endpoints - mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) + mux.HandleFunc("/", handleInvalidJSONRPCPaths(MakeJSONRPCHandler(funcMap, logger))) } // Function introspection diff --git a/state/export_test.go b/state/export_test.go index a5344e64b..56565ce9d 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -1,8 +1,7 @@ package state import ( - dbm "github.com/tendermint/tm-db" - + "github.com/ava-labs/avalanchego/database" abci "github.com/consideritdone/landslidecore/abci/types" tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" @@ -42,7 +41,7 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params tmproto // SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in // store.go, exported exclusively and explicitly for testing. 
-func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error { +func SaveValidatorsInfo(db database.Database, height, lastHeightChanged int64, valSet *types.ValidatorSet) error { stateStore := dbStore{db} return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet) } diff --git a/state/helpers_test.go b/state/helpers_test.go index 33154ad75..9af64d1b2 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -3,10 +3,9 @@ package state_test import ( "bytes" "fmt" + "github.com/ava-labs/avalanchego/database" "time" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/crypto" "github.com/consideritdone/landslidecore/crypto/ed25519" @@ -93,7 +92,7 @@ func makeTxs(height int64) (txs []types.Tx) { return txs } -func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { +func makeState(nVals, height int) (sm.State, database.Database, map[string]types.PrivValidator) { vals := make([]types.GenesisValidator, nVals) privVals := make(map[string]types.PrivValidator, nVals) for i := 0; i < nVals; i++ { @@ -114,7 +113,7 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida AppHash: nil, }) - stateDB := dbm.NewMemDB() + stateDB := memdb.New() stateStore := sm.NewStore(stateDB) if err := stateStore.Save(s); err != nil { panic(err) diff --git a/state/indexer/block/kv/kv.go b/state/indexer/block/kv/kv.go index f77391306..b5d9647c7 100644 --- a/state/indexer/block/kv/kv.go +++ b/state/indexer/block/kv/kv.go @@ -4,17 +4,16 @@ import ( "context" "errors" "fmt" + "github.com/ava-labs/avalanchego/database" "sort" "strconv" "strings" - "github.com/google/orderedcode" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/libs/pubsub/query" "github.com/consideritdone/landslidecore/state/indexer" 
"github.com/consideritdone/landslidecore/types" + "github.com/google/orderedcode" ) var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) @@ -23,10 +22,10 @@ var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) // events with an underlying KV store. Block events are indexed by their height, // such that matching search criteria returns the respective block height(s). type BlockerIndexer struct { - store dbm.DB + store database.Database } -func New(store dbm.DB) *BlockerIndexer { +func New(store database.Database) *BlockerIndexer { return &BlockerIndexer{ store: store, } @@ -51,7 +50,7 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { // EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height) func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() - defer batch.Close() + defer batch.Reset() height := bh.Header.Height @@ -60,7 +59,7 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { if err != nil { return fmt.Errorf("failed to create block height index key: %w", err) } - if err := batch.Set(key, int64ToBytes(height)); err != nil { + if err := batch.Put(key, int64ToBytes(height)); err != nil { return err } @@ -74,7 +73,7 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { return fmt.Errorf("failed to index EndBlock events: %w", err) } - return batch.WriteSync() + return batch.Write() } // Search performs a query for block heights that match a given BeginBlock @@ -234,14 +233,11 @@ func (idx *BlockerIndexer) matchRange( lowerBound := qr.LowerBoundValue() upperBound := qr.UpperBoundValue() - it, err := dbm.IteratePrefix(idx.store, startKey) - if err != nil { - return nil, fmt.Errorf("failed to create prefix iterator: %w", err) - } - defer it.Close() + it := idx.store.NewIteratorWithPrefix(startKey) + defer it.Release() LOOP: - for ; it.Valid(); it.Next() { + for ; it.Error() == nil && len(it.Key()) > 0; it.Next() { var ( 
eventValue string err error @@ -342,13 +338,10 @@ func (idx *BlockerIndexer) match( switch { case c.Op == query.OpEqual: - it, err := dbm.IteratePrefix(idx.store, startKeyBz) - if err != nil { - return nil, fmt.Errorf("failed to create prefix iterator: %w", err) - } - defer it.Close() + it := idx.store.NewIteratorWithPrefix(startKeyBz) + defer it.Release() - for ; it.Valid(); it.Next() { + for ; it.Error() == nil && len(it.Key()) > 0; it.Next() { tmpHeights[string(it.Value())] = it.Value() if err := ctx.Err(); err != nil { @@ -366,13 +359,10 @@ func (idx *BlockerIndexer) match( return nil, err } - it, err := dbm.IteratePrefix(idx.store, prefix) - if err != nil { - return nil, fmt.Errorf("failed to create prefix iterator: %w", err) - } - defer it.Close() + it := idx.store.NewIteratorWithPrefix(prefix) + defer it.Release() - for ; it.Valid(); it.Next() { + for ; it.Error() == nil && len(it.Key()) > 0; it.Next() { tmpHeights[string(it.Value())] = it.Value() select { @@ -393,13 +383,10 @@ func (idx *BlockerIndexer) match( return nil, err } - it, err := dbm.IteratePrefix(idx.store, prefix) - if err != nil { - return nil, fmt.Errorf("failed to create prefix iterator: %w", err) - } - defer it.Close() + it := idx.store.NewIteratorWithPrefix(prefix) + defer it.Release() - for ; it.Valid(); it.Next() { + for ; it.Error() == nil && len(it.Key()) > 0; it.Next() { eventValue, err := parseValueFromEventKey(it.Key()) if err != nil { continue @@ -453,7 +440,7 @@ func (idx *BlockerIndexer) match( return filteredHeights, nil } -func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, typ string, height int64) error { +func (idx *BlockerIndexer) indexEvents(batch database.Batch, events []abci.Event, typ string, height int64) error { heightBz := int64ToBytes(height) for _, event := range events { @@ -479,7 +466,7 @@ func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, typ return fmt.Errorf("failed to create block index key: %w", err) } - if 
err := batch.Set(key, heightBz); err != nil { + if err := batch.Put(key, heightBz); err != nil { return err } } diff --git a/state/rollback_test.go b/state/rollback_test.go index f3df8d69d..157abb135 100644 --- a/state/rollback_test.go +++ b/state/rollback_test.go @@ -4,9 +4,6 @@ import ( "crypto/rand" "testing" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/crypto" "github.com/consideritdone/landslidecore/crypto/tmhash" tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" @@ -15,6 +12,7 @@ import ( "github.com/consideritdone/landslidecore/state/mocks" "github.com/consideritdone/landslidecore/types" "github.com/consideritdone/landslidecore/version" + "github.com/stretchr/testify/require" ) func TestRollback(t *testing.T) { @@ -82,7 +80,7 @@ func TestRollback(t *testing.T) { } func TestRollbackNoState(t *testing.T) { - stateStore := state.NewStore(dbm.NewMemDB()) + stateStore := state.NewStore(memdb.New()) blockStore := &mocks.BlockStore{} _, _, err := state.Rollback(blockStore, stateStore) @@ -115,7 +113,7 @@ func TestRollbackDifferentStateHeight(t *testing.T) { } func setupStateStore(t *testing.T, height int64) state.Store { - stateStore := state.NewStore(dbm.NewMemDB()) + stateStore := state.NewStore(memdb.New()) valSet, _ := types.RandValidatorSet(5, 10) params := types.DefaultConsensusParams() diff --git a/state/state_test.go b/state/state_test.go index 8d8a33b84..d023714d2 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -3,6 +3,8 @@ package state_test import ( "bytes" "fmt" + "github.com/ava-labs/avalanchego/database" + landslidedb "github.com/consideritdone/landslidecore/database" "math" "math/big" "os" @@ -11,8 +13,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" cfg "github.com/consideritdone/landslidecore/config" 
"github.com/consideritdone/landslidecore/crypto/ed25519" @@ -25,10 +25,9 @@ import ( ) // setupTestCase does setup common to all test cases. -func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { +func setupTestCase(t *testing.T) (func(t *testing.T), database.Database, sm.State) { config := cfg.ResetTestRoot("state_") - dbType := dbm.BackendType(config.DBBackend) - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateDB, err := landslidedb.NewDB("state", config.DBBackend, config.DBDir()) stateStore := sm.NewStore(stateDB) require.NoError(t, err) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) diff --git a/state/store.go b/state/store.go index 4ab41eba1..ba1bfd840 100644 --- a/state/store.go +++ b/state/store.go @@ -3,9 +3,7 @@ package state import ( "errors" "fmt" - - "github.com/gogo/protobuf/proto" - dbm "github.com/tendermint/tm-db" + "github.com/ava-labs/avalanchego/database" abci "github.com/consideritdone/landslidecore/abci/types" tmmath "github.com/consideritdone/landslidecore/libs/math" @@ -13,6 +11,7 @@ import ( tmstate "github.com/consideritdone/landslidecore/proto/tendermint/state" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" "github.com/consideritdone/landslidecore/types" + "github.com/gogo/protobuf/proto" ) const ( @@ -74,13 +73,13 @@ type Store interface { // dbStore wraps a db (github.com/tendermint/tm-db) type dbStore struct { - db dbm.DB + db database.Database } var _ Store = (*dbStore)(nil) // NewStore creates the dbStore of the state pkg. 
-func NewStore(db dbm.DB) Store { +func NewStore(db database.Database) Store { return dbStore{db} } @@ -179,7 +178,7 @@ func (store dbStore) save(state State, key []byte) error { state.LastHeightConsensusParamsChanged, state.ConsensusParams); err != nil { return err } - err := store.db.SetSync(key, state.Bytes()) + err := store.db.Put(key, state.Bytes()) if err != nil { return err } @@ -212,7 +211,7 @@ func (store dbStore) Bootstrap(state State) error { return err } - return store.db.SetSync(stateKey, state.Bytes()) + return store.db.Put(stateKey, state.Bytes()) } // PruneStates deletes states between the given heights (including from, excluding to). It is not @@ -250,7 +249,7 @@ func (store dbStore) PruneStates(from int64, to int64) error { } batch := store.db.NewBatch() - defer batch.Close() + defer batch.Reset() pruned := uint64(0) // We have to delete in reverse order, to avoid deleting previous heights that have validator @@ -279,7 +278,7 @@ func (store dbStore) PruneStates(from int64, to int64) error { if err != nil { return err } - err = batch.Set(calcValidatorsKey(h), bz) + err = batch.Put(calcValidatorsKey(h), bz) if err != nil { return err } @@ -309,7 +308,7 @@ func (store dbStore) PruneStates(from int64, to int64) error { return err } - err = batch.Set(calcConsensusParamsKey(h), bz) + err = batch.Put(calcConsensusParamsKey(h), bz) if err != nil { return err } @@ -333,13 +332,13 @@ func (store dbStore) PruneStates(from int64, to int64) error { if err != nil { return err } - batch.Close() + batch.Reset() batch = store.db.NewBatch() - defer batch.Close() + defer batch.Reset() } } - err = batch.WriteSync() + err = batch.Write() if err != nil { return err } @@ -406,7 +405,7 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI return err } - err = store.db.SetSync(calcABCIResponsesKey(height), bz) + err = store.db.Put(calcABCIResponsesKey(height), bz) if err != nil { return err } @@ -464,7 +463,7 @@ func 
lastStoredHeightFor(height, lastHeightChanged int64) int64 { } // CONTRACT: Returned ValidatorsInfo can be mutated. -func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error) { +func loadValidatorsInfo(db database.Database, height int64) (*tmstate.ValidatorsInfo, error) { buf, err := db.Get(calcValidatorsKey(height)) if err != nil { return nil, err @@ -513,7 +512,7 @@ func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet return err } - err = store.db.Set(calcValidatorsKey(height), bz) + err = store.db.Put(calcValidatorsKey(height), bz) if err != nil { return err } @@ -588,7 +587,7 @@ func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, par return err } - err = store.db.Set(calcConsensusParamsKey(nextHeight), bz) + err = store.db.Put(calcConsensusParamsKey(nextHeight), bz) if err != nil { return err } diff --git a/state/store_test.go b/state/store_test.go index 45a4e56c1..b2a83f564 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -2,14 +2,13 @@ package state_test import ( "fmt" + "github.com/consideritdone/landslidecore/database" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" cfg "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/crypto" @@ -22,7 +21,7 @@ import ( ) func TestStoreLoadValidators(t *testing.T) { - stateDB := dbm.NewMemDB() + stateDB := memdb.New() stateStore := sm.NewStore(stateDB) val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) @@ -51,8 +50,7 @@ func BenchmarkLoadValidators(b *testing.B) { config := cfg.ResetTestRoot("state_") defer os.RemoveAll(config.RootDir) - dbType := dbm.BackendType(config.DBBackend) - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateDB, err := database.NewDB("state", config.DBBackend, config.DBDir()) 
require.NoError(b, err) stateStore := sm.NewStore(stateDB) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) @@ -106,7 +104,7 @@ func TestPruneStates(t *testing.T) { for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - db := dbm.NewMemDB() + db := memdb.New() stateStore := sm.NewStore(db) pk := ed25519.GenPrivKey().PubKey() diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 9d58e98c5..69616632f 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -1,14 +1,13 @@ package state_test import ( + "github.com/consideritdone/landslidecore/database" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - tmrand "github.com/consideritdone/landslidecore/libs/rand" sm "github.com/consideritdone/landslidecore/state" "github.com/consideritdone/landslidecore/types" @@ -31,7 +30,7 @@ func TestTxFilter(t *testing.T) { } for i, tc := range testCases { - stateDB, err := dbm.NewDB("state", "memdb", os.TempDir()) + stateDB, err := database.NewDB("state", "memdb", os.TempDir()) require.NoError(t, err) stateStore := sm.NewStore(stateDB) state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 23884065a..6a7dea683 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -5,11 +5,11 @@ import ( "context" "encoding/hex" "fmt" + "github.com/ava-labs/avalanchego/database" "strconv" "strings" "github.com/gogo/protobuf/proto" - dbm "github.com/tendermint/tm-db" abci "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/libs/pubsub/query" @@ -26,11 +26,11 @@ var _ txindex.TxIndexer = (*TxIndex)(nil) // TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). type TxIndex struct { - store dbm.DB + store database.Database } // NewTxIndex creates new KV indexer. 
-func NewTxIndex(store dbm.DB) *TxIndex { +func NewTxIndex(store database.Database) *TxIndex { return &TxIndex{ store: store, } @@ -66,7 +66,7 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { // Any event with an empty type is not indexed. func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() - defer storeBatch.Close() + defer storeBatch.Reset() for _, result := range b.Ops { hash := types.Tx(result.Tx).Hash() @@ -78,7 +78,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { } // index by height (always) - err = storeBatch.Set(keyForHeight(result), hash) + err = storeBatch.Put(keyForHeight(result), hash) if err != nil { return err } @@ -88,13 +88,13 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { return err } // index by hash (always) - err = storeBatch.Set(hash, rawBytes) + err = storeBatch.Put(hash, rawBytes) if err != nil { return err } } - return storeBatch.WriteSync() + return storeBatch.Write() } // Index indexes a single transaction using the given list of events. Each key @@ -103,7 +103,7 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { // Any event with an empty type is not indexed. 
func (txi *TxIndex) Index(result *abci.TxResult) error { b := txi.store.NewBatch() - defer b.Close() + defer b.Reset() hash := types.Tx(result.Tx).Hash() @@ -114,7 +114,7 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { } // index by height (always) - err = b.Set(keyForHeight(result), hash) + err = b.Put(keyForHeight(result), hash) if err != nil { return err } @@ -124,15 +124,15 @@ func (txi *TxIndex) Index(result *abci.TxResult) error { return err } // index by hash (always) - err = b.Set(hash, rawBytes) + err = b.Put(hash, rawBytes) if err != nil { return err } - return b.WriteSync() + return b.Write() } -func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Batch) error { +func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store database.Batch) error { for _, event := range result.Result.Events { // only index events with a non-empty type if len(event.Type) == 0 { @@ -147,7 +147,7 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Ba // index if `index: true` is set compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) if attr.GetIndex() { - err := store.Set(keyForEvent(compositeTag, attr.Value, result), hash) + err := store.Put(keyForEvent(compositeTag, attr.Value, result), hash) if err != nil { return err } @@ -312,13 +312,10 @@ func (txi *TxIndex) match( switch { case c.Op == query.OpEqual: - it, err := dbm.IteratePrefix(txi.store, startKeyBz) - if err != nil { - panic(err) - } - defer it.Close() + it := txi.store.NewIteratorWithPrefix(startKey(startKeyBz)) + defer it.Release() - for ; it.Valid(); it.Next() { + for ; it.Error() == nil && len(it.Key()) > 0; it.Next() { tmpHashes[string(it.Value())] = it.Value() // Potentially exit early. @@ -335,13 +332,10 @@ func (txi *TxIndex) match( case c.Op == query.OpExists: // XXX: can't use startKeyBz here because c.Operand is nil // (e.g. 
"account.owner//" won't match w/ a single row) - it, err := dbm.IteratePrefix(txi.store, startKey(c.CompositeKey)) - if err != nil { - panic(err) - } - defer it.Close() + it := txi.store.NewIteratorWithPrefix(startKey(c.CompositeKey)) + defer it.Release() - for ; it.Valid(); it.Next() { + for ; it.Error() == nil && len(it.Key()) > 0; it.Next() { tmpHashes[string(it.Value())] = it.Value() // Potentially exit early. @@ -359,13 +353,10 @@ func (txi *TxIndex) match( // XXX: startKey does not apply here. // For example, if startKey = "account.owner/an/" and search query = "account.owner CONTAINS an" // we can't iterate with prefix "account.owner/an/" because we might miss keys like "account.owner/Ulan/" - it, err := dbm.IteratePrefix(txi.store, startKey(c.CompositeKey)) - if err != nil { - panic(err) - } - defer it.Close() + it := txi.store.NewIteratorWithPrefix(startKey(c.CompositeKey)) + defer it.Release() - for ; it.Valid(); it.Next() { + for ; it.Error() == nil && len(it.Key()) > 0; it.Next() { if !isTagKey(it.Key()) { continue } @@ -439,14 +430,11 @@ func (txi *TxIndex) matchRange( lowerBound := qr.LowerBoundValue() upperBound := qr.UpperBoundValue() - it, err := dbm.IteratePrefix(txi.store, startKey) - if err != nil { - panic(err) - } - defer it.Close() + it := txi.store.NewIteratorWithPrefix(startKey) + defer it.Release() LOOP: - for ; it.Valid(); it.Next() { + for ; it.Error() == nil && len(it.Key()) > 0; it.Next() { if !isTagKey(it.Key()) { continue } diff --git a/state/txindex/kv/kv_bench_test.go b/state/txindex/kv/kv_bench_test.go index b1c8a3c79..462eb5e7d 100644 --- a/state/txindex/kv/kv_bench_test.go +++ b/state/txindex/kv/kv_bench_test.go @@ -4,11 +4,12 @@ import ( "context" "crypto/rand" "fmt" + "github.com/ava-labs/avalanchego/database/leveldb" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/prometheus/client_golang/prometheus" "io/ioutil" "testing" - dbm "github.com/tendermint/tm-db" - abci 
"github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/libs/pubsub/query" "github.com/consideritdone/landslidecore/types" @@ -20,7 +21,10 @@ func BenchmarkTxSearch(b *testing.B) { b.Errorf("failed to create temporary directory: %s", err) } - db, err := dbm.NewGoLevelDB("benchmark_tx_search_test", dbDir) + name := "benchmark_tx_search_test" + logger := logging.NewLogger(name) + db, err := leveldb.New(dbDir, []byte{}, logger, name, prometheus.NewRegistry()) + if err != nil { b.Errorf("failed to create database: %s", err) } diff --git a/statesync/stateprovider.go b/statesync/stateprovider.go index 41ad19878..fc15a8262 100644 --- a/statesync/stateprovider.go +++ b/statesync/stateprovider.go @@ -3,11 +3,10 @@ package statesync import ( "context" "fmt" + "github.com/ava-labs/avalanchego/database/memdb" "strings" "time" - dbm "github.com/tendermint/tm-db" - "github.com/consideritdone/landslidecore/libs/log" tmsync "github.com/consideritdone/landslidecore/libs/sync" "github.com/consideritdone/landslidecore/light" @@ -73,7 +72,7 @@ func NewLightClientStateProvider( } lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:], - lightdb.New(dbm.NewMemDB(), ""), light.Logger(logger), light.MaxRetryAttempts(5)) + lightdb.New(memdb.New(), ""), light.Logger(logger), light.MaxRetryAttempts(5)) if err != nil { return nil, err } diff --git a/store/store.go b/store/store.go index c83226ccc..4166141f1 100644 --- a/store/store.go +++ b/store/store.go @@ -2,15 +2,14 @@ package store import ( "fmt" + "github.com/ava-labs/avalanchego/database" "strconv" - "github.com/gogo/protobuf/proto" - dbm "github.com/tendermint/tm-db" - tmsync "github.com/consideritdone/landslidecore/libs/sync" tmstore "github.com/consideritdone/landslidecore/proto/tendermint/store" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" "github.com/consideritdone/landslidecore/types" + "github.com/gogo/protobuf/proto" ) /* @@ -31,7 
+30,7 @@ The store can be assumed to contain all contiguous blocks between base and heigh // deserializing loaded data, indicating probable corruption on disk. */ type BlockStore struct { - db dbm.DB + db database.Database // mtx guards access to the struct fields listed below it. We rely on the database to enforce // fine-grained concurrency control for its data, and thus this mutex does not apply to @@ -45,7 +44,7 @@ type BlockStore struct { // NewBlockStore returns a new BlockStore with the given DB, // initialized to the last height that was committed to the DB. -func NewBlockStore(db dbm.DB) *BlockStore { +func NewBlockStore(db database.Database) *BlockStore { bs := LoadBlockStoreState(db) return &BlockStore{ base: bs.Base, @@ -263,8 +262,8 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { pruned := uint64(0) batch := bs.db.NewBatch() - defer batch.Close() - flush := func(batch dbm.Batch, base int64) error { + defer batch.Reset() + flush := func(batch database.Batch, base int64) error { // We can't trust batches to be atomic, so update base first to make sure noone // tries to access missing blocks. 
bs.mtx.Lock() @@ -272,11 +271,11 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { bs.mtx.Unlock() bs.saveState() - err := batch.WriteSync() + err := batch.Write() if err != nil { return fmt.Errorf("failed to prune up to height %v: %w", base, err) } - batch.Close() + batch.Reset() return nil } @@ -311,7 +310,7 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { return 0, err } batch = bs.db.NewBatch() - defer batch.Close() + defer batch.Reset() } } @@ -360,17 +359,17 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s panic("nil blockmeta") } metaBytes := mustEncode(pbm) - if err := bs.db.Set(calcBlockMetaKey(height), metaBytes); err != nil { + if err := bs.db.Put(calcBlockMetaKey(height), metaBytes); err != nil { panic(err) } - if err := bs.db.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { + if err := bs.db.Put(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { panic(err) } // Save block commit (duplicate and separate from the Block) pbc := block.LastCommit.ToProto() blockCommitBytes := mustEncode(pbc) - if err := bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes); err != nil { + if err := bs.db.Put(calcBlockCommitKey(height-1), blockCommitBytes); err != nil { panic(err) } @@ -378,7 +377,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s // NOTE: we can delete this at a later height pbsc := seenCommit.ToProto() seenCommitBytes := mustEncode(pbsc) - if err := bs.db.Set(calcSeenCommitKey(height), seenCommitBytes); err != nil { + if err := bs.db.Put(calcSeenCommitKey(height), seenCommitBytes); err != nil { panic(err) } @@ -407,7 +406,7 @@ func (bs *BlockStore) SaveBlockWOParts(block *types.Block) { } // Save block meta - if err := bs.db.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { + if err := bs.db.Put(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil { 
panic(err) } @@ -431,7 +430,7 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { panic(fmt.Errorf("unable to make part into proto: %w", err)) } partBytes := mustEncode(pbp) - if err := bs.db.Set(calcBlockPartKey(height, index), partBytes); err != nil { + if err := bs.db.Put(calcBlockPartKey(height, index), partBytes); err != nil { panic(err) } } @@ -453,7 +452,7 @@ func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) err if err != nil { return fmt.Errorf("unable to marshal commit: %w", err) } - return bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) + return bs.db.Put(calcSeenCommitKey(height), seenCommitBytes) } func (bs *BlockStore) Close() error { @@ -487,19 +486,19 @@ func calcBlockHashKey(hash []byte) []byte { var blockStoreKey = []byte("blockStore") // SaveBlockStoreState persists the blockStore state to the database. -func SaveBlockStoreState(bsj *tmstore.BlockStoreState, db dbm.DB) { +func SaveBlockStoreState(bsj *tmstore.BlockStoreState, db database.Database) { bytes, err := proto.Marshal(bsj) if err != nil { panic(fmt.Sprintf("Could not marshal state bytes: %v", err)) } - if err := db.SetSync(blockStoreKey, bytes); err != nil { + if err := db.Put(blockStoreKey, bytes); err != nil { panic(err) } } // LoadBlockStoreState returns the BlockStoreState as loaded from disk. // If no BlockStoreState was previously persisted, it returns the zero value. 
-func LoadBlockStoreState(db dbm.DB) tmstore.BlockStoreState { +func LoadBlockStoreState(db database.Database) tmstore.BlockStoreState { bytes, err := db.Get(blockStoreKey) if err != nil { panic(err) diff --git a/store/store_test.go b/store/store_test.go index 8da35b4e7..01da76ecf 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -3,6 +3,8 @@ package store import ( "bytes" "fmt" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" "os" "runtime/debug" "strings" @@ -12,7 +14,6 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" cfg "github.com/consideritdone/landslidecore/config" "github.com/consideritdone/landslidecore/crypto" @@ -56,10 +57,8 @@ func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Bl func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) { config := cfg.ResetTestRoot("blockchain_reactor_test") - // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB()) - // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) - blockDB := dbm.NewMemDB() - stateDB := dbm.NewMemDB() + blockDB := memdb.New() + stateDB := memdb.New() stateStore := sm.NewStore(stateDB) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { @@ -84,7 +83,7 @@ func TestLoadBlockStoreState(t *testing.T) { } for _, tc := range testCases { - db := dbm.NewMemDB() + db := memdb.New() SaveBlockStoreState(tc.bss, db) retrBSJ := LoadBlockStoreState(db) assert.Equal(t, tc.want, retrBSJ, "expected the retrieved DBs to match: %s", tc.testName) @@ -92,7 +91,7 @@ func TestLoadBlockStoreState(t *testing.T) { } func TestNewBlockStore(t *testing.T) { - db := dbm.NewMemDB() + db := memdb.New() bss := tmstore.BlockStoreState{Base: 100, Height: 10000} bz, _ := proto.Marshal(&bss) err := db.Set(blockStoreKey, bz) @@ -128,8 +127,8 @@ func TestNewBlockStore(t *testing.T) { 
assert.Equal(t, bs.Height(), int64(0), "expecting empty bytes to be unmarshaled alright") } -func freshBlockStore() (*BlockStore, dbm.DB) { - db := dbm.NewMemDB() +func freshBlockStore() (*BlockStore, database.Database) { + db := memdb.New() return NewBlockStore(db), db } @@ -369,10 +368,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { func TestLoadBaseMeta(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_test") defer os.RemoveAll(config.RootDir) - stateStore := sm.NewStore(dbm.NewMemDB()) + stateStore := sm.NewStore(memdb.New()) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) require.NoError(t, err) - bs := NewBlockStore(dbm.NewMemDB()) + bs := NewBlockStore(memdb.New()) for h := int64(1); h <= 10; h++ { block := makeBlock(h, state, new(types.Commit)) @@ -425,10 +424,10 @@ func TestLoadBlockPart(t *testing.T) { func TestPruneBlocks(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_test") defer os.RemoveAll(config.RootDir) - stateStore := sm.NewStore(dbm.NewMemDB()) + stateStore := sm.NewStore(memdb.New()) state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) require.NoError(t, err) - db := dbm.NewMemDB() + db := memdb.New() bs := NewBlockStore(db) assert.EqualValues(t, 0, bs.Base()) assert.EqualValues(t, 0, bs.Height()) diff --git a/test/maverick/consensus/replay_file.go b/test/maverick/consensus/replay_file.go index e99c02cda..8e280b112 100644 --- a/test/maverick/consensus/replay_file.go +++ b/test/maverick/consensus/replay_file.go @@ -5,13 +5,12 @@ import ( "context" "errors" "fmt" + "github.com/consideritdone/landslidecore/database" "io" "os" "strconv" "strings" - dbm "github.com/tendermint/tm-db" - cfg "github.com/consideritdone/landslidecore/config" tmcon "github.com/consideritdone/landslidecore/consensus" "github.com/consideritdone/landslidecore/libs/log" @@ -285,16 +284,15 @@ func (pb *playback) replayConsoleLoop() int { // convenience for replay mode func 
newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State { - dbType := dbm.BackendType(config.DBBackend) // Get BlockStore - blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir()) + blockStoreDB, err := database.NewDB("blockstore", config.DBBackend, config.DBDir()) if err != nil { tmos.Exit(err.Error()) } blockStore := store.NewBlockStore(blockStoreDB) // Get State - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateDB, err := database.NewDB("state", config.DBBackend, config.DBDir()) if err != nil { tmos.Exit(err.Error()) } diff --git a/test/maverick/node/node.go b/test/maverick/node/node.go index 018d55ff6..0535dee96 100644 --- a/test/maverick/node/node.go +++ b/test/maverick/node/node.go @@ -5,6 +5,9 @@ import ( "context" "errors" "fmt" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" + landslidedb "github.com/consideritdone/landslidecore/database" "net" "net/http" _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port @@ -16,8 +19,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" - dbm "github.com/tendermint/tm-db" - abci "github.com/consideritdone/landslidecore/abci/types" bcv0 "github.com/consideritdone/landslidecore/blockchain/v0" bcv1 "github.com/consideritdone/landslidecore/blockchain/v1" @@ -93,13 +94,12 @@ type DBContext struct { } // DBProvider takes a DBContext and returns an instantiated DB. -type DBProvider func(*DBContext) (dbm.DB, error) +type DBProvider func(*DBContext) (database.Database, error) // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the ctx.Config. 
-func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { - dbType := dbm.BackendType(ctx.Config.DBBackend) - return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) +func DefaultDBProvider(ctx *DBContext) (database.Database, error) { + return landslidedb.NewDB(ctx.ID, ctx.Config.DBBackend, ctx.Config.DBDir()) } // GenesisDocProvider returns a GenesisDoc. @@ -256,8 +256,8 @@ type Node struct { prometheusSrv *http.Server } -func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { - var blockStoreDB dbm.DB +func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB database.Database, err error) { + var blockStoreDB database.Database blockStoreDB, err = dbProvider(&DBContext{"blockstore", config}) if err != nil { return @@ -310,7 +310,7 @@ func createAndStartIndexerService( } txIndexer = kv.NewTxIndex(store) - blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events"))) + blockIndexer = blockidxkv.New(prefixdb.New(store, []byte("block_events"))) default: txIndexer = &null.TxIndex{} blockIndexer = &blockidxnull.BlockerIndexer{} @@ -400,7 +400,7 @@ func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, } func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, - stateDB dbm.DB, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { + stateDB database.Database, blockStore *store.BlockStore, logger log.Logger) (*evidence.Reactor, *evidence.Pool, error) { evidenceDB, err := dbProvider(&DBContext{"evidence", config}) if err != nil { @@ -1361,7 +1361,7 @@ var ( // result to the database. On success this also returns the genesis doc loaded // through the given provider. 
func LoadStateFromDBOrGenesisDocProvider( - stateDB dbm.DB, + stateDB database.Database, genesisDocProvider GenesisDocProvider, ) (sm.State, *types.GenesisDoc, error) { // Get genesis doc @@ -1384,7 +1384,7 @@ func LoadStateFromDBOrGenesisDocProvider( } // panics if failed to unmarshal bytes -func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { +func loadGenesisDoc(db database.Database) (*types.GenesisDoc, error) { b, err := db.Get(genesisDocKey) if err != nil { panic(err) @@ -1401,7 +1401,7 @@ func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { } // panics if failed to marshal the given genesis document -func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { +func saveGenesisDoc(db database.Database, genDoc *types.GenesisDoc) { b, err := tmjson.Marshal(genDoc) if err != nil { panic(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) diff --git a/vm/db_test.go b/vm/db_test.go new file mode 100644 index 000000000..47d05d872 --- /dev/null +++ b/vm/db_test.go @@ -0,0 +1,208 @@ +package vm + +import ( + "bytes" + "fmt" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" + "math/rand" + "sync" + "testing" +) + +var ( + testDBPrefix = []byte("test") +) + +func TestMemDB(t *testing.T) { + vm, _, _ := mustNewKVTestVm(t) + baseDB := vm.dbManager.Current().Database + db := Database{prefixdb.NewNested(testDBPrefix, baseDB)} + t.Run("PrefixDB", func(t *testing.T) { Run(t, db) }) + t.Run("BaseDB(MemDB)", func(t *testing.T) { RunAvaDatabase(t, baseDB) }) +} + +// Run generates concurrent reads and writes to db so the race detector can +// verify concurrent operations are properly synchronized. +// The contents of db are garbage after Run returns. 
+func Run(t *testing.T, db Database) {
+	t.Helper()
+
+	const numWorkers = 10
+	const numKeys = 64
+
+	var wg sync.WaitGroup
+	for i := 0; i < numWorkers; i++ {
+		wg.Add(1)
+		i := i
+		go func() {
+			defer wg.Done()
+
+			// Insert a bunch of keys with random data.
+			for k := 1; k <= numKeys; k++ {
+				key := taskKey(i, k) // say, "task--key-"
+				value := randomValue()
+				if err := db.Set(key, value); err != nil {
+					t.Errorf("Task %d: db.Set(%q=%q) failed: %v",
+						i, string(key), string(value), err)
+				}
+			}
+
+			// Iterate over the database to make sure our keys are there.
+			it, err := db.Iterator(nil, nil)
+			if err != nil {
+				t.Errorf("Iterator[%d]: %v", i, err)
+				return
+			}
+			found := make(map[string][]byte)
+			mine := []byte(fmt.Sprintf("task-%d-", i))
+			for {
+				if !it.Valid() {
+					break
+				}
+				if key := it.Key(); bytes.HasPrefix(key, mine) {
+					found[string(key)] = it.Value()
+				}
+				it.Next()
+			}
+			if err := it.Error(); err != nil {
+				t.Errorf("Iterator[%d] reported error: %v", i, err)
+			}
+			if err := it.Close(); err != nil {
+				t.Errorf("Close iterator[%d]: %v", i, err)
+			}
+			if len(found) != numKeys {
+				t.Errorf("Task %d: found %d keys, wanted %d", i, len(found), numKeys)
+			}
+
+			for key, value := range mine {
+				fmt.Println("--")
+				fmt.Println(key)
+				fmt.Println(value)
+				fmt.Println("--")
+			}
+
+			// Delete all the keys we inserted.
+			for k := 1; k <= numKeys; k++ {
+				key := taskKey(i, k) // say, "task--key-"
+				if err := db.Delete(key); err != nil {
+					t.Errorf("Delete %q: %v", key, err)
+				}
+			}
+			// Iterate over the database to make sure our keys are gone.
+			it, err = db.Iterator(nil, nil)
+			if err != nil {
+				t.Errorf("Iterator[%d]: %v", i, err)
+				return
+			}
+			foundAfterRemoval := make(map[string][]byte)
+			for {
+				if !it.Valid() {
+					break
+				}
+				if key := it.Key(); bytes.HasPrefix(key, mine) {
+					foundAfterRemoval[string(key)] = it.Value()
+				}
+				it.Next()
+			}
+			if len(foundAfterRemoval) != 0 {
+				t.Errorf("Values left after deletion: %v", foundAfterRemoval)
+				return
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+// RunAvaDatabase generates concurrent reads and writes to db so the race detector can
+// verify concurrent operations are properly synchronized.
+// The contents of db are garbage after RunAvaDatabase returns.
+func RunAvaDatabase(t *testing.T, db database.Database) {
+	t.Helper()
+
+	const numWorkers = 10
+	const numKeys = 64
+
+	var wg sync.WaitGroup
+	for i := 0; i < numWorkers; i++ {
+		wg.Add(1)
+		i := i
+		go func() {
+			defer wg.Done()
+
+			// Insert a bunch of keys with random data.
+			for k := 1; k <= numKeys; k++ {
+				key := taskKey(i, k) // say, "task--key-"
+				value := randomValue()
+				if err := db.Put(key, value); err != nil {
+					t.Errorf("Task %d: db.Put(%q=%q) failed: %v",
+						i, string(key), string(value), err)
+				}
+			}
+
+			// Iterate over the database to make sure our keys are there.
+			it := db.NewIterator()
+			found := make(map[string][]byte)
+			mine := []byte(fmt.Sprintf("task-%d-", i))
+			for {
+				if !it.Next() {
+					break
+				}
+				key := it.Key()
+				if bytes.HasPrefix(key, mine) {
+					found[string(key)] = it.Value()
+				}
+			}
+			if err := it.Error(); err != nil {
+				t.Errorf("Iterator[%d] reported error: %v", i, err)
+			}
+			it.Release()
+
+			if len(found) != numKeys {
+				t.Errorf("Task %d: found %d keys, wanted %d", i, len(found), numKeys)
+			}
+
+			for key, value := range mine {
+				fmt.Println("--")
+				fmt.Println(key)
+				fmt.Println(value)
+				fmt.Println("--")
+			}
+
+			// Delete all the keys we inserted.
+			for k := 1; k <= numKeys; k++ {
+				key := taskKey(i, k) // say, "task--key-"
+				if err := db.Delete(key); err != nil {
+					t.Errorf("Delete %q: %v", key, err)
+				}
+			}
+			// Iterate over the database to make sure our keys are gone.
+			it = db.NewIterator()
+			foundAfterRemoval := make(map[string][]byte)
+			for {
+				if !it.Next() {
+					break
+				}
+				key := it.Key()
+				if bytes.HasPrefix(key, mine) {
+					foundAfterRemoval[string(key)] = it.Value()
+				}
+			}
+			if len(foundAfterRemoval) != 0 {
+				t.Errorf("Values left after deletion: %v", foundAfterRemoval)
+				return
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func taskKey(i, k int) []byte {
+	return []byte(fmt.Sprintf("task-%d-key-%d", i, k))
+}
+
+func randomValue() []byte {
+	value := []byte("value-")
+	dec := rand.Uint32()
+	return []byte(fmt.Sprintf("%s%d", value, dec))
+}
diff --git a/vm/service.go b/vm/service.go
index 7caa1bcbd..5cac2102b 100644
--- a/vm/service.go
+++ b/vm/service.go
@@ -4,22 +4,28 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"net/http"
+	tmpubsub "github.com/consideritdone/landslidecore/libs/pubsub"
+	"github.com/consideritdone/landslidecore/proxy"
+	rpctypes "github.com/consideritdone/landslidecore/rpc/jsonrpc/types"
+	blockidxnull "github.com/consideritdone/landslidecore/state/indexer/block/null"
+	"github.com/consideritdone/landslidecore/state/txindex/null"
 	"sort"
 	"time"
 
 	abci "github.com/consideritdone/landslidecore/abci/types"
+	"github.com/consideritdone/landslidecore/libs/bytes"
 	tmbytes "github.com/consideritdone/landslidecore/libs/bytes"
 	tmmath "github.com/consideritdone/landslidecore/libs/math"
 	tmquery "github.com/consideritdone/landslidecore/libs/pubsub/query"
 	mempl "github.com/consideritdone/landslidecore/mempool"
 	"github.com/consideritdone/landslidecore/p2p"
-	"github.com/consideritdone/landslidecore/proxy"
 	"github.com/consideritdone/landslidecore/rpc/core"
 	ctypes "github.com/consideritdone/landslidecore/rpc/core/types"
 	"github.com/consideritdone/landslidecore/types"
 )
 
+const SubscribeTimeout = 5 *
time.Second + type ( LocalService struct { vm *VM @@ -27,196 +33,220 @@ type ( Service interface { ABCIService - HistoryService - NetworkService - SignService - StatusService - MempoolService - } - - ABCIQueryArgs struct { - Path string `json:"path"` - Data tmbytes.HexBytes `json:"data"` - } - - ABCIQueryOptions struct { - Height int64 `json:"height"` - Prove bool `json:"prove"` - } - - ABCIQueryWithOptionsArgs struct { - Path string `json:"path"` - Data tmbytes.HexBytes `json:"data"` - Opts ABCIQueryOptions `json:"opts"` - } - - BroadcastTxArgs struct { - Tx types.Tx `json:"tx"` + EventsService + HistoryClient + NetworkClient + SignClient + StatusClient + MempoolClient } ABCIService interface { // Reading from abci app - ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error - ABCIQuery(_ *http.Request, args *ABCIQueryArgs, reply *ctypes.ResultABCIQuery) error - ABCIQueryWithOptions(_ *http.Request, args *ABCIQueryWithOptionsArgs, reply *ctypes.ResultABCIQuery) error + ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) + ABCIQuery(ctx *rpctypes.Context, path string, data bytes.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) // Writing to abci app - BroadcastTxCommit(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTxCommit) error - BroadcastTxAsync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error - BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error + BroadcastTxCommit(*rpctypes.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(*rpctypes.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxSync(*rpctypes.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) } - BlockHeightArgs struct { - Height *int64 `json:"height"` + EventsService interface { + Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) + Unsubscribe(ctx *rpctypes.Context, query string) 
(*ctypes.ResultUnsubscribe, error) + UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) } - BlockHashArgs struct { - Hash []byte `json:"hash"` + HistoryClient interface { + Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) + GenesisChunked(*rpctypes.Context, uint) (*ctypes.ResultGenesisChunk, error) + BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) } - CommitArgs struct { - Height *int64 `json:"height"` + MempoolClient interface { + UnconfirmedTxs(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) + NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) + CheckTx(*rpctypes.Context, types.Tx) (*ctypes.ResultCheckTx, error) } - ValidatorsArgs struct { - Height *int64 `json:"height"` - Page *int `json:"page"` - PerPage *int `json:"perPage"` + NetworkClient interface { + NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) + DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) + ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) + ConsensusParams(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) + Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) } - TxArgs struct { - Hash []byte `json:"hash"` - Prove bool `json:"prove"` - } + SignClient interface { + Block(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) + BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) + BlockResults(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) + Commit(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) + Validators(ctx *rpctypes.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) + Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) - TxSearchArgs struct { - Query string `json:"query"` - Prove bool `json:"prove"` - Page *int 
`json:"page"` - PerPage *int `json:"perPage"` - OrderBy string `json:"orderBy"` - } + TxSearch(ctx *rpctypes.Context, query string, prove bool, + page, perPage *int, orderBy string) (*ctypes.ResultTxSearch, error) - BlockSearchArgs struct { - Query string `json:"query"` - Page *int `json:"page"` - PerPage *int `json:"perPage"` - OrderBy string `json:"orderBy"` + BlockSearch(ctx *rpctypes.Context, query string, + page, perPage *int, orderBy string) (*ctypes.ResultBlockSearch, error) } - SignService interface { - Block(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlock) error - BlockByHash(_ *http.Request, args *BlockHashArgs, reply *ctypes.ResultBlock) error - BlockResults(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlockResults) error - Commit(_ *http.Request, args *CommitArgs, reply *ctypes.ResultCommit) error - Validators(_ *http.Request, args *ValidatorsArgs, reply *ctypes.ResultValidators) error - Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error - TxSearch(_ *http.Request, args *TxSearchArgs, reply *ctypes.ResultTxSearch) error - BlockSearch(_ *http.Request, args *BlockSearchArgs, reply *ctypes.ResultBlockSearch) error + StatusClient interface { + Status(*rpctypes.Context) (*ctypes.ResultStatus, error) } +) - BlockchainInfoArgs struct { - MinHeight int64 `json:"minHeight"` - MaxHeight int64 `json:"maxHeight"` - } +func (s *LocalService) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { + addr := ctx.RemoteAddr() - GenesisChunkedArgs struct { - Chunk uint `json:"chunk"` + if s.vm.eventBus.NumClients() >= s.vm.rpcConfig.MaxSubscriptionClients { + return nil, fmt.Errorf("max_subscription_clients %d reached", s.vm.rpcConfig.MaxSubscriptionClients) + } else if s.vm.eventBus.NumClientSubscriptions(addr) >= s.vm.rpcConfig.MaxSubscriptionsPerClient { + return nil, fmt.Errorf("max_subscriptions_per_client %d reached", s.vm.rpcConfig.MaxSubscriptionsPerClient) } - HistoryService interface { - 
BlockchainInfo(_ *http.Request, args *BlockchainInfoArgs, reply *ctypes.ResultBlockchainInfo) error - Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error - GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error - } + s.vm.tmLogger.Info("Subscribe to query", "remote", addr, "query", query) - StatusService interface { - Status(_ *http.Request, _ *struct{}, reply *ctypes.ResultStatus) error + q, err := tmquery.New(query) + if err != nil { + return nil, fmt.Errorf("failed to parse query: %w", err) } - ConsensusParamsArgs struct { - Height *int64 `json:"height"` - } + subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) + defer cancel() - NetworkService interface { - NetInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultNetInfo) error - DumpConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultDumpConsensusState) error - ConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultConsensusState) error - ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error - Health(_ *http.Request, _ *struct{}, reply *ctypes.ResultHealth) error - } + sub, err := s.vm.eventBus.Subscribe(subCtx, addr, q, s.vm.rpcConfig.SubscriptionBufferSize) + if err != nil { + return nil, err + } + + closeIfSlow := s.vm.rpcConfig.CloseOnSlowClient + + // TODO: inspired by Ilnur: usage of ctx.JSONReq.ID may cause situation when user or server try to create multiple subscriptions with the same id. + // Solution: return error code with the error sescription when this situation happens + // Capture the current ID, since it can change in the future. 
+	subscriptionID := ctx.JSONReq.ID
+	go func() {
+		for {
+			select {
+			case msg := <-sub.Out():
+				var (
+					resultEvent = &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()}
+					resp        = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent)
+				)
+				writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+				err = ctx.WSConn.WriteRPCResponse(writeCtx, resp)
+				if cancel(); err != nil {
+					s.vm.tmLogger.Info("Can't write response (slow client)",
+						"to", addr, "subscriptionID", subscriptionID, "err", err)
+
+					if closeIfSlow {
+						var (
+							err  = errors.New("subscription was cancelled (reason: slow client)")
+							resp = rpctypes.RPCServerError(subscriptionID, err)
+						)
+						if !ctx.WSConn.TryWriteRPCResponse(resp) {
+							s.vm.tmLogger.Info("Can't write response (slow client)",
+								"to", addr, "subscriptionID", subscriptionID, "err", err)
+						}
+						return
+					}
+				}
+			case <-sub.Cancelled():
+				if sub.Err() != tmpubsub.ErrUnsubscribed {
+					var reason string
+					if sub.Err() == nil {
+						reason = "Tendermint exited"
+					} else {
+						reason = sub.Err().Error()
+					}
+					resp := rpctypes.RPCServerError(subscriptionID, fmt.Errorf("subscription was cancelled (reason: %s)", reason))
+					if !ctx.WSConn.TryWriteRPCResponse(resp) {
+						s.vm.tmLogger.Info("Can't write response (slow client)",
+							"to", addr, "subscriptionID", subscriptionID, "err",
+							fmt.Errorf("subscription was cancelled (reason: %s)", reason))
+					}
+				}
+				return
+			}
+		}
+	}()
 
-	UnconfirmedTxsArgs struct {
-		Limit *int `json:"limit"`
-	}
+	return &ctypes.ResultSubscribe{}, nil
+}
 
-	CheckTxArgs struct {
-		Tx []byte `json:"tx"`
+func (s *LocalService) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) {
+	addr := ctx.RemoteAddr()
+	s.vm.tmLogger.Info("Unsubscribe from query", "remote", addr, "query", query)
+	q, err := tmquery.New(query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse query: %w", err)
 	}
-
-	MempoolService interface {
-		UnconfirmedTxs(_ *http.Request, args *UnconfirmedTxsArgs, reply *ctypes.ResultUnconfirmedTxs)
error - NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply *ctypes.ResultUnconfirmedTxs) error - CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error + err = s.vm.eventBus.Unsubscribe(context.Background(), addr, q) + if err != nil { + return nil, err } -) + return &ctypes.ResultUnsubscribe{}, nil +} -var ( - DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false} -) +func (s *LocalService) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { + addr := ctx.RemoteAddr() + s.vm.tmLogger.Info("Unsubscribe from all", "remote", addr) + err := s.vm.eventBus.UnsubscribeAll(context.Background(), addr) + if err != nil { + return nil, err + } + return &ctypes.ResultUnsubscribe{}, nil +} func NewService(vm *VM) Service { return &LocalService{vm} } -func (s *LocalService) ABCIInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultABCIInfo) error { +func (s *LocalService) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := s.vm.proxyApp.Query().InfoSync(proxy.RequestInfo) - if err != nil { - return err + if err != nil || resInfo == nil { + return nil, err } - reply.Response = *resInfo - return nil -} - -func (s *LocalService) ABCIQuery(req *http.Request, args *ABCIQueryArgs, reply *ctypes.ResultABCIQuery) error { - return s.ABCIQueryWithOptions(req, &ABCIQueryWithOptionsArgs{args.Path, args.Data, DefaultABCIQueryOptions}, reply) + return &ctypes.ResultABCIInfo{Response: *resInfo}, nil } -func (s *LocalService) ABCIQueryWithOptions( - _ *http.Request, - args *ABCIQueryWithOptionsArgs, - reply *ctypes.ResultABCIQuery, -) error { +// TODO: attention! 
Different signatures in RPC interfaces +func (s *LocalService) ABCIQuery( + ctx *rpctypes.Context, + path string, + data bytes.HexBytes, + height int64, + prove bool, +) (*ctypes.ResultABCIQuery, error) { resQuery, err := s.vm.proxyApp.Query().QuerySync(abci.RequestQuery{ - Path: args.Path, - Data: args.Data, - Height: args.Opts.Height, - Prove: args.Opts.Prove, + Path: path, + Data: data, + Height: height, + Prove: prove, }) - if err != nil { - return err + if err != nil || resQuery == nil { + return nil, err } - reply.Response = *resQuery - return nil + + return &ctypes.ResultABCIQuery{Response: *resQuery}, nil } -func (s *LocalService) BroadcastTxCommit( - _ *http.Request, - args *BroadcastTxArgs, - reply *ctypes.ResultBroadcastTxCommit, -) error { +func (s *LocalService) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { subscriber := "" // Subscribe to tx being committed in block. - subCtx, cancel := context.WithTimeout(context.Background(), core.SubscribeTimeout) + subCtx, cancel := context.WithTimeout(ctx.Context(), core.SubscribeTimeout) defer cancel() - q := types.EventQueryTxFor(args.Tx) + q := types.EventQueryTxFor(tx) deliverTxSub, err := s.vm.eventBus.Subscribe(subCtx, subscriber, q) if err != nil { err = fmt.Errorf("failed to subscribe to tx: %w", err) s.vm.tmLogger.Error("Error on broadcast_tx_commit", "err", err) - return err + return nil, err } defer func() { @@ -227,35 +257,33 @@ func (s *LocalService) BroadcastTxCommit( // Broadcast tx and wait for CheckTx result checkTxResCh := make(chan *abci.Response, 1) - err = s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { + err = s.vm.mempool.CheckTx(tx, func(res *abci.Response) { checkTxResCh <- res }, mempl.TxInfo{}) if err != nil { s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) - return fmt.Errorf("error on broadcastTxCommit: %v", err) + return nil, fmt.Errorf("error on broadcastTxCommit: %v", err) } checkTxResMsg := <-checkTxResCh 
checkTxRes := checkTxResMsg.GetCheckTx() if checkTxRes.Code != abci.CodeTypeOK { - *reply = ctypes.ResultBroadcastTxCommit{ + return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, DeliverTx: abci.ResponseDeliverTx{}, - Hash: args.Tx.Hash(), - } - return nil + Hash: tx.Hash(), + }, nil } // Wait for the tx to be included in a block or timeout. select { case msg := <-deliverTxSub.Out(): // The tx was included in a block. deliverTxRes := msg.Data().(types.EventDataTx) - *reply = ctypes.ResultBroadcastTxCommit{ + return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, DeliverTx: deliverTxRes.Result, - Hash: args.Tx.Hash(), + Hash: tx.Hash(), Height: deliverTxRes.Height, - } - return nil + }, nil case <-deliverTxSub.Cancelled(): var reason string if deliverTxSub.Err() == nil { @@ -265,192 +293,193 @@ func (s *LocalService) BroadcastTxCommit( } err = fmt.Errorf("deliverTxSub was cancelled (reason: %s)", reason) s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) - return err + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxRes, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: tx.Hash(), + }, err // TODO: use config for timeout case <-time.After(10 * time.Second): err = errors.New("timed out waiting for tx to be included in a block") s.vm.tmLogger.Error("Error on broadcastTxCommit", "err", err) - return err + return &ctypes.ResultBroadcastTxCommit{ + CheckTx: *checkTxRes, + DeliverTx: abci.ResponseDeliverTx{}, + Hash: tx.Hash(), + }, err } } -func (s *LocalService) BroadcastTxAsync( - _ *http.Request, - args *BroadcastTxArgs, - reply *ctypes.ResultBroadcastTx, -) error { - err := s.vm.mempool.CheckTx(args.Tx, nil, mempl.TxInfo{}) +func (s *LocalService) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + err := s.vm.mempool.CheckTx(tx, nil, mempl.TxInfo{}) if err != nil { - return err + return nil, err } - reply.Hash = args.Tx.Hash() - return nil + return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, 
nil } -func (s *LocalService) BroadcastTxSync(_ *http.Request, args *BroadcastTxArgs, reply *ctypes.ResultBroadcastTx) error { +func (s *LocalService) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { resCh := make(chan *abci.Response, 1) - err := s.vm.mempool.CheckTx(args.Tx, func(res *abci.Response) { + err := s.vm.mempool.CheckTx(tx, func(res *abci.Response) { s.vm.tmLogger.With("module", "service").Debug("handled response from checkTx") resCh <- res }, mempl.TxInfo{}) if err != nil { - return err + return nil, err } res := <-resCh r := res.GetCheckTx() - - reply.Code = r.Code - reply.Data = r.Data - reply.Log = r.Log - reply.Codespace = r.Codespace - reply.Hash = args.Tx.Hash() - - return nil + return &ctypes.ResultBroadcastTx{ + Code: r.Code, + Data: r.Data, + Log: r.Log, + Codespace: r.Codespace, + Hash: tx.Hash(), + }, nil } -func (s *LocalService) Block(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlock) error { - height, err := getHeight(s.vm.blockStore, args.Height) +func (s *LocalService) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { + height, err := getHeight(s.vm.blockStore, heightPtr) if err != nil { - return err + return nil, err } + block := s.vm.blockStore.LoadBlock(height) blockMeta := s.vm.blockStore.LoadBlockMeta(height) - - if blockMeta != nil { - reply.BlockID = blockMeta.BlockID + if blockMeta == nil { + return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: block}, nil } - reply.Block = block - return nil + return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } -func (s *LocalService) BlockByHash(_ *http.Request, args *BlockHashArgs, reply *ctypes.ResultBlock) error { - block := s.vm.blockStore.LoadBlockByHash(args.Hash) +func (s *LocalService) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { + block := s.vm.blockStore.LoadBlockByHash(hash) if block == nil { - reply.BlockID = types.BlockID{} - 
reply.Block = nil - return nil + return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } + // If block is not nil, then blockMeta can't be nil. blockMeta := s.vm.blockStore.LoadBlockMeta(block.Height) - reply.BlockID = blockMeta.BlockID - reply.Block = block - return nil + return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } -func (s *LocalService) BlockResults(_ *http.Request, args *BlockHeightArgs, reply *ctypes.ResultBlockResults) error { - height, err := getHeight(s.vm.blockStore, args.Height) +func (s *LocalService) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { + height, err := getHeight(s.vm.blockStore, heightPtr) if err != nil { - return err + return nil, err } results, err := s.vm.stateStore.LoadABCIResponses(height) if err != nil { - return err + return nil, err } - reply.Height = height - reply.TxsResults = results.DeliverTxs - reply.BeginBlockEvents = results.BeginBlock.Events - reply.EndBlockEvents = results.EndBlock.Events - reply.ValidatorUpdates = results.EndBlock.ValidatorUpdates - reply.ConsensusParamUpdates = results.EndBlock.ConsensusParamUpdates - return nil + return &ctypes.ResultBlockResults{ + Height: height, + TxsResults: results.DeliverTxs, + BeginBlockEvents: results.BeginBlock.Events, + EndBlockEvents: results.EndBlock.Events, + ValidatorUpdates: results.EndBlock.ValidatorUpdates, + ConsensusParamUpdates: results.EndBlock.ConsensusParamUpdates, + }, nil } -func (s *LocalService) Commit(_ *http.Request, args *CommitArgs, reply *ctypes.ResultCommit) error { - height, err := getHeight(s.vm.blockStore, args.Height) +func (s *LocalService) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { + height, err := getHeight(s.vm.blockStore, heightPtr) if err != nil { - return err + return nil, err } blockMeta := s.vm.blockStore.LoadBlockMeta(height) if blockMeta == nil { - return nil + return nil, nil } - header := blockMeta.Header - 
commit := s.vm.blockStore.LoadBlockCommit(height) - res := ctypes.NewResultCommit(&header, commit, !(height == s.vm.blockStore.Height())) - reply.SignedHeader = res.SignedHeader - reply.CanonicalCommit = res.CanonicalCommit - return nil + // Return the canonical commit (comes from the block at height+1) + commit := s.vm.blockStore.LoadBlockCommit(height) + return ctypes.NewResultCommit(&header, commit, true), nil } -func (s *LocalService) Validators(_ *http.Request, args *ValidatorsArgs, reply *ctypes.ResultValidators) error { - height, err := getHeight(s.vm.blockStore, args.Height) +func (s *LocalService) Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { + height, err := getHeight(s.vm.blockStore, heightPtr) if err != nil { - return err + return nil, err } validators, err := s.vm.stateStore.LoadValidators(height) if err != nil { - return err + return nil, err } totalCount := len(validators.Validators) - perPage := validatePerPage(args.PerPage) - page, err := validatePage(args.Page, perPage, totalCount) + perPage := validatePerPage(perPagePtr) + page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { - return err + return nil, err } skipCount := validateSkipCount(page, perPage) - reply.BlockHeight = height - reply.Validators = validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - reply.Count = len(reply.Validators) - reply.Total = totalCount - return nil + v := validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] + + return &ctypes.ResultValidators{ + BlockHeight: height, + Validators: v, + Count: len(v), + Total: totalCount}, nil } -func (s *LocalService) Tx(_ *http.Request, args *TxArgs, reply *ctypes.ResultTx) error { - r, err := s.vm.txIndexer.Get(args.Hash) +func (s *LocalService) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + if _, ok := s.vm.txIndexer.(*null.TxIndex); ok { 
+ return nil, fmt.Errorf("transaction indexing is disabled") + } + + r, err := s.vm.txIndexer.Get(hash) if err != nil { - return err + return nil, err } if r == nil { - return fmt.Errorf("tx (%X) not found", args.Hash) + return nil, fmt.Errorf("tx (%X) not found", hash) } height := r.Height index := r.Index var proof types.TxProof - if args.Prove { + if prove { block := s.vm.blockStore.LoadBlock(height) proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines } - reply.Hash = args.Hash - reply.Height = height - reply.Index = index - reply.TxResult = r.Result - reply.Tx = r.Tx - reply.Proof = proof - return nil + return &ctypes.ResultTx{ + Hash: hash, + Height: height, + Index: index, + TxResult: r.Result, + Tx: r.Tx, + Proof: proof, + }, nil } -func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ctypes.ResultTxSearch) error { - q, err := tmquery.New(args.Query) - if err != nil { - return err +func (s *LocalService) TxSearch(ctx *rpctypes.Context, query string, prove bool, pagePtr, perPagePtr *int, + orderBy string) (*ctypes.ResultTxSearch, error) { + // if index is disabled, return error + if _, ok := s.vm.txIndexer.(*null.TxIndex); ok { + return nil, errors.New("transaction indexing is disabled") } - - var ctx context.Context - if req != nil { - ctx = req.Context() - } else { - ctx = context.Background() + q, err := tmquery.New(query) + if err != nil { + return nil, err } - results, err := s.vm.txIndexer.Search(ctx, q) + results, err := s.vm.txIndexer.Search(ctx.Context(), q) if err != nil { - return err + return nil, err } // sort results (must be done before pagination) - switch args.OrderBy { + switch orderBy { case "desc": sort.Slice(results, func(i, j int) bool { if results[i].Height == results[j].Height { @@ -466,16 +495,16 @@ func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ct return results[i].Height < results[j].Height }) default: - return errors.New("expected order_by to be either 
`asc` or `desc` or empty") + return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } // paginate results totalCount := len(results) - perPage := validatePerPage(args.PerPage) + perPage := validatePerPage(perPagePtr) - page, err := validatePage(args.Page, perPage, totalCount) + page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { - return err + return nil, err } skipCount := validateSkipCount(page, perPage) @@ -486,7 +515,7 @@ func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ct r := results[i] var proof types.TxProof - if args.Prove { + if prove { block := s.vm.blockStore.LoadBlock(r.Height) proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines } @@ -501,31 +530,35 @@ func (s *LocalService) TxSearch(req *http.Request, args *TxSearchArgs, reply *ct }) } - reply.Txs = apiResults - reply.TotalCount = totalCount - return nil + return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } -func (s *LocalService) BlockSearch(req *http.Request, args *BlockSearchArgs, reply *ctypes.ResultBlockSearch) error { - q, err := tmquery.New(args.Query) - if err != nil { - return err +// BlockSearch searches for a paginated set of blocks matching BeginBlock and +// EndBlock event search criteria. 
+func (s *LocalService) BlockSearch( + ctx *rpctypes.Context, + query string, + pagePtr, perPagePtr *int, + orderBy string, +) (*ctypes.ResultBlockSearch, error) { + + // skip if block indexing is disabled + if _, ok := s.vm.blockIndexer.(*blockidxnull.BlockerIndexer); ok { + return nil, errors.New("block indexing is disabled") } - var ctx context.Context - if req != nil { - ctx = req.Context() - } else { - ctx = context.Background() + q, err := tmquery.New(query) + if err != nil { + return nil, err } - results, err := s.vm.blockIndexer.Search(ctx, q) + results, err := s.vm.blockIndexer.Search(ctx.Context(), q) if err != nil { - return err + return nil, err } // sort results (must be done before pagination) - switch args.OrderBy { + switch orderBy { case "desc", "": sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) @@ -533,16 +566,16 @@ func (s *LocalService) BlockSearch(req *http.Request, args *BlockSearchArgs, rep sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) default: - return errors.New("expected order_by to be either `asc` or `desc` or empty") + return nil, errors.New("expected order_by to be either `asc` or `desc` or empty") } // paginate results totalCount := len(results) - perPage := validatePerPage(args.PerPage) + perPage := validatePerPage(perPagePtr) - page, err := validatePage(args.Page, perPage, totalCount) + page, err := validatePage(pagePtr, perPage, totalCount) if err != nil { - return err + return nil, err } skipCount := validateSkipCount(page, perPage) @@ -562,72 +595,66 @@ func (s *LocalService) BlockSearch(req *http.Request, args *BlockSearchArgs, rep } } - reply.Blocks = apiResults - reply.TotalCount = totalCount - return nil + return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil } -func (s *LocalService) BlockchainInfo( - _ *http.Request, - args *BlockchainInfoArgs, - reply *ctypes.ResultBlockchainInfo, -) error { +func (s *LocalService) BlockchainInfo(ctx 
*rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { // maximum 20 block metas const limit int64 = 20 var err error - args.MinHeight, args.MaxHeight, err = filterMinMax( + minHeight, maxHeight, err = filterMinMax( s.vm.blockStore.Base(), s.vm.blockStore.Height(), - args.MinHeight, - args.MaxHeight, + minHeight, + maxHeight, limit) if err != nil { - return err + return nil, err } - s.vm.tmLogger.Debug("BlockchainInfoHandler", "maxHeight", args.MaxHeight, "minHeight", args.MinHeight) + s.vm.tmLogger.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight) var blockMetas []*types.BlockMeta - for height := args.MaxHeight; height >= args.MinHeight; height-- { + for height := maxHeight; height >= minHeight; height-- { blockMeta := s.vm.blockStore.LoadBlockMeta(height) blockMetas = append(blockMetas, blockMeta) } - reply.LastHeight = s.vm.blockStore.Height() - reply.BlockMetas = blockMetas - return nil + return &ctypes.ResultBlockchainInfo{ + LastHeight: s.vm.blockStore.Height(), + BlockMetas: blockMetas}, nil } -func (s *LocalService) Genesis(_ *http.Request, _ *struct{}, reply *ctypes.ResultGenesis) error { +func (s *LocalService) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { if len(s.vm.genChunks) > 1 { - return errors.New("genesis response is large, please use the genesis_chunked API instead") + return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") } - reply.Genesis = s.vm.genesis - return nil + return &ctypes.ResultGenesis{Genesis: s.vm.genesis}, nil } -func (s *LocalService) GenesisChunked(_ *http.Request, args *GenesisChunkedArgs, reply *ctypes.ResultGenesisChunk) error { +func (s *LocalService) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { if s.vm.genChunks == nil { - return fmt.Errorf("service configuration error, genesis chunks are not initialized") + return nil, fmt.Errorf("service configuration error, 
genesis chunks are not initialized") } if len(s.vm.genChunks) == 0 { - return fmt.Errorf("service configuration error, there are no chunks") + return nil, fmt.Errorf("service configuration error, there are no chunks") } - id := int(args.Chunk) + id := int(chunk) if id > len(s.vm.genChunks)-1 { - return fmt.Errorf("there are %d chunks, %d is invalid", len(s.vm.genChunks)-1, id) + return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(s.vm.genChunks)-1, id) } - reply.TotalChunks = len(s.vm.genChunks) - reply.ChunkNumber = id - reply.Data = s.vm.genChunks[id] - return nil + return &ctypes.ResultGenesisChunk{ + TotalChunks: len(s.vm.genChunks), + ChunkNumber: id, + Data: s.vm.genChunks[id], + }, nil } -func (s *LocalService) Status(_ *http.Request, _ *struct{}, reply *ctypes.ResultStatus) error { +func (s *LocalService) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash tmbytes.HexBytes @@ -658,70 +685,79 @@ func (s *LocalService) Status(_ *http.Request, _ *struct{}, reply *ctypes.Result } } - reply.NodeInfo = p2p.DefaultNodeInfo{ - DefaultNodeID: p2p.ID(s.vm.ctx.NodeID.String()), - Network: fmt.Sprintf("%d", s.vm.ctx.NetworkID), - } - reply.SyncInfo = ctypes.SyncInfo{ - LatestBlockHash: latestBlockHash, - LatestAppHash: latestAppHash, - LatestBlockHeight: latestHeight, - LatestBlockTime: time.Unix(0, latestBlockTimeNano), - EarliestBlockHash: earliestBlockHash, - EarliestAppHash: earliestAppHash, - EarliestBlockHeight: earliestBlockHeight, - EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), - } - return nil + result := &ctypes.ResultStatus{ + NodeInfo: p2p.DefaultNodeInfo{ + DefaultNodeID: p2p.ID(s.vm.ctx.NodeID.String()), + Network: fmt.Sprintf("%d", s.vm.ctx.NetworkID), + }, + SyncInfo: ctypes.SyncInfo{ + LatestBlockHash: latestBlockHash, + LatestAppHash: latestAppHash, + LatestBlockHeight: latestHeight, + LatestBlockTime: time.Unix(0, latestBlockTimeNano), + EarliestBlockHash: 
earliestBlockHash, + EarliestAppHash: earliestAppHash, + EarliestBlockHeight: earliestBlockHeight, + EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), + CatchingUp: false, + }, + ValidatorInfo: ctypes.ValidatorInfo{ + Address: proposerPubKey.Address(), + PubKey: proposerPubKey, + VotingPower: 0, + }, + } + + return result, nil } -// ToDo: no peers, because it's vm -func (s *LocalService) NetInfo(_ *http.Request, _ *struct{}, reply *ctypes.ResultNetInfo) error { - return nil +// ToDo: no peers, no network from tendermint side +func (s *LocalService) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { + return &ctypes.ResultNetInfo{}, nil } // ToDo: we doesn't have consensusState -func (s *LocalService) DumpConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultDumpConsensusState) error { - return nil +func (s *LocalService) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { + return &ctypes.ResultDumpConsensusState{}, nil } // ToDo: we doesn't have consensusState -func (s *LocalService) ConsensusState(_ *http.Request, _ *struct{}, reply *ctypes.ResultConsensusState) error { - return nil +func (s *LocalService) ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { + return &ctypes.ResultConsensusState{}, nil } -func (s *LocalService) ConsensusParams(_ *http.Request, args *ConsensusParamsArgs, reply *ctypes.ResultConsensusParams) error { - reply.BlockHeight = s.vm.blockStore.Height() - reply.ConsensusParams = *s.vm.genesis.ConsensusParams - return nil +func (s *LocalService) ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { + return &ctypes.ResultConsensusParams{ + BlockHeight: s.vm.blockStore.Height(), + ConsensusParams: *s.vm.genesis.ConsensusParams, + }, nil } -func (s *LocalService) Health(_ *http.Request, _ *struct{}, reply *ctypes.ResultHealth) error { - *reply = ctypes.ResultHealth{} - return nil +func (s *LocalService) 
Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { + return &ctypes.ResultHealth{}, nil } -func (s *LocalService) UnconfirmedTxs(_ *http.Request, args *UnconfirmedTxsArgs, reply *ctypes.ResultUnconfirmedTxs) error { - limit := validatePerPage(args.Limit) +func (s *LocalService) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { + limit := validatePerPage(limitPtr) txs := s.vm.mempool.ReapMaxTxs(limit) - reply.Count = len(txs) - reply.Total = s.vm.mempool.Size() - reply.Txs = txs - return nil + return &ctypes.ResultUnconfirmedTxs{ + Count: len(txs), + Total: s.vm.mempool.Size(), + Txs: txs, + }, nil } -func (s *LocalService) NumUnconfirmedTxs(_ *http.Request, _ *struct{}, reply *ctypes.ResultUnconfirmedTxs) error { - reply.Count = s.vm.mempool.Size() - reply.Total = s.vm.mempool.Size() - reply.TotalBytes = s.vm.mempool.TxsBytes() - return nil +func (s *LocalService) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { + return &ctypes.ResultUnconfirmedTxs{ + Count: s.vm.mempool.Size(), + Total: s.vm.mempool.Size(), + TotalBytes: s.vm.mempool.TxsBytes()}, nil } -func (s *LocalService) CheckTx(_ *http.Request, args *CheckTxArgs, reply *ctypes.ResultCheckTx) error { - res, err := s.vm.proxyApp.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: args.Tx}) +func (s *LocalService) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { + res, err := s.vm.proxyApp.Mempool().CheckTxSync(abci.RequestCheckTx{Tx: tx}) if err != nil { - return err + return nil, err } - reply.ResponseCheckTx = *res - return nil + return &ctypes.ResultCheckTx{ResponseCheckTx: *res}, nil } diff --git a/vm/service_test.go b/vm/service_test.go index 1eb747ef4..f5f4ff84a 100644 --- a/vm/service_test.go +++ b/vm/service_test.go @@ -2,11 +2,16 @@ package vm import ( "context" + "encoding/base64" + "fmt" + tmjson "github.com/consideritdone/landslidecore/libs/json" + rpctypes 
"github.com/consideritdone/landslidecore/rpc/jsonrpc/types" + "github.com/consideritdone/landslidecore/types" + "strings" "testing" "time" atypes "github.com/consideritdone/landslidecore/abci/types" - ctypes "github.com/consideritdone/landslidecore/rpc/core/types" "github.com/davecgh/go-spew/spew" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -16,8 +21,8 @@ func TestABCIService(t *testing.T) { vm, service, _ := mustNewKVTestVm(t) t.Run("ABCIInfo", func(t *testing.T) { - reply := new(ctypes.ResultABCIInfo) - assert.NoError(t, service.ABCIInfo(nil, nil, reply)) + reply, err := service.ABCIInfo(&rpctypes.Context{}) + require.NoError(t, err) assert.Equal(t, uint64(1), reply.Response.AppVersion) assert.Equal(t, int64(0), reply.Response.LastBlockHeight) assert.Equal(t, []uint8([]byte(nil)), reply.Response.LastBlockAppHash) @@ -27,8 +32,8 @@ func TestABCIService(t *testing.T) { t.Run("ABCIQuery", func(t *testing.T) { k, v, tx := MakeTxKV() - replyBroadcast := new(ctypes.ResultBroadcastTx) - require.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{tx}, replyBroadcast)) + _, err := service.BroadcastTxSync(&rpctypes.Context{}, tx) + require.NoError(t, err) blk, err := vm.BuildBlock(context.Background()) require.NoError(t, err) @@ -37,10 +42,9 @@ func TestABCIService(t *testing.T) { err = blk.Accept(context.Background()) require.NoError(t, err) - res := new(ctypes.ResultABCIQuery) - err = service.ABCIQuery(nil, &ABCIQueryArgs{Path: "/key", Data: k}, res) - if assert.Nil(t, err) && assert.True(t, res.Response.IsOK()) { - assert.EqualValues(t, v, res.Response.Value) + reply, err := service.ABCIQuery(&rpctypes.Context{}, "/key", k, 0, false) + if assert.Nil(t, err) && assert.True(t, reply.Response.IsOK()) { + assert.EqualValues(t, v, reply.Response.Value) } spew.Dump(vm.mempool.Size()) }) @@ -68,8 +72,8 @@ func TestABCIService(t *testing.T) { }(ctx) _, _, tx := MakeTxKV() - reply := new(ctypes.ResultBroadcastTxCommit) - assert.NoError(t, 
service.BroadcastTxCommit(nil, &BroadcastTxArgs{tx}, reply)) + reply, err := service.BroadcastTxCommit(&rpctypes.Context{}, tx) + assert.NoError(t, err) assert.True(t, reply.CheckTx.IsOK()) assert.True(t, reply.DeliverTx.IsOK()) assert.Equal(t, 0, vm.mempool.Size()) @@ -81,8 +85,8 @@ func TestABCIService(t *testing.T) { initMempoolSize := vm.mempool.Size() _, _, tx := MakeTxKV() - reply := new(ctypes.ResultBroadcastTx) - assert.NoError(t, service.BroadcastTxAsync(nil, &BroadcastTxArgs{tx}, reply)) + reply, err := service.BroadcastTxAsync(&rpctypes.Context{}, tx) + assert.NoError(t, err) assert.NotNil(t, reply.Hash) assert.Equal(t, initMempoolSize+1, vm.mempool.Size()) assert.EqualValues(t, tx, vm.mempool.ReapMaxTxs(-1)[0]) @@ -94,19 +98,80 @@ func TestABCIService(t *testing.T) { initMempoolSize := vm.mempool.Size() _, _, tx := MakeTxKV() - reply := new(ctypes.ResultBroadcastTx) - assert.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{Tx: tx}, reply)) + reply, err := service.BroadcastTxSync(&rpctypes.Context{}, tx) + assert.NoError(t, err) assert.Equal(t, reply.Code, atypes.CodeTypeOK) assert.Equal(t, initMempoolSize+1, vm.mempool.Size()) assert.EqualValues(t, tx, vm.mempool.ReapMaxTxs(-1)[0]) }) } +func TestEventService(t *testing.T) { + _, service, _ := mustNewCounterTestVm(t) + + // subscribe to new blocks and make sure height increments by 1 + t.Run("Subscribe", func(t *testing.T) { + events := []string{ + types.QueryForEvent(types.EventNewBlock).String(), + types.QueryForEvent(types.EventNewBlockHeader).String(), + types.QueryForEvent(types.EventValidBlock).String(), + } + + for i, event := range events { + _, err := service.Subscribe(&rpctypes.Context{JSONReq: &rpctypes.RPCRequest{ID: rpctypes.JSONRPCIntID(i)}}, event) + require.NoError(t, err) + } + t.Cleanup(func() { + if _, err := service.UnsubscribeAll(&rpctypes.Context{}); err != nil { + t.Error(err) + } + }) + }) + + t.Run("Unsubscribe", func(t *testing.T) { + events := []string{ + 
types.QueryForEvent(types.EventNewBlock).String(), + types.QueryForEvent(types.EventNewBlockHeader).String(), + types.QueryForEvent(types.EventValidBlock).String(), + } + + for i, event := range events { + _, err := service.Subscribe(&rpctypes.Context{JSONReq: &rpctypes.RPCRequest{ID: rpctypes.JSONRPCIntID(i)}}, event) + require.NoError(t, err) + _, err = service.Unsubscribe(&rpctypes.Context{}, event) + require.NoError(t, err) + } + //TODO: investigate the need to use Cleanup with UnsubscribeAll + //t.Cleanup(func() { + // if _, err := service.UnsubscribeAll(&rpctypes.Context{}); err != nil { + // t.Error(err) + // } + //}) + }) + + t.Run("UnsubscribeAll", func(t *testing.T) { + events := []string{ + types.QueryForEvent(types.EventNewBlock).String(), + types.QueryForEvent(types.EventNewBlockHeader).String(), + types.QueryForEvent(types.EventValidBlock).String(), + } + + for i, event := range events { + _, err := service.Subscribe(&rpctypes.Context{JSONReq: &rpctypes.RPCRequest{ID: rpctypes.JSONRPCIntID(i)}}, event) + require.NoError(t, err) + } + _, err := service.UnsubscribeAll(&rpctypes.Context{}) + if err != nil { + t.Error(err) + } + }) +} + func TestHistoryService(t *testing.T) { vm, service, _ := mustNewCounterTestVm(t) - txReply := new(ctypes.ResultBroadcastTx) - assert.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{Tx: []byte{0x00}}, txReply)) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, []byte{0x00}) + assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) blk, err := vm.BuildBlock(context.Background()) @@ -115,43 +180,64 @@ func TestHistoryService(t *testing.T) { assert.NoError(t, blk.Accept(context.Background())) t.Run("BlockchainInfo", func(t *testing.T) { - reply := new(ctypes.ResultBlockchainInfo) - assert.NoError(t, service.BlockchainInfo(nil, &BlockchainInfoArgs{1, 100}, reply)) + reply, err := service.BlockchainInfo(&rpctypes.Context{}, 1, 100) + assert.NoError(t, err) assert.Equal(t, int64(1), 
reply.LastHeight) }) t.Run("Genesis", func(t *testing.T) { - reply := new(ctypes.ResultGenesis) - assert.NoError(t, service.Genesis(nil, nil, reply)) + reply, err := service.Genesis(&rpctypes.Context{}) + assert.NoError(t, err) assert.Equal(t, vm.genesis, reply.Genesis) }) + + t.Run("GenesisChunked", func(t *testing.T) { + + first, err := service.GenesisChunked(&rpctypes.Context{}, 0) + require.NoError(t, err) + + decoded := make([]string, 0, first.TotalChunks) + for i := 0; i < first.TotalChunks; i++ { + chunk, err := service.GenesisChunked(&rpctypes.Context{}, uint(i)) + require.NoError(t, err) + data, err := base64.StdEncoding.DecodeString(chunk.Data) + require.NoError(t, err) + decoded = append(decoded, string(data)) + + } + doc := []byte(strings.Join(decoded, "")) + + var out types.GenesisDoc + require.NoError(t, tmjson.Unmarshal(doc, &out), + "first: %+v, doc: %s", first, string(doc)) + }) } func TestNetworkService(t *testing.T) { vm, service, _ := mustNewCounterTestVm(t) t.Run("NetInfo", func(t *testing.T) { - reply := new(ctypes.ResultNetInfo) - assert.NoError(t, service.NetInfo(nil, nil, reply)) + _, err := service.NetInfo(&rpctypes.Context{}) + assert.NoError(t, err) }) t.Run("DumpConsensusState", func(t *testing.T) { - reply := new(ctypes.ResultDumpConsensusState) - assert.NoError(t, service.DumpConsensusState(nil, nil, reply)) + _, err := service.DumpConsensusState(&rpctypes.Context{}) + assert.NoError(t, err) }) t.Run("ConsensusState", func(t *testing.T) { - reply := new(ctypes.ResultConsensusState) - assert.NoError(t, service.ConsensusState(nil, nil, reply)) + _, err := service.ConsensusState(&rpctypes.Context{}) + assert.NoError(t, err) }) t.Run("ConsensusParams", func(t *testing.T) { - reply := new(ctypes.ResultConsensusParams) - assert.NoError(t, service.ConsensusParams(nil, nil, reply)) + reply, err := service.ConsensusParams(&rpctypes.Context{}, nil) + assert.NoError(t, err) assert.Equal(t, int64(0), reply.BlockHeight) - txReply := 
new(ctypes.ResultBroadcastTx) - assert.NoError(t, service.BroadcastTxSync(nil, &BroadcastTxArgs{Tx: []byte{0x00}}, txReply)) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, []byte{0x00}) + assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) blk, err := vm.BuildBlock(context.Background()) @@ -159,27 +245,28 @@ func TestNetworkService(t *testing.T) { assert.NotNil(t, blk) assert.NoError(t, blk.Accept(context.Background())) - assert.NoError(t, service.ConsensusParams(nil, nil, reply)) - assert.Equal(t, int64(1), reply.BlockHeight) + reply2, err := service.ConsensusParams(&rpctypes.Context{}, nil) + assert.NoError(t, err) + assert.Equal(t, int64(1), reply2.BlockHeight) }) t.Run("Health", func(t *testing.T) { - reply := new(ctypes.ResultHealth) - assert.NoError(t, service.Health(nil, nil, reply)) + _, err := service.Health(&rpctypes.Context{}) + assert.NoError(t, err) }) } func TestSignService(t *testing.T) { _, _, tx := MakeTxKV() + tx2 := []byte{0x02} + tx3 := []byte{0x03} vm, service, _ := mustNewKVTestVm(t) blk0, err := vm.BuildBlock(context.Background()) assert.ErrorIs(t, err, errNoPendingTxs, "expecting error no txs") assert.Nil(t, blk0) - txArg := &BroadcastTxArgs{tx} - txReply := new(ctypes.ResultBroadcastTx) - err = service.BroadcastTxSync(nil, txArg, txReply) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, tx) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) @@ -191,40 +278,39 @@ func TestSignService(t *testing.T) { height1 := int64(blk1.Height()) t.Run("Block", func(t *testing.T) { - replyWithoutHeight := new(ctypes.ResultBlock) - assert.NoError(t, service.Block(nil, &BlockHeightArgs{&height1}, replyWithoutHeight)) + replyWithoutHeight, err := service.Block(&rpctypes.Context{}, &height1) + assert.NoError(t, err) if assert.NotNil(t, replyWithoutHeight.Block) { assert.EqualValues(t, height1, replyWithoutHeight.Block.Height) } - reply := new(ctypes.ResultBlock) - assert.NoError(t, 
service.Block(nil, &BlockHeightArgs{Height: &height1}, reply)) + reply, err := service.Block(&rpctypes.Context{}, &height1) + assert.NoError(t, err) if assert.NotNil(t, reply.Block) { assert.EqualValues(t, height1, reply.Block.Height) } }) t.Run("BlockByHash", func(t *testing.T) { - replyWithoutHash := new(ctypes.ResultBlock) - assert.NoError(t, service.BlockByHash(nil, &BlockHashArgs{}, replyWithoutHash)) + replyWithoutHash, err := service.BlockByHash(&rpctypes.Context{}, []byte{}) + assert.NoError(t, err) assert.Nil(t, replyWithoutHash.Block) - reply := new(ctypes.ResultBlock) hash := blk1.ID() - - assert.NoError(t, service.BlockByHash(nil, &BlockHashArgs{Hash: hash[:]}, reply)) + reply, err := service.BlockByHash(&rpctypes.Context{}, hash[:]) + assert.NoError(t, err) if assert.NotNil(t, reply.Block) { assert.EqualValues(t, hash[:], reply.Block.Hash().Bytes()) } }) t.Run("BlockResults", func(t *testing.T) { - replyWithoutHeight := new(ctypes.ResultBlockResults) - assert.NoError(t, service.BlockResults(nil, &BlockHeightArgs{}, replyWithoutHeight)) + replyWithoutHeight, err := service.BlockResults(&rpctypes.Context{}, nil) + assert.NoError(t, err) assert.Equal(t, height1, replyWithoutHeight.Height) - reply := new(ctypes.ResultBlockResults) - assert.NoError(t, service.BlockResults(nil, &BlockHeightArgs{Height: &height1}, reply)) + reply, err := service.BlockResults(&rpctypes.Context{}, &height1) + assert.NoError(t, err) if assert.NotNil(t, reply.TxsResults) { assert.Equal(t, height1, reply.Height) } @@ -233,23 +319,158 @@ func TestSignService(t *testing.T) { t.Run("Tx", func(t *testing.T) { time.Sleep(2 * time.Second) - reply := new(ctypes.ResultTx) - assert.NoError(t, service.Tx(nil, &TxArgs{Hash: txReply.Hash.Bytes()}, reply)) + reply, err := service.Tx(&rpctypes.Context{}, txReply.Hash.Bytes(), false) + assert.NoError(t, err) assert.EqualValues(t, txReply.Hash, reply.Hash) assert.EqualValues(t, tx, reply.Tx) }) - //t.Run("TxSearch", func(t *testing.T) { - // 
reply := new(ctypes.ResultTxSearch) - // assert.NoError(t, service.TxSearch(nil, &TxSearchArgs{Query: "tx.height>0"}, reply)) - // assert.True(t, len(reply.Txs) > 0) - //}) - - //t.Run("BlockSearch", func(t *testing.T) { - // reply := new(ctypes.ResultBlockSearch) - // assert.NoError(t, service.BlockSearch(nil, &BlockSearchArgs{Query: "block.height>0"}, reply)) - // assert.True(t, len(reply.Blocks) > 0) - //}) + t.Run("TxSearch", func(t *testing.T) { + txReply2, err := service.BroadcastTxAsync(&rpctypes.Context{}, tx2) + blk2, err := vm.BuildBlock(context.Background()) + require.NoError(t, err) + assert.NotNil(t, blk2) + assert.NoError(t, blk2.Accept(context.Background())) + assert.Equal(t, atypes.CodeTypeOK, txReply2.Code) + //TODO: why it is not able to find tx? + reply, err := service.TxSearch(&rpctypes.Context{}, fmt.Sprintf("tx.hash='%s'", txReply2.Hash.String()), false, nil, nil, "desc") + assert.NoError(t, err) + assert.True(t, len(reply.Txs) > 0) + // Search by height + reply2, err := service.TxSearch(&rpctypes.Context{}, fmt.Sprintf("tx.height=%d", blk2.Height()), false, nil, nil, "desc") + assert.NoError(t, err) + assert.True(t, len(reply2.Txs) > 0) + }) + + //TODO: Check logic of test + t.Run("Commit", func(t *testing.T) { + txReply, err := service.BroadcastTxAsync(&rpctypes.Context{}, tx3) + require.NoError(t, err) + assert.Equal(t, atypes.CodeTypeOK, txReply.Code) + + assert, require := assert.New(t), require.New(t) + + // get an offset of height to avoid racing and guessing + s, err := service.Status(&rpctypes.Context{}) + require.NoError(err) + // sh is start height or status height + sh := s.SyncInfo.LatestBlockHeight + + // look for the future + h := sh + 20 + _, err = service.Block(&rpctypes.Context{}, &h) + require.Error(err) // no block yet + + // write something + k, v, tx := MakeTxKV() + bres, err := service.BroadcastTxCommit(&rpctypes.Context{}, tx) + require.NoError(err) + require.True(bres.DeliverTx.IsOK()) + txh := bres.Height + apph := 
txh + 1 // this is where the tx will be applied to the state + + // wait before querying + err = WaitForHeight(service, apph, nil) + require.NoError(err) + + qres, err := service.ABCIQuery(&rpctypes.Context{}, "/key", k, 0, false) + require.NoError(err) + if assert.True(qres.Response.IsOK()) { + assert.Equal(k, qres.Response.Key) + assert.EqualValues(v, qres.Response.Value) + } + + // make sure we can lookup the tx with proof + ptx, err := service.Tx(&rpctypes.Context{}, bres.Hash, true) + require.NoError(err) + assert.EqualValues(txh, ptx.Height) + assert.EqualValues(tx, ptx.Tx) + + // and we can even check the block is added + block, err := service.Block(&rpctypes.Context{}, &apph) + require.NoError(err) + appHash := block.Block.Header.AppHash + assert.True(len(appHash) > 0) + assert.EqualValues(apph, block.Block.Header.Height) + + blockByHash, err := service.BlockByHash(&rpctypes.Context{}, block.BlockID.Hash) + require.NoError(err) + require.Equal(block, blockByHash) + + // now check the results + blockResults, err := service.BlockResults(&rpctypes.Context{}, &txh) + require.Nil(err, "%+v", err) + assert.Equal(txh, blockResults.Height) + if assert.Equal(1, len(blockResults.TxsResults)) { + // check success code + assert.EqualValues(0, blockResults.TxsResults[0].Code) + } + + // check blockchain info, now that we know there is info + info, err := service.BlockchainInfo(&rpctypes.Context{}, apph, apph) + require.NoError(err) + assert.True(info.LastHeight >= apph) + if assert.Equal(1, len(info.BlockMetas)) { + lastMeta := info.BlockMetas[0] + assert.EqualValues(apph, lastMeta.Header.Height) + blockData := block.Block + assert.Equal(blockData.Header.AppHash, lastMeta.Header.AppHash) + assert.Equal(block.BlockID, lastMeta.BlockID) + } + + // and get the corresponding commit with the same apphash + commit, err := service.Commit(&rpctypes.Context{}, &apph) + require.NoError(err) + cappHash := commit.Header.AppHash + assert.Equal(appHash, cappHash) + 
assert.NotNil(commit.Commit) + + // compare the commits (note Commit(2) has commit from Block(3)) + h = apph - 1 + commit2, err := service.Commit(&rpctypes.Context{}, &h) + require.NoError(err) + assert.Equal(block.Block.LastCommitHash, commit2.Commit.Hash()) + + // and we got a proof that works! + pres, err := service.ABCIQuery(&rpctypes.Context{}, "/key", k, 0, true) + require.NoError(err) + assert.True(pres.Response.IsOK()) + + // XXX Test proof + }) + + //TODO: COMMIT + //TODO: VALIDATORS + + t.Run("Validators", func(t *testing.T) { + + // make sure this is the right genesis file + gen, err := service.Genesis(&rpctypes.Context{}) + require.Nil(t, err, "%+v", err) + // get the genesis validator + require.Equal(t, 1, len(gen.Genesis.Validators)) + gval := gen.Genesis.Validators[0] + + // get the current validators + h := int64(1) + vals, err := service.Validators(&rpctypes.Context{}, &h, nil, nil) + require.Nil(t, err, "%d: %+v", err) + require.Equal(t, 1, len(vals.Validators)) + require.Equal(t, 1, vals.Count) + require.Equal(t, 1, vals.Total) + val := vals.Validators[0] + + // make sure the current set is also the genesis set + assert.Equal(t, gval.Power, val.VotingPower) + assert.Equal(t, gval.PubKey, val.PubKey) + }) + + t.Run("BlockSearch", func(t *testing.T) { + //TODO: CREATE BLOCK? 
+ reply, err := service.BlockSearch(&rpctypes.Context{}, "block.height>0", nil, nil, "desc") + assert.NoError(t, err) + assert.True(t, len(reply.Blocks) > 0) + }) } func TestStatusService(t *testing.T) { @@ -259,17 +480,13 @@ func TestStatusService(t *testing.T) { assert.ErrorIs(t, err, errNoPendingTxs, "expecting error no txs") assert.Nil(t, blk0) - txArg := &BroadcastTxArgs{ - Tx: []byte{0x01}, - } - txReply := &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, txArg, txReply) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, []byte{0x01}) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) t.Run("Status", func(t *testing.T) { - reply1 := new(ctypes.ResultStatus) - assert.NoError(t, service.Status(nil, nil, reply1)) + reply1, err := service.Status(&rpctypes.Context{}) + assert.NoError(t, err) assert.Equal(t, int64(0), reply1.SyncInfo.LatestBlockHeight) blk, err := vm.BuildBlock(context.Background()) @@ -277,8 +494,8 @@ func TestStatusService(t *testing.T) { assert.NotNil(t, blk) assert.NoError(t, blk.Accept(context.Background())) - reply2 := new(ctypes.ResultStatus) - assert.NoError(t, service.Status(nil, nil, reply2)) + reply2, err := service.Status(&rpctypes.Context{}) + assert.NoError(t, err) assert.Equal(t, int64(1), reply2.SyncInfo.LatestBlockHeight) }) } @@ -290,32 +507,31 @@ func TestMempoolService(t *testing.T) { assert.ErrorIs(t, err, errNoPendingTxs, "expecting error no txs") assert.Nil(t, blk0) - txArg := &BroadcastTxArgs{ - Tx: []byte{0x01}, - } - txReply := &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, txArg, txReply) + tx := []byte{0x01} + expectedTx := types.Tx(tx) + txReply, err := service.BroadcastTxSync(&rpctypes.Context{}, []byte{0x01}) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, txReply.Code) t.Run("UnconfirmedTxs", func(t *testing.T) { limit := 100 - reply := new(ctypes.ResultUnconfirmedTxs) - assert.NoError(t, service.UnconfirmedTxs(nil, 
&UnconfirmedTxsArgs{Limit: &limit}, reply)) + reply, err := service.UnconfirmedTxs(&rpctypes.Context{}, &limit) + assert.NoError(t, err) assert.True(t, len(reply.Txs) == 1) - assert.Equal(t, reply.Txs[0], txArg.Tx) + assert.Equal(t, expectedTx, reply.Txs[0]) }) t.Run("NumUnconfirmedTxs", func(t *testing.T) { - reply := new(ctypes.ResultUnconfirmedTxs) - assert.NoError(t, service.NumUnconfirmedTxs(nil, nil, reply)) + reply, err := service.NumUnconfirmedTxs(&rpctypes.Context{}) + assert.NoError(t, err) assert.Equal(t, reply.Count, 1) assert.Equal(t, reply.Total, 1) }) t.Run("CheckTx", func(t *testing.T) { - reply1 := new(ctypes.ResultCheckTx) - assert.NoError(t, service.CheckTx(nil, &CheckTxArgs{Tx: txArg.Tx}, reply1)) + reply1, err := service.CheckTx(&rpctypes.Context{}, tx) + assert.NoError(t, err) + t.Logf("%v\n", reply1) // ToDo: check reply1 blk, err := vm.BuildBlock(context.Background()) @@ -323,8 +539,9 @@ func TestMempoolService(t *testing.T) { assert.NotNil(t, blk) assert.NoError(t, blk.Accept(context.Background())) - reply2 := new(ctypes.ResultCheckTx) - assert.NoError(t, service.CheckTx(nil, &CheckTxArgs{Tx: txArg.Tx}, reply2)) + reply2, err := service.CheckTx(&rpctypes.Context{}, tx) + assert.NoError(t, err) // ToDo: check reply2 + t.Logf("%v\n", reply2) }) } diff --git a/vm/service_utils.go b/vm/service_utils.go index 502c9e681..63caaa808 100644 --- a/vm/service_utils.go +++ b/vm/service_utils.go @@ -2,10 +2,10 @@ package vm import ( "fmt" + rpctypes "github.com/consideritdone/landslidecore/rpc/jsonrpc/types" tmmath "github.com/consideritdone/landslidecore/libs/math" "github.com/consideritdone/landslidecore/rpc/client" - coretypes "github.com/consideritdone/landslidecore/rpc/core/types" "github.com/consideritdone/landslidecore/store" ) @@ -117,8 +117,8 @@ func WaitForHeight(c Service, h int64, waiter client.Waiter) error { } delta := int64(1) for delta > 0 { - r := new(coretypes.ResultStatus) - if err := c.Status(nil, nil, r); err != nil { + r, err := 
c.Status(&rpctypes.Context{}) + if err != nil { return err } delta = h - r.SyncInfo.LatestBlockHeight diff --git a/vm/vm.go b/vm/vm.go index a07226755..38aeccced 100644 --- a/vm/vm.go +++ b/vm/vm.go @@ -5,6 +5,7 @@ import ( "encoding/base64" "errors" "fmt" + "github.com/consideritdone/landslidecore/crypto/secp256k1" "net/http" "time" @@ -18,16 +19,14 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/chain" - "github.com/gorilla/rpc/v2" "github.com/prometheus/client_golang/prometheus" - dbm "github.com/tendermint/tm-db" abciTypes "github.com/consideritdone/landslidecore/abci/types" "github.com/consideritdone/landslidecore/config" + cfg "github.com/consideritdone/landslidecore/config" cs "github.com/consideritdone/landslidecore/consensus" tmjson "github.com/consideritdone/landslidecore/libs/json" "github.com/consideritdone/landslidecore/libs/log" @@ -35,7 +34,6 @@ import ( "github.com/consideritdone/landslidecore/node" tmproto "github.com/consideritdone/landslidecore/proto/tendermint/types" "github.com/consideritdone/landslidecore/proxy" - rpccore "github.com/consideritdone/landslidecore/rpc/core" rpcserver "github.com/consideritdone/landslidecore/rpc/jsonrpc/server" sm "github.com/consideritdone/landslidecore/state" "github.com/consideritdone/landslidecore/state/indexer" @@ -78,6 +76,7 @@ var ( blockIndexerDBPrefix = []byte("block_events") proposerAddress = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + proposerPubKey = secp256k1.PubKey{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} ) var ( @@ -97,10 +96,10 @@ type VM struct { tmLogger log.Logger - blockStoreDB dbm.DB + 
blockStoreDB database.Database blockStore *store.BlockStore - stateDB dbm.DB + stateDB database.Database stateStore sm.Store tmState *sm.State @@ -127,11 +126,13 @@ type VM struct { multiGatherer metrics.MultiGatherer txIndexer txindex.TxIndexer - txIndexerDB dbm.DB + txIndexerDB database.Database blockIndexer indexer.BlockIndexer - blockIndexerDB dbm.DB + blockIndexerDB database.Database indexerService *txindex.IndexerService + rpcConfig *cfg.RPCConfig + clock mockable.Clock } @@ -158,10 +159,10 @@ func (vm *VM) Initialize( baseDB := dbManager.Current().Database - vm.blockStoreDB = Database{prefixdb.NewNested(blockStoreDBPrefix, baseDB)} + vm.blockStoreDB = prefixdb.NewNested(blockStoreDBPrefix, baseDB) vm.blockStore = store.NewBlockStore(vm.blockStoreDB) - vm.stateDB = Database{prefixdb.NewNested(stateDBPrefix, baseDB)} + vm.stateDB = prefixdb.NewNested(stateDBPrefix, baseDB) vm.stateStore = sm.NewStore(vm.stateDB) if err := vm.initGenesis(genesisBytes); err != nil { @@ -201,9 +202,11 @@ func (vm *VM) Initialize( } vm.eventBus = eventBus - vm.txIndexerDB = Database{prefixdb.NewNested(txIndexerDBPrefix, baseDB)} + vm.rpcConfig = config.DefaultRPCConfig() + + vm.txIndexerDB = prefixdb.NewNested(txIndexerDBPrefix, baseDB) vm.txIndexer = txidxkv.NewTxIndex(vm.txIndexerDB) - vm.blockIndexerDB = Database{prefixdb.NewNested(blockIndexerDBPrefix, baseDB)} + vm.blockIndexerDB = prefixdb.NewNested(blockIndexerDBPrefix, baseDB) vm.blockIndexer = blockidxkv.New(vm.blockIndexerDB) vm.indexerService = txindex.NewIndexerService(vm.txIndexer, vm.blockIndexer, eventBus) vm.indexerService.SetLogger(vm.tmLogger.With("module", "txindex")) @@ -620,22 +623,59 @@ func (vm *VM) CreateStaticHandlers(ctx context.Context) (map[string]*common.HTTP return nil, nil } +// Routes is a map of available routes. 
+func (vm *VM) RPCRoutes() map[string]*rpcserver.RPCFunc { + vmTMService := NewService(vm) + return map[string]*rpcserver.RPCFunc{ + // subscribe/unsubscribe are reserved for websocket events. + "subscribe": rpcserver.NewWSRPCFunc(vmTMService.Subscribe, "query"), + "unsubscribe": rpcserver.NewWSRPCFunc(vmTMService.Unsubscribe, "query"), + "unsubscribe_all": rpcserver.NewWSRPCFunc(vmTMService.UnsubscribeAll, ""), + + // info API + "health": rpcserver.NewRPCFunc(vmTMService.Health, ""), + "status": rpcserver.NewRPCFunc(vmTMService.Status, ""), + "net_info": rpcserver.NewRPCFunc(vmTMService.NetInfo, ""), + "blockchain": rpcserver.NewRPCFunc(vmTMService.BlockchainInfo, "minHeight,maxHeight"), + "genesis": rpcserver.NewRPCFunc(vmTMService.Genesis, ""), + "genesis_chunked": rpcserver.NewRPCFunc(vmTMService.GenesisChunked, "chunk"), + "block": rpcserver.NewRPCFunc(vmTMService.Block, "height"), + "block_by_hash": rpcserver.NewRPCFunc(vmTMService.BlockByHash, "hash"), + "block_results": rpcserver.NewRPCFunc(vmTMService.BlockResults, "height"), + "commit": rpcserver.NewRPCFunc(vmTMService.Commit, "height"), + "check_tx": rpcserver.NewRPCFunc(vmTMService.CheckTx, "tx"), + "tx": rpcserver.NewRPCFunc(vmTMService.Tx, "hash,prove"), + "tx_search": rpcserver.NewRPCFunc(vmTMService.TxSearch, "query,prove,page,per_page,order_by"), + "block_search": rpcserver.NewRPCFunc(vmTMService.BlockSearch, "query,page,per_page,order_by"), + "validators": rpcserver.NewRPCFunc(vmTMService.Validators, "height,page,per_page"), + "dump_consensus_state": rpcserver.NewRPCFunc(vmTMService.DumpConsensusState, ""), + "consensus_state": rpcserver.NewRPCFunc(vmTMService.ConsensusState, ""), + "consensus_params": rpcserver.NewRPCFunc(vmTMService.ConsensusParams, "height"), + "unconfirmed_txs": rpcserver.NewRPCFunc(vmTMService.UnconfirmedTxs, "limit"), + "num_unconfirmed_txs": rpcserver.NewRPCFunc(vmTMService.NumUnconfirmedTxs, ""), + + // tx broadcast API + "broadcast_tx_commit": 
rpcserver.NewRPCFunc(vmTMService.BroadcastTxCommit, "tx"), + "broadcast_tx_sync": rpcserver.NewRPCFunc(vmTMService.BroadcastTxSync, "tx"), + "broadcast_tx_async": rpcserver.NewRPCFunc(vmTMService.BroadcastTxAsync, "tx"), + + // abci API + "abci_query": rpcserver.NewRPCFunc(vmTMService.ABCIQuery, "path,data,height,prove"), + "abci_info": rpcserver.NewRPCFunc(vmTMService.ABCIInfo, ""), + } +} + func (vm *VM) CreateHandlers(_ context.Context) (map[string]*common.HTTPHandler, error) { mux := http.NewServeMux() - rpcLogger := vm.tmLogger.With("module", "rpc-server") - rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) - server := rpc.NewServer() - server.RegisterCodec(json.NewCodec(), "application/json") - server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") - if err := server.RegisterService(NewService(vm), Name); err != nil { - return nil, err - } + // 1) Register regular routes. + routes := vm.RPCRoutes() + mux.HandleFunc("/", rpcserver.MakeJSONRPCHandler(routes, vm.tmLogger)) return map[string]*common.HTTPHandler{ "/rpc": { LockOptions: common.WriteLock, - Handler: server, + Handler: mux, }, }, nil } diff --git a/vm/vm_test.go b/vm/vm_test.go index 02801de19..128bfb9e6 100644 --- a/vm/vm_test.go +++ b/vm/vm_test.go @@ -4,6 +4,7 @@ import ( "context" _ "embed" "fmt" + rpctypes "github.com/consideritdone/landslidecore/rpc/jsonrpc/types" "os" "testing" @@ -22,7 +23,6 @@ import ( "github.com/consideritdone/landslidecore/abci/example/counter" atypes "github.com/consideritdone/landslidecore/abci/types" tmrand "github.com/consideritdone/landslidecore/libs/rand" - ctypes "github.com/consideritdone/landslidecore/rpc/core/types" ) var ( @@ -100,11 +100,8 @@ func TestInitVm(t *testing.T) { assert.Nil(t, blk0) // submit first tx (0x00) - args := &BroadcastTxArgs{ - Tx: []byte{0x00}, - } - reply := &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, args, reply) + tx := []byte{0x00} + reply, err := 
service.BroadcastTxSync(&rpctypes.Context{}, tx) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, reply.Code) @@ -129,20 +126,14 @@ func TestInitVm(t *testing.T) { t.Logf("TM Block Tx count: %d", len(tmBlk1.Data.Txs)) // submit second tx (0x01) - args = &BroadcastTxArgs{ - Tx: []byte{0x01}, - } - reply = &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, args, reply) + tx = []byte{0x01} + reply, err = service.BroadcastTxSync(&rpctypes.Context{}, tx) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, reply.Code) // submit 3rd tx (0x02) - args = &BroadcastTxArgs{ - Tx: []byte{0x02}, - } - reply = &ctypes.ResultBroadcastTx{} - err = service.BroadcastTxSync(nil, args, reply) + tx = []byte{0x02} + reply, err = service.BroadcastTxSync(&rpctypes.Context{}, tx) assert.NoError(t, err) assert.Equal(t, atypes.CodeTypeOK, reply.Code)