2 changes: 1 addition & 1 deletion go.mod
@@ -17,7 +17,6 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.2
github.com/google/orderedcode v0.0.1
github.com/gorilla/rpc v1.2.0
github.com/gorilla/websocket v1.5.0
github.com/gtank/merlin v0.1.1
github.com/lib/pq v1.10.4
@@ -67,6 +66,7 @@ require (
github.com/golang/mock v1.6.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/gorilla/rpc v1.2.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect
github.com/gtank/ristretto255 v0.1.2 // indirect
17 changes: 9 additions & 8 deletions rpc/jsonrpc/server/http_json_handler.go
@@ -17,7 +17,7 @@ import (
// HTTP + JSON handler

// jsonrpc calls grab the given method's function info and runs reflect.Call
func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc {
func MakeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
@@ -67,13 +67,14 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc {
)
continue
}
if len(r.URL.Path) > 1 {
responses = append(
responses,
types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)),
)
continue
}
//TODO
//if len(r.URL.Path) > 1 {
// responses = append(
// responses,
// types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)),
// )
// continue
//}
rpcFunc, ok := funcMap[request.Method]
if !ok || rpcFunc.ws {
responses = append(responses, types.RPCMethodNotFoundError(request.ID))
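With makeJSONRPCHandler now exported as MakeJSONRPCHandler, a caller outside the rpc/jsonrpc/server package can mount the JSON-RPC handler on its own mux instead of going through RegisterRPCFuncs. The sketch below is a hypothetical caller, not part of this change: the import paths are placeholders (this repo's module path is not shown in the diff), and the NewRPCFunc / rpctypes.Context usage assumes the Tendermint v0.34-style API.

package main

import (
	"net/http"

	// Placeholder module path: substitute this repo's actual module path.
	"example.com/repo/libs/log"
	rpcserver "example.com/repo/rpc/jsonrpc/server"
	rpctypes "example.com/repo/rpc/jsonrpc/types"
)

func main() {
	logger := log.TestingLogger()

	// One trivial "health" method; NewRPCFunc wraps a Go function plus the
	// comma-separated names of its JSON-RPC parameters (none here).
	funcMap := map[string]*rpcserver.RPCFunc{
		"health": rpcserver.NewRPCFunc(func(ctx *rpctypes.Context) (string, error) {
			return "ok", nil
		}, ""),
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", rpcserver.MakeJSONRPCHandler(funcMap, logger))
	_ = http.ListenAndServe("127.0.0.1:26658", mux)
}

A POST to / with a body like {"jsonrpc":"2.0","id":1,"method":"health"} would then be dispatched through the exported handler.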
2 changes: 1 addition & 1 deletion rpc/jsonrpc/server/rpc_func.go
@@ -20,7 +20,7 @@ func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) {
}

// JSONRPC endpoints
mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger)))
mux.HandleFunc("/", handleInvalidJSONRPCPaths(MakeJSONRPCHandler(funcMap, logger)))
}

// Function introspection
209 changes: 209 additions & 0 deletions vm/db_test.go
@@ -0,0 +1,209 @@
package vm

import (
	"bytes"
	"fmt"
	"math/rand"
	"sync"
	"testing"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/prefixdb"
	dbm "github.com/tendermint/tm-db"
)

var (
	testDBPrefix = []byte("test")
)

func TestMemDB(t *testing.T) {
	vm, _, _ := mustNewKVTestVm(t)
	baseDB := vm.dbManager.Current().Database
	db := Database{prefixdb.NewNested(testDBPrefix, baseDB)}
	t.Run("PrefixDB", func(t *testing.T) { Run(t, db) })
	t.Run("BaseDB(MemDB)", func(t *testing.T) { RunAvaDatabase(t, baseDB) })
}

// Run generates concurrent reads and writes to db so the race detector can
// verify concurrent operations are properly synchronized.
// The contents of db are garbage after Run returns.
func Run(t *testing.T, db dbm.DB) {
	t.Helper()

	const numWorkers = 10
	const numKeys = 64

	var wg sync.WaitGroup
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		i := i
		go func() {
			defer wg.Done()

			// Insert a bunch of keys with random data.
			for k := 1; k <= numKeys; k++ {
				key := taskKey(i, k) // say, "task-<i>-key-<k>"
				value := randomValue()
				if err := db.Set(key, value); err != nil {
					t.Errorf("Task %d: db.Set(%q=%q) failed: %v",
						i, string(key), string(value), err)
				}
			}

			// Iterate over the database to make sure our keys are there.
			it, err := db.Iterator(nil, nil)
			if err != nil {
				t.Errorf("Iterator[%d]: %v", i, err)
				return
			}
			found := make(map[string][]byte)
			mine := []byte(fmt.Sprintf("task-%d-", i))
			for ; it.Valid(); it.Next() {
				if key := it.Key(); bytes.HasPrefix(key, mine) {
					found[string(key)] = it.Value()
				}
			}
			if err := it.Error(); err != nil {
				t.Errorf("Iterator[%d] reported error: %v", i, err)
			}
			if err := it.Close(); err != nil {
				t.Errorf("Close iterator[%d]: %v", i, err)
			}
			if len(found) != numKeys {
				t.Errorf("Task %d: found %d keys, wanted %d", i, len(found), numKeys)
			}

			// Log what this worker found, for easier debugging.
			for key, value := range found {
				t.Logf("Task %d: %s = %s", i, key, value)
			}

			// Delete all the keys we inserted.
			for k := 1; k <= numKeys; k++ {
				key := taskKey(i, k) // say, "task-<i>-key-<k>"
				if err := db.Delete(key); err != nil {
					t.Errorf("Delete %q: %v", key, err)
				}
			}

			// Iterate again to make sure our keys are gone.
			it, err = db.Iterator(nil, nil)
			if err != nil {
				t.Errorf("Iterator[%d]: %v", i, err)
				return
			}
			foundAfterRemoval := make(map[string][]byte)
			for ; it.Valid(); it.Next() {
				if key := it.Key(); bytes.HasPrefix(key, mine) {
					foundAfterRemoval[string(key)] = it.Value()
				}
			}
			if err := it.Error(); err != nil {
				t.Errorf("Iterator[%d] reported error: %v", i, err)
			}
			if err := it.Close(); err != nil {
				t.Errorf("Close iterator[%d]: %v", i, err)
			}
			if len(foundAfterRemoval) != 0 {
				t.Errorf("Values left after deletion: %v", foundAfterRemoval)
			}
		}()
	}
	wg.Wait()
}
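Because Run depends only on tm-db's dbm.DB interface, the same concurrency workload can be pointed at any tm-db backend directly. A hypothetical extra test (not part of this change), using tm-db's in-memory backend:

func TestRunPlainMemDB(t *testing.T) {
	// dbm.NewMemDB() returns an in-memory dbm.DB; running the package with
	// `go test -race ./vm/` exercises the same synchronization checks on it.
	Run(t, dbm.NewMemDB())
}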

// RunAvaDatabase generates concurrent reads and writes to db so the race
// detector can verify concurrent operations are properly synchronized.
// The contents of db are garbage after RunAvaDatabase returns.
func RunAvaDatabase(t *testing.T, db database.Database) {
	t.Helper()

	const numWorkers = 10
	const numKeys = 64

	var wg sync.WaitGroup
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		i := i
		go func() {
			defer wg.Done()

			// Insert a bunch of keys with random data.
			for k := 1; k <= numKeys; k++ {
				key := taskKey(i, k) // say, "task-<i>-key-<k>"
				value := randomValue()
				if err := db.Put(key, value); err != nil {
					t.Errorf("Task %d: db.Put(%q=%q) failed: %v",
						i, string(key), string(value), err)
				}
			}

			// Iterate over the database to make sure our keys are there.
			it := db.NewIterator()
			found := make(map[string][]byte)
			mine := []byte(fmt.Sprintf("task-%d-", i))
			for it.Next() {
				if key := it.Key(); bytes.HasPrefix(key, mine) {
					found[string(key)] = it.Value()
				}
			}
			if err := it.Error(); err != nil {
				t.Errorf("Iterator[%d] reported error: %v", i, err)
			}
			it.Release()

			if len(found) != numKeys {
				t.Errorf("Task %d: found %d keys, wanted %d", i, len(found), numKeys)
			}

			// Log what this worker found, for easier debugging.
			for key, value := range found {
				t.Logf("Task %d: %s = %s", i, key, value)
			}

			// Delete all the keys we inserted.
			for k := 1; k <= numKeys; k++ {
				key := taskKey(i, k) // say, "task-<i>-key-<k>"
				if err := db.Delete(key); err != nil {
					t.Errorf("Delete %q: %v", key, err)
				}
			}

			// Iterate again to make sure our keys are gone.
			it = db.NewIterator()
			foundAfterRemoval := make(map[string][]byte)
			for it.Next() {
				if key := it.Key(); bytes.HasPrefix(key, mine) {
					foundAfterRemoval[string(key)] = it.Value()
				}
			}
			if err := it.Error(); err != nil {
				t.Errorf("Iterator[%d] reported error: %v", i, err)
			}
			it.Release()

			if len(foundAfterRemoval) != 0 {
				t.Errorf("Values left after deletion: %v", foundAfterRemoval)
			}
		}()
	}
	wg.Wait()
}

func taskKey(i, k int) []byte {
	return []byte(fmt.Sprintf("task-%d-key-%d", i, k))
}

func randomValue() []byte {
	return []byte(fmt.Sprintf("value-%d", rand.Uint32()))
}
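TestMemDB wraps the avalanchego prefix database in a Database value before handing it to Run, so that wrapper is what adapts avalanchego's database.Database to tm-db's dbm.DB. The wrapper's definition is not part of this diff; the sketch below is a hypothetical, partial illustration of what that adaptation presumably looks like (Get/Set/Delete only; a real dbm.DB also needs Has, iterators, batches, and more).

// Hypothetical sketch only, not the actual vm.Database implementation.
type avaKVAdapter struct {
	inner database.Database // e.g. the prefixdb returned by prefixdb.NewNested
}

func (a avaKVAdapter) Get(key []byte) ([]byte, error) {
	value, err := a.inner.Get(key)
	if err == database.ErrNotFound {
		return nil, nil // tm-db reports a missing key as (nil, nil), not an error
	}
	return value, err
}

func (a avaKVAdapter) Set(key, value []byte) error { return a.inner.Put(key, value) }

func (a avaKVAdapter) Delete(key []byte) error { return a.inner.Delete(key) }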