diff --git a/CHANGELOG.md b/CHANGELOG.md index 7da4899977..bd5934b796 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * (rpc) [#1682](https://github.com/evmos/ethermint/pull/1682) Add config for maximum number of bytes returned from eth_call. * (ante) [#310](https://github.com/crypto-org-chain/ethermint/pull/310) Support blocking list of addresses in mempool. +* (statedb) [#333](https://github.com/crypto-org-chain/ethermint/pull/333) Support native action in statedb, prepare for precompiles. ### State Machine Breaking diff --git a/app/app.go b/app/app.go index 0605ac1cca..6d7bccb435 100644 --- a/app/app.go +++ b/app/app.go @@ -460,7 +460,7 @@ func NewEthermintApp( app.EvmKeeper = evmkeeper.NewKeeper( appCodec, keys[evmtypes.StoreKey], tkeys[evmtypes.TransientKey], authtypes.NewModuleAddress(govtypes.ModuleName), app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.FeeMarketKeeper, - nil, geth.NewEVM, tracer, evmSs, + nil, geth.NewEVM, tracer, evmSs, keys, ) // Create IBC Keeper diff --git a/default.nix b/default.nix index 9b6216094b..465aa8c0a1 100644 --- a/default.nix +++ b/default.nix @@ -17,7 +17,7 @@ in buildGoApplication rec { inherit pname version tags ldflags; src = lib.sourceByRegex ./. [ - "^(x|app|cmd|client|server|crypto|rpc|types|encoding|ethereum|indexer|testutil|version|go.mod|go.sum|gomod2nix.toml)($|/.*)" + "^(x|app|cmd|client|server|crypto|rpc|types|encoding|ethereum|indexer|testutil|version|store|go.mod|go.sum|gomod2nix.toml)($|/.*)" "^tests(/.*[.]go)?$" ]; modules = ./gomod2nix.toml; diff --git a/go.mod b/go.mod index 146edab7d1..9560880fe7 100644 --- a/go.mod +++ b/go.mod @@ -36,6 +36,7 @@ require ( github.com/spf13/viper v1.15.0 github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 github.com/stretchr/testify v1.8.4 + github.com/tidwall/btree v1.7.0 github.com/tidwall/gjson v1.14.4 github.com/tidwall/sjson v1.2.5 github.com/tyler-smith/go-bip39 v1.1.0 @@ -185,7 +186,6 @@ require ( github.com/subosito/gotenv v1.4.2 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tendermint/go-amino v0.16.0 // indirect - github.com/tidwall/btree v1.6.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect diff --git a/go.sum b/go.sum index d1ba1c07bc..8bdb6b6010 100644 --- a/go.sum +++ b/go.sum @@ -1035,8 +1035,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= -github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= -github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= diff --git a/gomod2nix.toml b/gomod2nix.toml index 3b5b6060d1..10a52eab63 100644 --- 
a/gomod2nix.toml +++ b/gomod2nix.toml @@ -517,8 +517,8 @@ schema = 3 version = "v0.16.0" hash = "sha256-JW4zO/0vMzf1dXLePOqaMtiLUZgNbuIseh9GV+jQlf0=" [mod."github.com/tidwall/btree"] - version = "v1.6.0" - hash = "sha256-H4S46Yk3tVfOtrEhVWUrF4S1yWYmzU43W80HlzS9rcY=" + version = "v1.7.0" + hash = "sha256-bnr6c7a0nqo2HyGqxHk0kEZCEsjLYkPbAVY9WzaZ30o=" [mod."github.com/tidwall/gjson"] version = "v1.14.4" hash = "sha256-3DS2YNL95wG0qSajgRtIABD32J+oblaKVk8LIw+KSOc=" diff --git a/rpc/backend/utils.go b/rpc/backend/utils.go index 43610c3217..4e5521fb1c 100644 --- a/rpc/backend/utils.go +++ b/rpc/backend/utils.go @@ -146,17 +146,17 @@ func CalcBaseFee(config *params.ChainConfig, parent *ethtypes.Header, baseFeeCha baseFeeDelta := math.BigMax(num, common.Big1) return num.Add(parent.BaseFee, baseFeeDelta) - } else { - // Otherwise if the parent block used less gas than its target, the baseFee should decrease. - // max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator) - num.SetUint64(parentGasTarget - parent.GasUsed) - num.Mul(num, parent.BaseFee) - num.Div(num, denom.SetUint64(parentGasTarget)) - num.Div(num, denom.SetUint64(uint64(baseFeeChangeDenominator))) - baseFee := num.Sub(parent.BaseFee, num) - - return math.BigMax(baseFee, common.Big0) } + + // Otherwise if the parent block used less gas than its target, the baseFee should decrease. + // max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator) + num.SetUint64(parentGasTarget - parent.GasUsed) + num.Mul(num, parent.BaseFee) + num.Div(num, denom.SetUint64(parentGasTarget)) + num.Div(num, denom.SetUint64(uint64(baseFeeChangeDenominator))) + baseFee := num.Sub(parent.BaseFee, num) + + return math.BigMax(baseFee, common.Big0) } // output: targetOneFeeHistory diff --git a/store/cachekv/README.md b/store/cachekv/README.md new file mode 100644 index 0000000000..66f0916dea --- /dev/null +++ b/store/cachekv/README.md @@ -0,0 +1,140 @@ +# CacheKVStore specification + +A `CacheKVStore` is cache wrapper for a `KVStore`. It extends the operations of the `KVStore` to work with a write-back cache, allowing for reduced I/O operations and more efficient disposing of changes (e.g. after processing a failed transaction). + +The core goals the CacheKVStore seeks to solve are: + +* Buffer all writes to the parent store, so they can be dropped if they need to be reverted +* Allow iteration over contiguous spans of keys +* Act as a cache, improving access time for reads that have already been done (by replacing tree access with hashtable access, avoiding disk I/O) + * Note: We actually fail to achieve this for iteration right now + * Note: Need to consider this getting too large and dropping some cached reads +* Make subsequent reads account for prior buffered writes +* Write all buffered changes to the parent store + +We should revisit these goals with time (for instance it's unclear that all disk writes need to be buffered to the end of the block), but this is the current status. + +## Types and Structs + +```go +type Store struct { + mtx sync.Mutex + cache map[string]*cValue + deleted map[string]struct{} + unsortedCache map[string]struct{} + sortedCache *dbm.MemDB // always ascending sorted + parent types.KVStore +} +``` + +The Store struct wraps the underlying `KVStore` (`parent`) with additional data structures for implementing the cache. Mutex is used as IAVL trees (the `KVStore` in application) are not safe for concurrent use. + +### `cache` + +The main mapping of key-value pairs stored in cache. 
This map contains both keys that are cached from read operations as well as ‘dirty’ keys which map to a value that is potentially different than what is in the underlying `KVStore`. + +Values that are mapped to in `cache` are wrapped in a `cValue` struct, which contains the value and a boolean flag (`dirty`) representing whether the value has been written since the last write-back to `parent`. + +```go +type cValue struct { + value []byte + dirty bool +} +``` + +### `deleted` + +Key-value pairs that are to be deleted from `parent` are stored in the `deleted` map. Keys are mapped to an empty struct to implement a set. + +### `unsortedCache` + +Similar to `deleted`, this is a set of keys that are dirty and will need to be updated in the parent `KVStore` upon a write. Keys are mapped to an empty struct to implement a set. + +### `sortedCache` + +A database that will be populated by the keys in `unsortedCache` during iteration over the cache. The keys are always held in sorted order. + +## CRUD Operations and Writing + +The `Set`, `Get`, and `Delete` functions all call `setCacheValue()`, which is the only entry point to mutating `cache` (besides `Write()`, which clears it). + +`setCacheValue()` inserts a key-value pair into `cache`. Two boolean parameters, `deleted` and `dirty`, are passed in to flag whether the inserted key should also be inserted into the `deleted` and `dirty` sets. Keys will be removed from the `deleted` set if they are written to after being deleted. + +### `Get` + +`Get` first attempts to return the value from `cache`. If the key does not exist in `cache`, `parent.Get()` is called instead. This value from the parent is passed into `setCacheValue()` with `deleted=false` and `dirty=false`. + +### `Has` + +`Has` returns true if `Get` returns a non-nil value. As a result of calling `Get`, it may mutate the cache by caching the read. + +### `Set` + +New values are written by setting or updating the value of a key in `cache`. `Set` does not write to `parent`. + +Calls `setCacheValue()` with `deleted=false` and `dirty=true`. + +### `Delete` + +A value being deleted from the `KVStore` is represented with a `nil` value in `cache`, and an insertion of the key into the `deleted` set. `Delete` does not write to `parent`. + +Calls `setCacheValue()` with `deleted=true` and `dirty=true`. + +### `Write` + +Key-value pairs in the cache are written to `parent` in ascending order of their keys. + +A slice of all dirty keys in `cache` is made, then sorted in increasing order. These keys are iterated over to update `parent`. + +If a key is marked for deletion (checked with `isDeleted()`), then `parent.Delete()` is called. Otherwise, `parent.Set()` is called to update the underlying `KVStore` with the value in cache. + +## Iteration + +Efficient iteration over keys in `KVStore` is important for generating Merkle range proofs. Iteration over `CacheKVStore` requires producing all key-value pairs from the underlying `KVStore` while taking into account updated values from the cache. + +In the current implementation, there is no guarantee that all values in `parent` have been cached. As a result, iteration is achieved by interleaved iteration through both `parent` and the cache (failing to actually benefit from caching). + +[cacheMergeIterator](https://github.com/cosmos/cosmos-sdk/blob/d8391cb6796d770b02448bee70b865d824e43449/store/cachekv/mergeiterator.go) implements functions to provide a single iterator with an input of iterators over `parent` and the cache. 
This iterator iterates over keys from both iterators in a shared lexicographic order, and overrides the value provided by the parent iterator if the same key is dirty or deleted in the cache. + +### Implementation Overview + +Iterators over `parent` and the cache are generated and passed into `cacheMergeIterator`, which returns a single, interleaved iterator. Implementation of the `parent` iterator is up to the underlying `KVStore`. The remainder of this section covers the generation of the cache iterator. + +Recall that `unsortedCache` is an unordered set of dirty cache keys. Our goal is to construct an ordered iterator over cache keys that fall within the `start` and `end` bounds requested. + +Generating the cache iterator can be decomposed into four parts: + +1. Finding all keys that exist in the range we are iterating over +2. Sorting this list of keys +3. Inserting these keys into `sortedCache` and removing them from `unsortedCache` +4. Returning an iterator over `sortedCache` with the desired range + +Currently, the implementation for the first two parts is split into two cases, depending on the size of the unsorted cache. The two cases are as follows. + +If the size of `unsortedCache` is less than `minSortSize` (currently 1024), a linear time approach is taken to search over keys. + +```go +n := len(store.unsortedCache) +unsorted := make([]*kv.Pair, 0) + +if n < minSortSize { + for key := range store.unsortedCache { + if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) { + cacheValue := store.cache[key] + unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) + } + } + store.clearUnsortedCacheSubset(unsorted, stateUnsorted) + return +} +``` + +Here, we iterate through all the keys in `unsortedCache` (i.e., the dirty cache keys), collecting those within the requested range in an unsorted slice called `unsorted`. + +At this point, part 3. is achieved in `clearUnsortedCacheSubset()`. This function iterates through `unsorted`, removing each key from `unsortedCache`. Afterwards, `unsorted` is sorted. Lastly, it iterates through the now sorted slice, inserting key-value pairs into `sortedCache`. Any key marked for deletion is mapped to an arbitrary value (`[]byte{}`). + +In the case that the size of `unsortedCache` is larger than `minSortSize`, a linear time approach to finding keys within the desired range is too slow to use. Instead, a slice of all keys in `unsortedCache` is sorted, and binary search is used to find the beginning and ending indices of the desired range. This produces an already-sorted slice that is passed into the same `clearUnsortedCacheSubset()` function. An iota identifier (`sortedState`) is used to skip the sorting step in the function. + +Finally, part 4. is achieved with `memIterator`, which implements an iterator over the items in `sortedCache`. + +As of [PR #12885](https://github.com/cosmos/cosmos-sdk/pull/12885), an optimization to the binary search case mitigates the overhead of sorting the entirety of the key set in `unsortedCache`. To avoid wasting the compute spent sorting, we should ensure that a reasonable amount of values are removed from `unsortedCache`. If the length of the range for iteration is less than `minSortedCache`, we widen the range of values for removal from `unsortedCache` to be up to `minSortedCache` in length. This amortizes the cost of processing elements across multiple calls. 
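
## Usage sketch

As a concrete illustration of the buffering and write-back behaviour described above, here is a minimal, self-contained sketch (not part of this change set) that wraps a MemDB-backed parent store using the same `dbadapter`/`cometbft-db` setup the tests in this PR use; the key/value literals are purely illustrative.

```go
package main

import (
	"fmt"

	dbm "github.com/cometbft/cometbft-db"
	"github.com/cosmos/cosmos-sdk/store/dbadapter"

	"github.com/evmos/ethermint/store/cachekv"
)

func main() {
	// Parent store backed by an in-memory DB, with one committed entry.
	mem := dbadapter.Store{DB: dbm.NewMemDB()}
	mem.Set([]byte("a"), []byte("committed"))

	// Wrap it in a CacheKVStore: reads fall through and are cached,
	// while writes and deletes are buffered and do not touch the parent.
	cache := cachekv.NewStore(mem)
	cache.Set([]byte("b"), []byte("new"))
	cache.Delete([]byte("a"))

	fmt.Println(string(mem.Get([]byte("a")))) // "committed": parent is untouched
	fmt.Println(cache.Get([]byte("a")) == nil) // true: the deletion is buffered in the cache

	// Iteration merges the parent and the cache; the buffered delete of "a"
	// shadows the parent entry, so only "b" is produced.
	it := cache.Iterator(nil, nil)
	for ; it.Valid(); it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
	_ = it.Close()

	// Write flushes the buffered changes to the parent in ascending key order.
	cache.Write()
	fmt.Println(mem.Get([]byte("a")) == nil)  // true: the delete was applied to the parent
	fmt.Println(string(mem.Get([]byte("b")))) // "new"
}
```

Dropping the cache store without calling `Write()` simply discards the buffered changes, which is how the state updates of a failed transaction are thrown away.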
\ No newline at end of file diff --git a/store/cachekv/bench_helper_test.go b/store/cachekv/bench_helper_test.go new file mode 100644 index 0000000000..fe5be27fab --- /dev/null +++ b/store/cachekv/bench_helper_test.go @@ -0,0 +1,44 @@ +package cachekv_test + +import "crypto/rand" + +func randSlice(sliceSize int) []byte { + bz := make([]byte, sliceSize) + _, _ = rand.Read(bz) + return bz +} + +func incrementByteSlice(bz []byte) { + for index := len(bz) - 1; index >= 0; index-- { + if bz[index] < 255 { + bz[index]++ + break + } else { + bz[index] = 0 + } + } +} + +// Generate many keys starting at startKey, and are in sequential order +func generateSequentialKeys(startKey []byte, numKeys int) [][]byte { + toReturn := make([][]byte, 0, numKeys) + cur := make([]byte, len(startKey)) + copy(cur, startKey) + for i := 0; i < numKeys; i++ { + newKey := make([]byte, len(startKey)) + copy(newKey, cur) + toReturn = append(toReturn, newKey) + incrementByteSlice(cur) + } + return toReturn +} + +// Generate many random, unsorted keys +func generateRandomKeys(keySize int, numKeys int) [][]byte { + toReturn := make([][]byte, 0, numKeys) + for i := 0; i < numKeys; i++ { + newKey := randSlice(keySize) + toReturn = append(toReturn, newKey) + } + return toReturn +} diff --git a/store/cachekv/benchmark_test.go b/store/cachekv/benchmark_test.go new file mode 100644 index 0000000000..7ff66e581a --- /dev/null +++ b/store/cachekv/benchmark_test.go @@ -0,0 +1,133 @@ +package cachekv_test + +import ( + fmt "fmt" + "testing" + + dbm "github.com/cometbft/cometbft-db" + "github.com/cosmos/cosmos-sdk/store/dbadapter" + "github.com/evmos/ethermint/store/cachekv" + "github.com/stretchr/testify/require" +) + +func DoBenchmarkDeepCacheStack(b *testing.B, depth int) { + db := dbm.NewMemDB() + initialStore := cachekv.NewStore(dbadapter.Store{DB: db}) + + nItems := 20 + for i := 0; i < nItems; i++ { + initialStore.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{0}) + } + + var stack CacheStack + stack.Reset(initialStore) + + for i := 0; i < depth; i++ { + stack.Snapshot() + + store := stack.CurrentStore() + store.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{byte(i)}) + } + + store := stack.CurrentStore() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + it := store.Iterator(nil, nil) + items := make([][]byte, 0, nItems) + for ; it.Valid(); it.Next() { + items = append(items, it.Key()) + it.Value() + } + it.Close() + require.Equal(b, nItems, len(items)) + } +} + +func BenchmarkDeepCacheStack1(b *testing.B) { + DoBenchmarkDeepCacheStack(b, 1) +} + +func BenchmarkDeepCacheStack3(b *testing.B) { + DoBenchmarkDeepCacheStack(b, 3) +} + +func BenchmarkDeepCacheStack10(b *testing.B) { + DoBenchmarkDeepCacheStack(b, 10) +} + +func BenchmarkDeepCacheStack13(b *testing.B) { + DoBenchmarkDeepCacheStack(b, 13) +} + +// CacheStack manages a stack of nested cache store to +// support the evm `StateDB`'s `Snapshot` and `RevertToSnapshot` methods. +type CacheStack struct { + initialStore *cachekv.Store + // Context of the initial state before transaction execution. + // It's the context used by `StateDB.CommitedState`. + cacheStores []*cachekv.Store +} + +// CurrentContext returns the top context of cached stack, +// if the stack is empty, returns the initial context. +func (cs *CacheStack) CurrentStore() *cachekv.Store { + l := len(cs.cacheStores) + if l == 0 { + return cs.initialStore + } + return cs.cacheStores[l-1] +} + +// Reset sets the initial context and clear the cache context stack. 
+func (cs *CacheStack) Reset(initialStore *cachekv.Store) { + cs.initialStore = initialStore + cs.cacheStores = nil +} + +// IsEmpty returns true if the cache context stack is empty. +func (cs *CacheStack) IsEmpty() bool { + return len(cs.cacheStores) == 0 +} + +// Commit commits all the cached contexts from top to bottom in order and clears the stack by setting an empty slice of cache contexts. +func (cs *CacheStack) Commit() { + // commit in order from top to bottom + for i := len(cs.cacheStores) - 1; i >= 0; i-- { + cs.cacheStores[i].Write() + } + cs.cacheStores = nil +} + +// CommitToRevision commit the cache after the target revision, +// to improve efficiency of db operations. +func (cs *CacheStack) CommitToRevision(target int) error { + if target < 0 || target >= len(cs.cacheStores) { + return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores)) + } + + // commit in order from top to bottom + for i := len(cs.cacheStores) - 1; i > target; i-- { + cs.cacheStores[i].Write() + } + cs.cacheStores = cs.cacheStores[0 : target+1] + + return nil +} + +// Snapshot pushes a new cached context to the stack, +// and returns the index of it. +func (cs *CacheStack) Snapshot() int { + cs.cacheStores = append(cs.cacheStores, cs.CurrentStore().Clone()) + return len(cs.cacheStores) - 1 +} + +// RevertToSnapshot pops all the cached contexts after the target index (inclusive). +// the target should be snapshot index returned by `Snapshot`. +// This function panics if the index is out of bounds. +func (cs *CacheStack) RevertToSnapshot(target int) { + if target < 0 || target >= len(cs.cacheStores) { + panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores))) + } + cs.cacheStores = cs.cacheStores[:target] +} diff --git a/store/cachekv/internal/btree.go b/store/cachekv/internal/btree.go new file mode 100644 index 0000000000..0de7a1376d --- /dev/null +++ b/store/cachekv/internal/btree.go @@ -0,0 +1,101 @@ +package internal + +import ( + "bytes" + "errors" + + "github.com/cosmos/cosmos-sdk/store/types" + "github.com/tidwall/btree" +) + +const ( + // The approximate number of items and children per B-tree node. Tuned with benchmarks. + // copied from memdb. + bTreeDegree = 32 +) + +var errKeyEmpty = errors.New("key cannot be empty") + +// BTree implements the sorted cache for cachekv store, +// we don't use MemDB here because cachekv is used extensively in sdk core path, +// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. +// +// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. +type BTree struct { + tree *btree.BTreeG[item] +} + +// NewBTree creates a wrapper around `btree.BTreeG`. 
+func NewBTree() BTree { + return BTree{ + tree: btree.NewBTreeGOptions(byKeys, btree.Options{ + Degree: bTreeDegree, + NoLocks: false, + }), + } +} + +func (bt BTree) Set(key, value []byte, dirty bool) { + bt.tree.Set(item{key: key, value: value, dirty: dirty}) +} + +func (bt BTree) Get(key []byte) ([]byte, bool) { + i, found := bt.tree.Get(newItem(key)) + if !found { + return nil, false + } + return i.value, true +} + +func (bt BTree) Delete(key []byte) { + bt.tree.Delete(newItem(key)) +} + +func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, errKeyEmpty + } + return newMemIterator(start, end, bt, true), nil +} + +func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, errKeyEmpty + } + return newMemIterator(start, end, bt, false), nil +} + +// ScanDirtyItems iterate over the dirty entries. +func (bt BTree) ScanDirtyItems(fn func(key, value []byte)) { + bt.tree.Scan(func(item item) bool { + if item.dirty { + fn(item.key, item.value) + } + return true + }) +} + +// Copy the tree. This is a copy-on-write operation and is very fast because +// it only performs a shadowed copy. +func (bt BTree) Copy() BTree { + return BTree{ + tree: bt.tree.Copy(), + } +} + +// item is a btree item with byte slices as keys and values +type item struct { + key []byte + value []byte + dirty bool +} + +// byKeys compares the items by key +func byKeys(a, b item) bool { + return bytes.Compare(a.key, b.key) == -1 +} + +// newItem creates a new pair item. +func newItem(key []byte) item { + return item{key: key} +} diff --git a/store/cachekv/internal/btree_test.go b/store/cachekv/internal/btree_test.go new file mode 100644 index 0000000000..bd19449ba4 --- /dev/null +++ b/store/cachekv/internal/btree_test.go @@ -0,0 +1,208 @@ +package internal + +import ( + "testing" + + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" +) + +func TestGetSetDelete(t *testing.T) { + db := NewBTree() + + // A nonexistent key should return nil. + value, found := db.Get([]byte("a")) + require.Nil(t, value) + require.False(t, found) + + // Set and get a value. + db.Set([]byte("a"), []byte{0x01}, true) + db.Set([]byte("b"), []byte{0x02}, true) + value, found = db.Get([]byte("a")) + require.Equal(t, []byte{0x01}, value) + require.True(t, found) + + value, found = db.Get([]byte("b")) + require.Equal(t, []byte{0x02}, value) + require.True(t, found) + + // Deleting a non-existent value is fine. + db.Delete([]byte("x")) + + // Delete a value. + db.Delete([]byte("a")) + + value, found = db.Get([]byte("a")) + require.Nil(t, value) + require.False(t, found) + + db.Delete([]byte("b")) + + value, found = db.Get([]byte("b")) + require.Nil(t, value) + require.False(t, found) +} + +func TestDBIterator(t *testing.T) { + db := NewBTree() + + for i := 0; i < 10; i++ { + if i != 6 { // but skip 6. 
+ db.Set(int642Bytes(int64(i)), []byte{}, true) + } + } + + // Blank iterator keys should error + _, err := db.ReverseIterator([]byte{}, nil) + require.Equal(t, errKeyEmpty, err) + _, err = db.ReverseIterator(nil, []byte{}) + require.Equal(t, errKeyEmpty, err) + + itr, err := db.Iterator(nil, nil) + require.NoError(t, err) + verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") + + ritr, err := db.ReverseIterator(nil, nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") + + itr, err = db.Iterator(nil, int642Bytes(0)) + require.NoError(t, err) + verifyIterator(t, itr, []int64(nil), "forward iterator to 0") + + ritr, err = db.ReverseIterator(int642Bytes(10), nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)") + + itr, err = db.Iterator(int642Bytes(0), nil) + require.NoError(t, err) + verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") + + itr, err = db.Iterator(int642Bytes(1), nil) + require.NoError(t, err) + verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") + + ritr, err = db.ReverseIterator(nil, int642Bytes(10)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") + + ritr, err = db.ReverseIterator(nil, int642Bytes(9)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") + + ritr, err = db.ReverseIterator(nil, int642Bytes(8)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") + + itr, err = db.Iterator(int642Bytes(5), int642Bytes(6)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6") + + itr, err = db.Iterator(int642Bytes(5), int642Bytes(7)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7") + + itr, err = db.Iterator(int642Bytes(5), int642Bytes(8)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8") + + itr, err = db.Iterator(int642Bytes(6), int642Bytes(7)) + require.NoError(t, err) + verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7") + + itr, err = db.Iterator(int642Bytes(6), int642Bytes(8)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8") + + itr, err = db.Iterator(int642Bytes(7), int642Bytes(8)) + require.NoError(t, err) + verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8") + + ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5)) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4") + + ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{5, 4}, "reverse iterator from 6 (ex) to 4") + + ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(7)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{5, 4}, "reverse iterator from 7 (ex) to 4") + + ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6)) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{5}, "reverse iterator from 6 (ex) to 5") + + ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7)) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{5}, "reverse iterator from 7 (ex) to 5") + + ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7)) + 
require.NoError(t, err) + verifyIterator(t, ritr, + []int64(nil), "reverse iterator from 7 (ex) to 6") + + ritr, err = db.ReverseIterator(int642Bytes(10), nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10") + + ritr, err = db.ReverseIterator(int642Bytes(6), nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6") + + ritr, err = db.ReverseIterator(int642Bytes(5), nil) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5") + + ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9)) + require.NoError(t, err) + verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8") + + ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64{3, 2}, "reverse iterator from 4 (ex) to 2") + + ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2)) + require.NoError(t, err) + verifyIterator(t, ritr, + []int64(nil), "reverse iterator from 2 (ex) to 4") + + // Ensure that the iterators don't panic with an empty database. + db2 := NewBTree() + + itr, err = db2.Iterator(nil, nil) + require.NoError(t, err) + verifyIterator(t, itr, nil, "forward iterator with empty db") + + ritr, err = db2.ReverseIterator(nil, nil) + require.NoError(t, err) + verifyIterator(t, ritr, nil, "reverse iterator with empty db") +} + +func verifyIterator(t *testing.T, itr types.Iterator, expected []int64, msg string) { + i := 0 + for itr.Valid() { + key := itr.Key() + require.Equal(t, expected[i], bytes2Int64(key), "iterator: %d mismatches", i) + itr.Next() + i++ + } + require.Equal(t, i, len(expected), "expected to have fully iterated over all the elements in iter") + require.NoError(t, itr.Close()) +} + +func int642Bytes(i int64) []byte { + return sdk.Uint64ToBigEndian(uint64(i)) +} + +func bytes2Int64(buf []byte) int64 { + return int64(sdk.BigEndianToUint64(buf)) +} diff --git a/store/cachekv/internal/memiterator.go b/store/cachekv/internal/memiterator.go new file mode 100644 index 0000000000..76f5b158f0 --- /dev/null +++ b/store/cachekv/internal/memiterator.go @@ -0,0 +1,119 @@ +package internal + +import ( + "bytes" + "errors" + + "github.com/cosmos/cosmos-sdk/store/types" + "github.com/tidwall/btree" +) + +var _ types.Iterator = (*memIterator)(nil) + +// memIterator iterates over iterKVCache items. +// if value is nil, means it was deleted. +// Implements Iterator. 
+type memIterator struct { + iter btree.IterG[item] + + start []byte + end []byte + ascending bool + valid bool +} + +func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator { + iter := items.tree.Iter() + var valid bool + if ascending { + if start != nil { + valid = iter.Seek(newItem(start)) + } else { + valid = iter.First() + } + } else { + if end != nil { + valid = iter.Seek(newItem(end)) + if !valid { + valid = iter.Last() + } else { + // end is exclusive + valid = iter.Prev() + } + } else { + valid = iter.Last() + } + } + + mi := &memIterator{ + iter: iter, + start: start, + end: end, + ascending: ascending, + valid: valid, + } + + if mi.valid { + mi.valid = mi.keyInRange(mi.Key()) + } + + return mi +} + +func (mi *memIterator) Domain() (start []byte, end []byte) { + return mi.start, mi.end +} + +func (mi *memIterator) Close() error { + mi.iter.Release() + return nil +} + +func (mi *memIterator) Error() error { + if !mi.Valid() { + return errors.New("invalid memIterator") + } + return nil +} + +func (mi *memIterator) Valid() bool { + return mi.valid +} + +func (mi *memIterator) Next() { + mi.assertValid() + + if mi.ascending { + mi.valid = mi.iter.Next() + } else { + mi.valid = mi.iter.Prev() + } + + if mi.valid { + mi.valid = mi.keyInRange(mi.Key()) + } +} + +func (mi *memIterator) keyInRange(key []byte) bool { + if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 { + return false + } + if !mi.ascending && mi.start != nil && bytes.Compare(key, mi.start) < 0 { + return false + } + return true +} + +func (mi *memIterator) Key() []byte { + return mi.iter.Item().key +} + +func (mi *memIterator) Value() []byte { + return mi.iter.Item().value +} + +func (mi *memIterator) assertValid() { + if err := mi.Error(); err != nil { + panic(err) + } +} diff --git a/store/cachekv/internal/mergeiterator.go b/store/cachekv/internal/mergeiterator.go new file mode 100644 index 0000000000..293bc968e7 --- /dev/null +++ b/store/cachekv/internal/mergeiterator.go @@ -0,0 +1,235 @@ +package internal + +import ( + "bytes" + "errors" + + "github.com/cosmos/cosmos-sdk/store/types" +) + +// cacheMergeIterator merges a parent Iterator and a cache Iterator. +// The cache iterator may return nil keys to signal that an item +// had been deleted (but not deleted in the parent). +// If the cache iterator has the same key as the parent, the +// cache shadows (overrides) the parent. +// +// TODO: Optimize by memoizing. +type cacheMergeIterator struct { + parent types.Iterator + cache types.Iterator + ascending bool + + valid bool +} + +var _ types.Iterator = (*cacheMergeIterator)(nil) + +func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) *cacheMergeIterator { //nolint:revive + iter := &cacheMergeIterator{ + parent: parent, + cache: cache, + ascending: ascending, + } + + iter.valid = iter.skipUntilExistsOrInvalid() + return iter +} + +// Domain implements Iterator. +// Returns parent domain because cache and parent domains are the same. +func (iter *cacheMergeIterator) Domain() (start, end []byte) { + return iter.parent.Domain() +} + +// Valid implements Iterator. +func (iter *cacheMergeIterator) Valid() bool { + return iter.valid +} + +// Next implements Iterator +func (iter *cacheMergeIterator) Next() { + iter.assertValid() + + switch { + case !iter.parent.Valid(): + // If parent is invalid, get the next cache item. + iter.cache.Next() + case !iter.cache.Valid(): + // If cache is invalid, get the next parent item. 
+ iter.parent.Next() + default: + // Both are valid. Compare keys. + keyP, keyC := iter.parent.Key(), iter.cache.Key() + switch iter.compare(keyP, keyC) { + case -1: // parent < cache + iter.parent.Next() + case 0: // parent == cache + iter.parent.Next() + iter.cache.Next() + case 1: // parent > cache + iter.cache.Next() + } + } + iter.valid = iter.skipUntilExistsOrInvalid() +} + +// Key implements Iterator +func (iter *cacheMergeIterator) Key() []byte { + iter.assertValid() + + // If parent is invalid, get the cache key. + if !iter.parent.Valid() { + return iter.cache.Key() + } + + // If cache is invalid, get the parent key. + if !iter.cache.Valid() { + return iter.parent.Key() + } + + // Both are valid. Compare keys. + keyP, keyC := iter.parent.Key(), iter.cache.Key() + + cmp := iter.compare(keyP, keyC) + switch cmp { + case -1: // parent < cache + return keyP + case 0: // parent == cache + return keyP + case 1: // parent > cache + return keyC + default: + panic("invalid compare result") + } +} + +// Value implements Iterator +func (iter *cacheMergeIterator) Value() []byte { + iter.assertValid() + + // If parent is invalid, get the cache value. + if !iter.parent.Valid() { + return iter.cache.Value() + } + + // If cache is invalid, get the parent value. + if !iter.cache.Valid() { + return iter.parent.Value() + } + + // Both are valid. Compare keys. + keyP, keyC := iter.parent.Key(), iter.cache.Key() + + cmp := iter.compare(keyP, keyC) + switch cmp { + case -1: // parent < cache + return iter.parent.Value() + case 0: // parent == cache + return iter.cache.Value() + case 1: // parent > cache + return iter.cache.Value() + default: + panic("invalid comparison result") + } +} + +// Close implements Iterator +func (iter *cacheMergeIterator) Close() error { + err1 := iter.cache.Close() + if err := iter.parent.Close(); err != nil { + return err + } + + return err1 +} + +// Error returns an error if the cacheMergeIterator is invalid defined by the +// Valid method. +func (iter *cacheMergeIterator) Error() error { + if !iter.Valid() { + return errors.New("invalid cacheMergeIterator") + } + + return nil +} + +// If not valid, panics. +// NOTE: May have side-effect of iterating over cache. +func (iter *cacheMergeIterator) assertValid() { + if err := iter.Error(); err != nil { + panic(err) + } +} + +// Like bytes.Compare but opposite if not ascending. +func (iter *cacheMergeIterator) compare(a, b []byte) int { + if iter.ascending { + return bytes.Compare(a, b) + } + + return bytes.Compare(a, b) * -1 +} + +// Skip all delete-items from the cache w/ `key < until`. After this function, +// current cache item is a non-delete-item, or `until <= key`. +// If the current cache item is not a delete item, does nothing. +// If `until` is nil, there is no limit, and cache may end up invalid. +// CONTRACT: cache is valid. +func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { + for iter.cache.Valid() && + iter.cache.Value() == nil && + (until == nil || iter.compare(iter.cache.Key(), until) < 0) { + iter.cache.Next() + } +} + +// Fast forwards cache (or parent+cache in case of deleted items) until current +// item exists, or until iterator becomes invalid. +// Returns whether the iterator is valid. +func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { + for { + // If parent is invalid, fast-forward cache. + if !iter.parent.Valid() { + iter.skipCacheDeletes(nil) + return iter.cache.Valid() + } + // Parent is valid. 
+ + if !iter.cache.Valid() { + return true + } + // Parent is valid, cache is valid. + + // Compare parent and cache. + keyP := iter.parent.Key() + keyC := iter.cache.Key() + + switch iter.compare(keyP, keyC) { + case -1: // parent < cache. + return true + + case 0: // parent == cache. + // Skip over if cache item is a delete. + valueC := iter.cache.Value() + if valueC == nil { + iter.parent.Next() + iter.cache.Next() + + continue + } + // Cache is not a delete. + + return true // cache exists. + case 1: // cache < parent + // Skip over if cache item is a delete. + valueC := iter.cache.Value() + if valueC == nil { + iter.skipCacheDeletes(keyP) + continue + } + // Cache is not a delete. + + return true // cache exists. + } + } +} diff --git a/store/cachekv/store.go b/store/cachekv/store.go new file mode 100644 index 0000000000..20c6bc0b62 --- /dev/null +++ b/store/cachekv/store.go @@ -0,0 +1,179 @@ +package cachekv + +import ( + "io" + "sync" + + "github.com/cosmos/cosmos-sdk/store/tracekv" + "github.com/cosmos/cosmos-sdk/store/types" + "github.com/evmos/ethermint/store/cachekv/internal" +) + +// Store wraps an in-memory cache around an underlying types.KVStore. +type Store struct { + mtx sync.Mutex + cache internal.BTree // always ascending sorted + parent types.KVStore +} + +var _ types.CacheKVStore = (*Store)(nil) + +// NewStore creates a new Store object +func NewStore(parent types.KVStore) *Store { + return &Store{ + cache: internal.NewBTree(), + parent: parent, + } +} + +// GetStoreType implements Store. +func (store *Store) GetStoreType() types.StoreType { + return store.parent.GetStoreType() +} + +// Clone creates a snapshot of the cache store. +// This is a copy-on-write operation and is very fast because +// it only performs a shadowed copy. +func (store *Store) Clone() *Store { + store.mtx.Lock() + defer store.mtx.Unlock() + + return &Store{ + cache: store.cache.Copy(), + parent: store.parent, + } +} + +// swapCache swap out the internal cache store and leave the current store in a unusable state. +func (store *Store) swapCache() internal.BTree { + store.mtx.Lock() + defer store.mtx.Unlock() + + cache := store.cache + store.cache = internal.BTree{} + return cache +} + +// Restore restores the store cache to a given snapshot. +func (store *Store) Restore(s types.CacheKVStore) { + cache := s.(*Store).swapCache() + + store.mtx.Lock() + defer store.mtx.Unlock() + + store.cache = cache +} + +// Get implements types.KVStore. +func (store *Store) Get(key []byte) (value []byte) { + store.mtx.Lock() + defer store.mtx.Unlock() + + types.AssertValidKey(key) + + if value, found := store.cache.Get(key); found { + return value + } + value = store.parent.Get(key) + store.setCacheValue(key, value, false) + return value +} + +// Set implements types.KVStore. +func (store *Store) Set(key []byte, value []byte) { + store.mtx.Lock() + defer store.mtx.Unlock() + + types.AssertValidKey(key) + types.AssertValidValue(value) + + store.setCacheValue(key, value, true) +} + +// Has implements types.KVStore. +func (store *Store) Has(key []byte) bool { + value := store.Get(key) + return value != nil +} + +// Delete implements types.KVStore. +func (store *Store) Delete(key []byte) { + store.mtx.Lock() + defer store.mtx.Unlock() + + types.AssertValidKey(key) + store.setCacheValue(key, nil, true) +} + +// Implements Cachetypes.KVStore. 
+func (store *Store) Write() { + store.mtx.Lock() + defer store.mtx.Unlock() + + store.cache.ScanDirtyItems(func(key, value []byte) { + if value == nil { + store.parent.Delete(key) + } else { + store.parent.Set(key, value) + } + }) + + store.cache = internal.NewBTree() +} + +// CacheWrap implements CacheWrapper. +func (store *Store) CacheWrap() types.CacheWrap { + return NewStore(store) +} + +// CacheWrapWithTrace implements the CacheWrapper interface. +func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { + return NewStore(tracekv.NewStore(store, w, tc)) +} + +//---------------------------------------- +// Iteration + +// Iterator implements types.KVStore. +func (store *Store) Iterator(start, end []byte) types.Iterator { + return store.iterator(start, end, true) +} + +// ReverseIterator implements types.KVStore. +func (store *Store) ReverseIterator(start, end []byte) types.Iterator { + return store.iterator(start, end, false) +} + +func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { + store.mtx.Lock() + defer store.mtx.Unlock() + + isoSortedCache := store.cache.Copy() + + var ( + err error + parent, cache types.Iterator + ) + + if ascending { + parent = store.parent.Iterator(start, end) + cache, err = isoSortedCache.Iterator(start, end) + } else { + parent = store.parent.ReverseIterator(start, end) + cache, err = isoSortedCache.ReverseIterator(start, end) + } + if err != nil { + panic(err) + } + + return internal.NewCacheMergeIterator(parent, cache, ascending) +} + +//---------------------------------------- +// etc + +// Only entrypoint to mutate store.cache. +// A `nil` value means a deletion. +func (store *Store) setCacheValue(key, value []byte, dirty bool) { + store.cache.Set(key, value, dirty) +} diff --git a/store/cachekv/store_bench_test.go b/store/cachekv/store_bench_test.go new file mode 100644 index 0000000000..abfc63d99c --- /dev/null +++ b/store/cachekv/store_bench_test.go @@ -0,0 +1,149 @@ +package cachekv_test + +import ( + "testing" + + dbm "github.com/cometbft/cometbft-db" + + "github.com/cosmos/cosmos-sdk/store/dbadapter" + "github.com/evmos/ethermint/store/cachekv" +) + +var sink interface{} + +const defaultValueSizeBz = 1 << 12 + +// This benchmark measures the time of iterator.Next() when the parent store is blank +func benchmarkBlankParentIteratorNext(b *testing.B, keysize int) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + kvstore := cachekv.NewStore(mem) + // Use a singleton for value, to not waste time computing it + value := randSlice(defaultValueSizeBz) + // Use simple values for keys, pick a random start, + // and take next b.N keys sequentially after.] + startKey := randSlice(32) + + // Add 1 to avoid issues when b.N = 1 + keys := generateSequentialKeys(startKey, b.N+1) + for _, k := range keys { + kvstore.Set(k, value) + } + + b.ReportAllocs() + b.ResetTimer() + + iter := kvstore.Iterator(keys[0], keys[b.N]) + defer iter.Close() + + for ; iter.Valid(); iter.Next() { + _ = iter.Key() + // deadcode elimination stub + sink = iter + } +} + +// Benchmark setting New keys to a store, where the new keys are in sequence. +func benchmarkBlankParentAppend(b *testing.B, keysize int) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + kvstore := cachekv.NewStore(mem) + + // Use a singleton for value, to not waste time computing it + value := randSlice(32) + // Use simple values for keys, pick a random start, + // and take next b.N keys sequentially after. 
+ startKey := randSlice(32) + + keys := generateSequentialKeys(startKey, b.N) + + b.ReportAllocs() + b.ResetTimer() + + for _, k := range keys { + kvstore.Set(k, value) + } +} + +// Benchmark setting New keys to a store, where the new keys are random. +// the speed of this function does not depend on the values in the parent store +func benchmarkRandomSet(b *testing.B, keysize int) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + kvstore := cachekv.NewStore(mem) + + // Use a singleton for value, to not waste time computing it + value := randSlice(defaultValueSizeBz) + // Add 1 to avoid issues when b.N = 1 + keys := generateRandomKeys(keysize, b.N+1) + + b.ReportAllocs() + b.ResetTimer() + + for _, k := range keys { + kvstore.Set(k, value) + } + + iter := kvstore.Iterator(keys[0], keys[b.N]) + defer iter.Close() + + for ; iter.Valid(); iter.Next() { + _ = iter.Key() + // deadcode elimination stub + sink = iter + } +} + +// Benchmark creating an iterator on a parent with D entries, +// that are all deleted in the cacheKV store. +// We essentially are benchmarking the cacheKV iterator creation & iteration times +// with the number of entries deleted in the parent. +func benchmarkIteratorOnParentWithManyDeletes(b *testing.B, numDeletes int) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + + // Use a singleton for value, to not waste time computing it + value := randSlice(32) + // Use simple values for keys, pick a random start, + // and take next D keys sequentially after. + startKey := randSlice(32) + // Add 1 to avoid issues when numDeletes = 1 + keys := generateSequentialKeys(startKey, numDeletes+1) + // setup parent db with D keys. + for _, k := range keys { + mem.Set(k, value) + } + kvstore := cachekv.NewStore(mem) + // Delete all keys from the cache KV store. + // The keys[1:] is to keep at least one entry in parent, due to a bug in the SDK iterator design. + // Essentially the iterator will never be valid, in that it should never run. + // However, this is incompatible with the for loop structure the SDK uses, hence + // causes a panic. Thus we do keys[1:]. + for _, k := range keys[1:] { + kvstore.Delete(k) + } + + b.ReportAllocs() + b.ResetTimer() + + iter := kvstore.Iterator(keys[0], keys[numDeletes]) + defer iter.Close() + + for ; iter.Valid(); iter.Next() { + _ = iter.Key() + // deadcode elimination stub + sink = iter + } +} + +func BenchmarkBlankParentIteratorNextKeySize32(b *testing.B) { + benchmarkBlankParentIteratorNext(b, 32) +} + +func BenchmarkBlankParentAppendKeySize32(b *testing.B) { + benchmarkBlankParentAppend(b, 32) +} + +func BenchmarkSetKeySize32(b *testing.B) { + benchmarkRandomSet(b, 32) +} + +func BenchmarkIteratorOnParentWith1MDeletes(b *testing.B) { + benchmarkIteratorOnParentWithManyDeletes(b, 1_000_000) +} diff --git a/store/cachekv/store_test.go b/store/cachekv/store_test.go new file mode 100644 index 0000000000..78db8f7385 --- /dev/null +++ b/store/cachekv/store_test.go @@ -0,0 +1,707 @@ +package cachekv_test + +import ( + "fmt" + "testing" + + dbm "github.com/cometbft/cometbft-db" + tmrand "github.com/cometbft/cometbft/libs/rand" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/store/dbadapter" + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/evmos/ethermint/store/cachekv" +) + +func newCacheKVStore() types.CacheKVStore { + // create two layer of cache store to better emulate the real world. 
+ mem := dbadapter.Store{DB: dbm.NewMemDB()} + deliverState := cachekv.NewStore(mem) + return deliverState.Clone() +} + +func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } +func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } + +func TestCacheKVStore(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + st := cachekv.NewStore(mem) + + require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") + + // put something in mem and in cache + mem.Set(keyFmt(1), valFmt(1)) + st.Set(keyFmt(1), valFmt(1)) + require.Equal(t, valFmt(1), st.Get(keyFmt(1))) + + // update it in cache, shoudn't change mem + st.Set(keyFmt(1), valFmt(2)) + require.Equal(t, valFmt(2), st.Get(keyFmt(1))) + require.Equal(t, valFmt(1), mem.Get(keyFmt(1))) + + // write it. should change mem + st.Write() + require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) + require.Equal(t, valFmt(2), st.Get(keyFmt(1))) + + // more writes and checks + st.Write() + st.Write() + require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) + require.Equal(t, valFmt(2), st.Get(keyFmt(1))) + + // make a new one, check it + st = cachekv.NewStore(mem) + require.Equal(t, valFmt(2), st.Get(keyFmt(1))) + + // make a new one and delete - should not be removed from mem + st = cachekv.NewStore(mem) + st.Delete(keyFmt(1)) + require.Empty(t, st.Get(keyFmt(1))) + require.Equal(t, mem.Get(keyFmt(1)), valFmt(2)) + + // Write. should now be removed from both + st.Write() + require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") + require.Empty(t, mem.Get(keyFmt(1)), "Expected `key1` to be empty") +} + +func TestCacheKVStoreNoNilSet(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + st := cachekv.NewStore(mem) + require.Panics(t, func() { st.Set([]byte("key"), nil) }, "setting a nil value should panic") + require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") + require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") +} + +func TestCacheKVStoreNested(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + st := cachekv.NewStore(mem) + + // set. check its there on st and not on mem. + st.Set(keyFmt(1), valFmt(1)) + require.Empty(t, mem.Get(keyFmt(1))) + require.Equal(t, valFmt(1), st.Get(keyFmt(1))) + + // make a new from st and check + st2 := cachekv.NewStore(st) + require.Equal(t, valFmt(1), st2.Get(keyFmt(1))) + + // update the value on st2, check it only effects st2 + st2.Set(keyFmt(1), valFmt(3)) + require.Equal(t, []byte(nil), mem.Get(keyFmt(1))) + require.Equal(t, valFmt(1), st.Get(keyFmt(1))) + require.Equal(t, valFmt(3), st2.Get(keyFmt(1))) + + // st2 writes to its parent, st. 
doesnt effect mem + st2.Write() + require.Equal(t, []byte(nil), mem.Get(keyFmt(1))) + require.Equal(t, valFmt(3), st.Get(keyFmt(1))) + + // updates mem + st.Write() + require.Equal(t, valFmt(3), mem.Get(keyFmt(1))) +} + +func TestCacheKVIteratorBounds(t *testing.T) { + st := newCacheKVStore() + + // set some items + nItems := 5 + for i := 0; i < nItems; i++ { + st.Set(keyFmt(i), valFmt(i)) + } + + // iterate over all of them + itr := st.Iterator(nil, nil) + i := 0 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(i), k) + require.Equal(t, valFmt(i), v) + i++ + } + require.Equal(t, nItems, i) + require.NoError(t, itr.Close()) + + // iterate over none + itr = st.Iterator(bz("money"), nil) + i = 0 + for ; itr.Valid(); itr.Next() { + i++ + } + require.Equal(t, 0, i) + require.NoError(t, itr.Close()) + + // iterate over lower + itr = st.Iterator(keyFmt(0), keyFmt(3)) + i = 0 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(i), k) + require.Equal(t, valFmt(i), v) + i++ + } + require.Equal(t, 3, i) + require.NoError(t, itr.Close()) + + // iterate over upper + itr = st.Iterator(keyFmt(2), keyFmt(4)) + i = 2 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(i), k) + require.Equal(t, valFmt(i), v) + i++ + } + require.Equal(t, 4, i) + require.NoError(t, itr.Close()) +} + +func TestCacheKVReverseIteratorBounds(t *testing.T) { + st := newCacheKVStore() + + // set some items + nItems := 5 + for i := 0; i < nItems; i++ { + st.Set(keyFmt(i), valFmt(i)) + } + + // iterate over all of them + itr := st.ReverseIterator(nil, nil) + i := 0 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(nItems-1-i), k) + require.Equal(t, valFmt(nItems-1-i), v) + i++ + } + require.Equal(t, nItems, i) + require.NoError(t, itr.Close()) + + // iterate over none + itr = st.ReverseIterator(bz("money"), nil) + i = 0 + for ; itr.Valid(); itr.Next() { + i++ + } + require.Equal(t, 0, i) + require.NoError(t, itr.Close()) + + // iterate over lower + end := 3 + itr = st.ReverseIterator(keyFmt(0), keyFmt(end)) + i = 0 + for ; itr.Valid(); itr.Next() { + i++ + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(end-i), k) + require.Equal(t, valFmt(end-i), v) + } + require.Equal(t, 3, i) + require.NoError(t, itr.Close()) + + // iterate over upper + end = 4 + itr = st.ReverseIterator(keyFmt(2), keyFmt(end)) + i = 0 + for ; itr.Valid(); itr.Next() { + i++ + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(end-i), k) + require.Equal(t, valFmt(end-i), v) + } + require.Equal(t, 2, i) + require.NoError(t, itr.Close()) +} + +func TestCacheKVMergeIteratorBasics(t *testing.T) { + st := newCacheKVStore() + + // set and delete an item in the cache, iterator should be empty + k, v := keyFmt(0), valFmt(0) + st.Set(k, v) + st.Delete(k) + assertIterateDomain(t, st, 0) + + // now set it and assert its there + st.Set(k, v) + assertIterateDomain(t, st, 1) + + // write it and assert its there + st.Write() + assertIterateDomain(t, st, 1) + + // remove it in cache and assert its not + st.Delete(k) + assertIterateDomain(t, st, 0) + + // write the delete and assert its not there + st.Write() + assertIterateDomain(t, st, 0) + + // add two keys and assert theyre there + k1, v1 := keyFmt(1), valFmt(1) + st.Set(k, v) + st.Set(k1, v1) + assertIterateDomain(t, st, 2) + + // write it and assert theyre there + st.Write() + assertIterateDomain(t, st, 2) + + // remove one in cache and assert 
its not + st.Delete(k1) + assertIterateDomain(t, st, 1) + + // write the delete and assert its not there + st.Write() + assertIterateDomain(t, st, 1) + + // delete the other key in cache and asserts its empty + st.Delete(k) + assertIterateDomain(t, st, 0) +} + +func TestCacheKVMergeIteratorDeleteLast(t *testing.T) { + st := newCacheKVStore() + + // set some items and write them + nItems := 5 + for i := 0; i < nItems; i++ { + st.Set(keyFmt(i), valFmt(i)) + } + st.Write() + + // set some more items and leave dirty + for i := nItems; i < nItems*2; i++ { + st.Set(keyFmt(i), valFmt(i)) + } + + // iterate over all of them + assertIterateDomain(t, st, nItems*2) + + // delete them all + for i := 0; i < nItems*2; i++ { + last := nItems*2 - 1 - i + st.Delete(keyFmt(last)) + assertIterateDomain(t, st, last) + } +} + +func TestCacheKVMergeIteratorDeletes(t *testing.T) { + st := newCacheKVStore() + truth := dbm.NewMemDB() + + // set some items and write them + nItems := 10 + for i := 0; i < nItems; i++ { + doOp(t, st, truth, opSet, i) + } + st.Write() + + // delete every other item, starting from 0 + for i := 0; i < nItems; i += 2 { + doOp(t, st, truth, opDel, i) + assertIterateDomainCompare(t, st, truth) + } + + // reset + st = newCacheKVStore() + truth = dbm.NewMemDB() + + // set some items and write them + for i := 0; i < nItems; i++ { + doOp(t, st, truth, opSet, i) + } + st.Write() + + // delete every other item, starting from 1 + for i := 1; i < nItems; i += 2 { + doOp(t, st, truth, opDel, i) + assertIterateDomainCompare(t, st, truth) + } +} + +func TestCacheKVMergeIteratorChunks(t *testing.T) { + st := newCacheKVStore() + + // Use the truth to check values on the merge iterator + truth := dbm.NewMemDB() + + // sets to the parent + setRange(t, st, truth, 0, 20) + setRange(t, st, truth, 40, 60) + st.Write() + + // sets to the cache + setRange(t, st, truth, 20, 40) + setRange(t, st, truth, 60, 80) + assertIterateDomainCheck(t, st, truth, []keyRange{{0, 80}}) + + // remove some parents and some cache + deleteRange(t, st, truth, 15, 25) + assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 80}}) + + // remove some parents and some cache + deleteRange(t, st, truth, 35, 45) + assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {45, 80}}) + + // write, add more to the cache, and delete some cache + st.Write() + setRange(t, st, truth, 38, 42) + deleteRange(t, st, truth, 40, 43) + assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {38, 40}, {45, 80}}) +} + +func TestCacheKVMergeIteratorDomain(t *testing.T) { + st := newCacheKVStore() + + itr := st.Iterator(nil, nil) + start, end := itr.Domain() + require.Equal(t, start, end) + require.NoError(t, itr.Close()) + + itr = st.Iterator(keyFmt(40), keyFmt(60)) + start, end = itr.Domain() + require.Equal(t, keyFmt(40), start) + require.Equal(t, keyFmt(60), end) + require.NoError(t, itr.Close()) + + start, end = st.ReverseIterator(keyFmt(0), keyFmt(80)).Domain() + require.Equal(t, keyFmt(0), start) + require.Equal(t, keyFmt(80), end) +} + +func TestCacheKVMergeIteratorRandom(t *testing.T) { + st := newCacheKVStore() + truth := dbm.NewMemDB() + + start, end := 25, 975 + max := 1000 + setRange(t, st, truth, start, end) + + // do an op, test the iterator + for i := 0; i < 2000; i++ { + doRandomOp(t, st, truth, max) + assertIterateDomainCompare(t, st, truth) + } +} + +func TestNilEndIterator(t *testing.T) { + const SIZE = 3000 + + tests := []struct { + name string + write bool + startIndex int + end []byte + }{ + {name: 
"write=false, end=nil", write: false, end: nil, startIndex: 1000}, + {name: "write=false, end=nil; full key scan", write: false, end: nil, startIndex: 2000}, + {name: "write=true, end=nil", write: true, end: nil, startIndex: 1000}, + {name: "write=false, end=non-nil", write: false, end: keyFmt(3000), startIndex: 1000}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + st := newCacheKVStore() + + for i := 0; i < SIZE; i++ { + kstr := keyFmt(i) + st.Set(kstr, valFmt(i)) + } + + if tt.write { + st.Write() + } + + itr := st.Iterator(keyFmt(tt.startIndex), tt.end) + i := tt.startIndex + j := 0 + for itr.Valid() { + require.Equal(t, keyFmt(i), itr.Key()) + require.Equal(t, valFmt(i), itr.Value()) + itr.Next() + i++ + j++ + } + + require.Equal(t, SIZE-tt.startIndex, j) + require.NoError(t, itr.Close()) + }) + } +} + +// TestIteratorDeadlock demonstrate the deadlock issue in cache store. +func TestIteratorDeadlock(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + store := cachekv.NewStore(mem) + // the channel buffer is 64 and received once, so put at least 66 elements. + for i := 0; i < 66; i++ { + store.Set([]byte(fmt.Sprintf("key%d", i)), []byte{1}) + } + it := store.Iterator(nil, nil) + defer it.Close() + store.Set([]byte("key20"), []byte{1}) + // it'll be blocked here with previous version, or enable lock on btree. + it2 := store.Iterator(nil, nil) + defer it2.Close() +} + +//------------------------------------------------------------------------------------------- +// do some random ops + +const ( + opSet = 0 + opSetRange = 1 + opDel = 2 + opDelRange = 3 + opWrite = 4 + + totalOps = 5 // number of possible operations +) + +func randInt(n int) int { + return tmrand.NewRand().Int() % n +} + +// useful for replaying a error case if we find one +func doOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, op int, args ...int) { + switch op { + case opSet: + k := args[0] + st.Set(keyFmt(k), valFmt(k)) + err := truth.Set(keyFmt(k), valFmt(k)) + require.NoError(t, err) + case opSetRange: + start := args[0] + end := args[1] + setRange(t, st, truth, start, end) + case opDel: + k := args[0] + st.Delete(keyFmt(k)) + err := truth.Delete(keyFmt(k)) + require.NoError(t, err) + case opDelRange: + start := args[0] + end := args[1] + deleteRange(t, st, truth, start, end) + case opWrite: + st.Write() + } +} + +func doRandomOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, maxKey int) { + r := randInt(totalOps) + switch r { + case opSet: + k := randInt(maxKey) + st.Set(keyFmt(k), valFmt(k)) + err := truth.Set(keyFmt(k), valFmt(k)) + require.NoError(t, err) + case opSetRange: + start := randInt(maxKey - 2) + end := randInt(maxKey-start) + start + setRange(t, st, truth, start, end) + case opDel: + k := randInt(maxKey) + st.Delete(keyFmt(k)) + err := truth.Delete(keyFmt(k)) + require.NoError(t, err) + case opDelRange: + start := randInt(maxKey - 2) + end := randInt(maxKey-start) + start + deleteRange(t, st, truth, start, end) + case opWrite: + st.Write() + } +} + +//------------------------------------------------------------------------------------------- + +// iterate over whole domain +func assertIterateDomain(t *testing.T, st types.KVStore, expectedN int) { + itr := st.Iterator(nil, nil) + i := 0 + for ; itr.Valid(); itr.Next() { + k, v := itr.Key(), itr.Value() + require.Equal(t, keyFmt(i), k) + require.Equal(t, valFmt(i), v) + i++ + } + require.Equal(t, expectedN, i) + require.NoError(t, itr.Close()) +} + +func assertIterateDomainCheck(t *testing.T, st 
types.KVStore, mem dbm.DB, r []keyRange) { + // iterate over each and check they match the other + itr := st.Iterator(nil, nil) + itr2, err := mem.Iterator(nil, nil) // ground truth + require.NoError(t, err) + + krc := newKeyRangeCounter(r) + i := 0 + + for ; krc.valid(); krc.next() { + require.True(t, itr.Valid()) + require.True(t, itr2.Valid()) + + // check the key/val matches the ground truth + k, v := itr.Key(), itr.Value() + k2, v2 := itr2.Key(), itr2.Value() + require.Equal(t, k, k2) + require.Equal(t, v, v2) + + // check they match the counter + require.Equal(t, k, keyFmt(krc.key())) + + itr.Next() + itr2.Next() + i++ + } + + require.False(t, itr.Valid()) + require.False(t, itr2.Valid()) + require.NoError(t, itr.Close()) + require.NoError(t, itr2.Close()) +} + +func assertIterateDomainCompare(t *testing.T, st types.KVStore, mem dbm.DB) { + // iterate over each and check they match the other + itr := st.Iterator(nil, nil) + itr2, err := mem.Iterator(nil, nil) // ground truth + require.NoError(t, err) + checkIterators(t, itr, itr2) + checkIterators(t, itr2, itr) + require.NoError(t, itr.Close()) + require.NoError(t, itr2.Close()) +} + +func checkIterators(t *testing.T, itr, itr2 types.Iterator) { + for ; itr.Valid(); itr.Next() { + require.True(t, itr2.Valid()) + k, v := itr.Key(), itr.Value() + k2, v2 := itr2.Key(), itr2.Value() + require.Equal(t, k, k2) + require.Equal(t, v, v2) + itr2.Next() + } + require.False(t, itr.Valid()) + require.False(t, itr2.Valid()) +} + +//-------------------------------------------------------- + +func setRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { + for i := start; i < end; i++ { + st.Set(keyFmt(i), valFmt(i)) + err := mem.Set(keyFmt(i), valFmt(i)) + require.NoError(t, err) + } +} + +func deleteRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { + for i := start; i < end; i++ { + st.Delete(keyFmt(i)) + err := mem.Delete(keyFmt(i)) + require.NoError(t, err) + } +} + +//-------------------------------------------------------- + +type keyRange struct { + start int + end int +} + +func (kr keyRange) len() int { + return kr.end - kr.start +} + +func newKeyRangeCounter(kr []keyRange) *keyRangeCounter { + return &keyRangeCounter{keyRanges: kr} +} + +// we can iterate over this and make sure our real iterators have all the right keys +type keyRangeCounter struct { + rangeIdx int + idx int + keyRanges []keyRange +} + +func (krc *keyRangeCounter) valid() bool { + maxRangeIdx := len(krc.keyRanges) - 1 + maxRange := krc.keyRanges[maxRangeIdx] + + // if we're not in the max range, we're valid + if krc.rangeIdx <= maxRangeIdx && + krc.idx < maxRange.len() { + return true + } + + return false +} + +func (krc *keyRangeCounter) next() { + thisKeyRange := krc.keyRanges[krc.rangeIdx] + if krc.idx == thisKeyRange.len()-1 { + krc.rangeIdx++ + krc.idx = 0 + } else { + krc.idx++ + } +} + +func (krc *keyRangeCounter) key() int { + thisKeyRange := krc.keyRanges[krc.rangeIdx] + return thisKeyRange.start + krc.idx +} + +//-------------------------------------------------------- + +func bz(s string) []byte { return []byte(s) } + +func BenchmarkCacheKVStoreGetNoKeyFound(b *testing.B) { + b.ReportAllocs() + st := newCacheKVStore() + b.ResetTimer() + // assumes b.N < 2**24 + for i := 0; i < b.N; i++ { + st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) + } +} + +func BenchmarkCacheKVStoreGetKeyFound(b *testing.B) { + b.ReportAllocs() + st := newCacheKVStore() + for i := 0; i < b.N; i++ { + arr := 
[]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)} + st.Set(arr, arr) + } + b.ResetTimer() + // assumes b.N < 2**24 + for i := 0; i < b.N; i++ { + st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) + } +} + +//-------------------------------------------------------- + +func BenchmarkCacheKVStoreSetAndCommit(b *testing.B) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + store := cachekv.NewStore(mem) + store1 := store.Clone() + for j := 0; j < 10; j++ { + store1.Set(sdk.Uint64ToBigEndian(uint64(i+j)), []byte{byte(i)}) + } + store.Restore(store1) + store.Write() + } +} diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go new file mode 100644 index 0000000000..43f64da39f --- /dev/null +++ b/store/cachemulti/store.go @@ -0,0 +1,191 @@ +package cachemulti + +import ( + "fmt" + "io" + + "github.com/cosmos/cosmos-sdk/store/tracekv" + "github.com/cosmos/cosmos-sdk/store/types" + "github.com/evmos/ethermint/store/cachekv" +) + +// storeNameCtxKey is the TraceContext metadata key that identifies +// the store which emitted a given trace. +const storeNameCtxKey = "store_name" + +//---------------------------------------- +// Store + +// Store holds many branched stores. +// Implements MultiStore. +// NOTE: a Store (and MultiStores in general) should never expose the +// keys for the substores. +type Store struct { + stores map[types.StoreKey]*cachekv.Store + + traceWriter io.Writer + traceContext types.TraceContext +} + +var _ types.CacheMultiStore = Store{} + +// NewFromKVStore creates a new Store object from a mapping of store keys to +// CacheWrapper objects and a KVStore as the database. Each CacheWrapper store +// is a branched store. +func NewFromKVStore( + stores map[types.StoreKey]types.KVStore, + traceWriter io.Writer, traceContext types.TraceContext, +) Store { + cms := Store{ + stores: make(map[types.StoreKey]*cachekv.Store, len(stores)), + traceWriter: traceWriter, + traceContext: traceContext, + } + + for key, store := range stores { + if cms.TracingEnabled() { + tctx := cms.traceContext.Clone().Merge(types.TraceContext{ + storeNameCtxKey: key.Name(), + }) + + store = tracekv.NewStore(store, cms.traceWriter, tctx) + } + cms.stores[key] = cachekv.NewStore(store) + } + + return cms +} + +// NewStore creates a new Store object from parent rootmulti store, it branch out inner store of the specified keys. +func NewStore( + parent types.MultiStore, keys map[string]*types.KVStoreKey, +) Store { + stores := make(map[types.StoreKey]types.KVStore, len(keys)) + for _, key := range keys { + stores[key] = parent.GetKVStore(key) + } + return NewFromKVStore(stores, nil, nil) +} + +func newCacheMultiStoreFromCMS(cms Store) Store { + stores := make(map[types.StoreKey]types.KVStore) + for k, v := range cms.stores { + stores[k] = v + } + + return NewFromKVStore(stores, cms.traceWriter, cms.traceContext) +} + +// SetTracer sets the tracer for the MultiStore that the underlying +// stores will utilize to trace operations. A MultiStore is returned. +func (cms Store) SetTracer(w io.Writer) types.MultiStore { + cms.traceWriter = w + return cms +} + +// SetTracingContext updates the tracing context for the MultiStore by merging +// the given context with the existing context by key. Any existing keys will +// be overwritten. It is implied that the caller should update the context when +// necessary between tracing operations. It returns a modified MultiStore. 
+func (cms Store) SetTracingContext(tc types.TraceContext) types.MultiStore { + if cms.traceContext != nil { + for k, v := range tc { + cms.traceContext[k] = v + } + } else { + cms.traceContext = tc + } + + return cms +} + +// TracingEnabled returns if tracing is enabled for the MultiStore. +func (cms Store) TracingEnabled() bool { + return cms.traceWriter != nil +} + +// LatestVersion returns the branch version of the store +func (cms Store) LatestVersion() int64 { + panic("cannot get latest version from branch cached multi-store") +} + +// GetStoreType returns the type of the store. +func (cms Store) GetStoreType() types.StoreType { + return types.StoreTypeMulti +} + +// Write calls Write on each underlying store. +func (cms Store) Write() { + for _, store := range cms.stores { + store.Write() + } +} + +// Clone creates a snapshot of each store of the cache-multistore. +// Each copy is a copy-on-write operation and therefore is very fast. +func (cms Store) Clone() types.CacheMultiStore { + stores := make(map[types.StoreKey]*cachekv.Store, len(cms.stores)) + for key, store := range cms.stores { + stores[key] = store.Clone() + } + return Store{ + stores: stores, + + traceWriter: cms.traceWriter, + traceContext: cms.traceContext, + } +} + +// Restore restores the cache-multistore cache to a given snapshot. +func (cms Store) Restore(s types.CacheMultiStore) { + ms := s.(Store) + for key, store := range cms.stores { + otherStore, ok := ms.stores[key] + if !ok { + panic("Invariant violation: Restore should only be called on a store cloned from itself") + } + store.Restore(otherStore) + } +} + +// Implements CacheWrapper. +func (cms Store) CacheWrap() types.CacheWrap { + return cms.CacheMultiStore().(types.CacheWrap) +} + +// CacheWrapWithTrace implements the CacheWrapper interface. +func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { + return cms.CacheWrap() +} + +// Implements MultiStore. +func (cms Store) CacheMultiStore() types.CacheMultiStore { + return newCacheMultiStoreFromCMS(cms) +} + +// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic +// as an already cached multi-store cannot load previous versions. +// +// TODO: The store implementation can possibly be modified to support this as it +// seems safe to load previous versions (heights). +func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) { + panic("cannot branch cached multi-store with a version") +} + +// GetStore returns an underlying Store by key. +func (cms Store) GetStore(key types.StoreKey) types.Store { + s := cms.stores[key] + if key == nil || s == nil { + panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + } + return types.Store(s) +} + +// GetKVStore returns an underlying KVStore by key. 
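+// It panics if the key has not been registered in the branched stores.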
+func (cms Store) GetKVStore(key types.StoreKey) types.KVStore { + store := cms.stores[key] + if key == nil || store == nil { + panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + } + return types.KVStore(store) +} diff --git a/store/cachemulti/store_test.go b/store/cachemulti/store_test.go new file mode 100644 index 0000000000..80d54a1f43 --- /dev/null +++ b/store/cachemulti/store_test.go @@ -0,0 +1,25 @@ +package cachemulti + +import ( + "fmt" + "testing" + + "github.com/evmos/ethermint/store/cachekv" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/store/types" +) + +func TestStoreGetKVStore(t *testing.T) { + require := require.New(t) + + s := Store{stores: map[types.StoreKey]*cachekv.Store{}} + key := types.NewKVStoreKey("abc") + errMsg := fmt.Sprintf("kv store with key %v has not been registered in stores", key) + + require.PanicsWithValue(errMsg, + func() { s.GetStore(key) }) + + require.PanicsWithValue(errMsg, + func() { s.GetKVStore(key) }) +} diff --git a/x/evm/keeper/keeper.go b/x/evm/keeper/keeper.go index 58621e310a..d8f50704b4 100644 --- a/x/evm/keeper/keeper.go +++ b/x/evm/keeper/keeper.go @@ -78,6 +78,10 @@ type Keeper struct { evmConstructor evm.Constructor // Legacy subspace ss paramstypes.Subspace + + // a set of store keys that should cover all the precompile use cases, + // or ideally just pass the application's all stores. + keys map[string]*storetypes.KVStoreKey } // NewKeeper generates new evm module keeper @@ -93,6 +97,7 @@ func NewKeeper( evmConstructor evm.Constructor, tracer string, ss paramstypes.Subspace, + keys map[string]*storetypes.KVStoreKey, ) *Keeper { // ensure evm module account is set if addr := ak.GetModuleAddress(types.ModuleName); addr == nil { @@ -118,9 +123,14 @@ func NewKeeper( evmConstructor: evmConstructor, tracer: tracer, ss: ss, + keys: keys, } } +func (k Keeper) StoreKeys() map[string]*storetypes.KVStoreKey { + return k.keys +} + // Logger returns a module-specific logger. func (k Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+types.ModuleName) diff --git a/x/evm/module.go b/x/evm/module.go index 5b15836095..4094c3c048 100644 --- a/x/evm/module.go +++ b/x/evm/module.go @@ -80,7 +80,7 @@ func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingCo // RegisterRESTRoutes performs a no-op as the EVM module doesn't expose REST // endpoints -func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) { +func (AppModuleBasic) RegisterRESTRoutes(_ client.Context, _ *mux.Router) { } func (b AppModuleBasic) RegisterGRPCGatewayRoutes(c client.Context, serveMux *runtime.ServeMux) { @@ -132,7 +132,7 @@ func (AppModule) Name() string { // RegisterInvariants interface for registering invariants. Performs a no-op // as the evm module doesn't expose invariants. -func (am AppModule) RegisterInvariants(ir sdk.InvariantRegistry) { +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) { } // RegisterServices registers a GRPC query service to respond to the diff --git a/x/evm/simulation/operations.go b/x/evm/simulation/operations.go index cd5800adc4..7087071b8e 100644 --- a/x/evm/simulation/operations.go +++ b/x/evm/simulation/operations.go @@ -94,7 +94,7 @@ func WeightedOperations( // SimulateEthSimpleTransfer simulate simple eth account transferring gas token. // It randomly choose sender, recipient and transferable amount. // Other tx details like nonce, gasprice, gaslimit are calculated to get valid value. 
-func SimulateEthSimpleTransfer(ak types.AccountKeeper, k *keeper.Keeper) simtypes.Operation { +func SimulateEthSimpleTransfer(_ types.AccountKeeper, k *keeper.Keeper) simtypes.Operation { return func( r *rand.Rand, bapp *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { @@ -117,7 +117,7 @@ func SimulateEthSimpleTransfer(ak types.AccountKeeper, k *keeper.Keeper) simtype // SimulateEthCreateContract simulate create an ERC20 contract. // It makes operationSimulateEthCallContract the future operations of SimulateEthCreateContract // to ensure valid contract call. -func SimulateEthCreateContract(ak types.AccountKeeper, k *keeper.Keeper) simtypes.Operation { +func SimulateEthCreateContract(_ types.AccountKeeper, k *keeper.Keeper) simtypes.Operation { return func( r *rand.Rand, bapp *baseapp.BaseApp, ctx sdk.Context, accs []simtypes.Account, chainID string, ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { @@ -178,7 +178,7 @@ func operationSimulateEthCallContract(k *keeper.Keeper, contractAddr, to *common // SimulateEthTx creates valid ethereum tx and pack it as cosmos tx, and deliver it. func SimulateEthTx( - ctx *simulateContext, from, to *common.Address, amount *big.Int, data *hexutil.Bytes, prv cryptotypes.PrivKey, fops []simtypes.FutureOperation, + ctx *simulateContext, from, _ *common.Address, _ *big.Int, data *hexutil.Bytes, prv cryptotypes.PrivKey, fops []simtypes.FutureOperation, ) (simtypes.OperationMsg, []simtypes.FutureOperation, error) { ethTx, err := CreateRandomValidEthTx(ctx, from, nil, nil, data) if err == ErrNoEnoughBalance { diff --git a/x/evm/statedb/interfaces.go b/x/evm/statedb/interfaces.go index e4e83e09c3..afd6674ccf 100644 --- a/x/evm/statedb/interfaces.go +++ b/x/evm/statedb/interfaces.go @@ -16,6 +16,7 @@ package statedb import ( + storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" @@ -33,6 +34,9 @@ type ExtStateDB interface { // Keeper provide underlying storage of StateDB type Keeper interface { + // for cache store wrapping + StoreKeys() map[string]*storetypes.KVStoreKey + // Read methods GetAccount(ctx sdk.Context, addr common.Address) *Account GetState(ctx sdk.Context, addr common.Address, key common.Hash) common.Hash diff --git a/x/evm/statedb/mock_test.go b/x/evm/statedb/mock_test.go index 544cbfa1b4..abcef5b4ce 100644 --- a/x/evm/statedb/mock_test.go +++ b/x/evm/statedb/mock_test.go @@ -5,6 +5,7 @@ import ( "errors" "math/big" + storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -34,6 +35,10 @@ func NewMockKeeper() *MockKeeper { } } +func (k MockKeeper) StoreKeys() map[string]*storetypes.KVStoreKey { + return nil +} + func (k MockKeeper) GetAccount(ctx sdk.Context, addr common.Address) *statedb.Account { acct, ok := k.accounts[addr] if !ok { diff --git a/x/evm/statedb/native.go b/x/evm/statedb/native.go new file mode 100644 index 0000000000..0178984624 --- /dev/null +++ b/x/evm/statedb/native.go @@ -0,0 +1,20 @@ +package statedb + +import ( + "github.com/cosmos/cosmos-sdk/store/types" + "github.com/ethereum/go-ethereum/common" +) + +var _ JournalEntry = nativeChange{} + +type nativeChange struct { + snapshot types.MultiStore +} + +func (native nativeChange) Dirtied() *common.Address { + return nil +} + +func 
(native nativeChange) Revert(s *StateDB) {
+	s.restoreNativeState(native.snapshot)
+}
diff --git a/x/evm/statedb/statedb.go b/x/evm/statedb/statedb.go
index c03d501cbf..fd6b6d878f 100644
--- a/x/evm/statedb/statedb.go
+++ b/x/evm/statedb/statedb.go
@@ -26,6 +26,8 @@ import (
 	ethtypes "github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
+
+	"github.com/evmos/ethermint/store/cachemulti"
 )
 
 // revision is the identifier of a version of state.
@@ -44,8 +46,9 @@ var _ vm.StateDB = &StateDB{}
 // * Contracts
 // * Accounts
 type StateDB struct {
-	keeper Keeper
-	ctx    sdk.Context
+	keeper   Keeper
+	ctx      sdk.Context
+	cacheCtx sdk.Context
 
 	// Journal of state modifications. This is the backbone of
 	// Snapshot and RevertToSnapshot.
@@ -69,9 +72,11 @@ type StateDB struct {
 
 // New creates a new state from a given trie.
 func New(ctx sdk.Context, keeper Keeper, txConfig TxConfig) *StateDB {
+	cacheCtx := ctx.WithMultiStore(cachemulti.NewStore(ctx.MultiStore(), keeper.StoreKeys()))
 	return &StateDB{
 		keeper:       keeper,
 		ctx:          ctx,
+		cacheCtx:     cacheCtx,
 		stateObjects: make(map[common.Address]*stateObject),
 		journal:      newJournal(),
 		accessList:   newAccessList(),
@@ -80,6 +85,13 @@ func New(ctx sdk.Context, keeper Keeper, txConfig TxConfig) *StateDB {
 	}
 }
 
+// CacheMultiStore casts the multistore to *cachemulti.Store.
+// invariant: the multistore must be a `cachemulti.Store`,
+// proof: it's set in the constructor and only modified in `restoreNativeState`, which keeps the invariant.
+func (s *StateDB) CacheMultiStore() cachemulti.Store {
+	return s.cacheCtx.MultiStore().(cachemulti.Store)
+}
+
 // Keeper returns the underlying `Keeper`
 func (s *StateDB) Keeper() Keeper {
 	return s.keeper
@@ -298,6 +310,24 @@ func (s *StateDB) setStateObject(object *stateObject) {
 	s.stateObjects[object.Address()] = object
 }
 
+func (s *StateDB) restoreNativeState(ms sdk.MultiStore) {
+	s.cacheCtx = s.cacheCtx.WithMultiStore(ms)
+}
+
+// ExecuteNativeAction executes a native action in isolation;
+// the writes are reverted when either the native action itself fails
+// or the wrapping message call is reverted.
+func (s *StateDB) ExecuteNativeAction(action func(ctx sdk.Context) error) error {
+	snapshot := s.CacheMultiStore().Clone()
+	err := action(s.cacheCtx)
+	if err != nil {
+		s.restoreNativeState(snapshot)
+		return err
+	}
+	s.journal.append(nativeChange{snapshot: snapshot})
+	return nil
+}
+
 /*
  * SETTERS
  */
@@ -451,6 +481,10 @@ func (s *StateDB) RevertToSnapshot(revid int) {
 // Commit writes the dirty states to keeper
 // the StateDB object should be discarded after committed.
 func (s *StateDB) Commit() error {
+	// commit the native cache store first;
+	// the states managed by precompiles and the rest of the StateDB must not overlap.
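+	// note: the precompile writes live only in the branched cacheCtx multistore, while
+	// account, code and storage changes are tracked by the journal and written through
+	// the keeper below, so committing the native store first cannot clobber them.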
+ s.CacheMultiStore().Write() + for _, addr := range s.journal.sortedDirties() { obj := s.stateObjects[addr] if obj.suicided { diff --git a/x/feemarket/module.go b/x/feemarket/module.go index c7fe6eba21..5eb52e8925 100644 --- a/x/feemarket/module.go +++ b/x/feemarket/module.go @@ -80,7 +80,7 @@ func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingCo // RegisterRESTRoutes performs a no-op as the EVM module doesn't expose REST // endpoints -func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) { +func (AppModuleBasic) RegisterRESTRoutes(_ client.Context, _ *mux.Router) { } func (b AppModuleBasic) RegisterGRPCGatewayRoutes(c client.Context, serveMux *runtime.ServeMux) { @@ -130,7 +130,7 @@ func (AppModule) Name() string { // RegisterInvariants interface for registering invariants. Performs a no-op // as the fee market module doesn't expose invariants. -func (am AppModule) RegisterInvariants(ir sdk.InvariantRegistry) {} +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} // RegisterServices registers the GRPC query service and migrator service to respond to the // module-specific GRPC queries and handle the upgrade store migration for the module. @@ -174,7 +174,7 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw } // RegisterStoreDecoder registers a decoder for fee market module's types -func (am AppModule) RegisterStoreDecoder(sdr sdk.StoreDecoderRegistry) {} +func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} // GenerateGenesisState creates a randomized GenState of the fee market module. func (AppModule) GenerateGenesisState(simState *module.SimulationState) { @@ -182,6 +182,6 @@ func (AppModule) GenerateGenesisState(simState *module.SimulationState) { } // WeightedOperations returns the all the fee market module operations with their respective weights. -func (am AppModule) WeightedOperations(simState module.SimulationState) []simtypes.WeightedOperation { +func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation { return nil }
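
The interplay between `ExecuteNativeAction`, `Clone` and the `nativeChange` journal entry is easiest to see from a caller's perspective. Below is a minimal, hypothetical sketch of how a precompile could route a Cosmos-side write through the StateDB so it follows EVM revert semantics; the `precompiles` package, the `BankKeeper` interface and the `transferViaNativeAction` helper are illustrative only and not part of this change.

```go
package precompiles

import (
	"errors"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/ethereum/go-ethereum/common"

	"github.com/evmos/ethermint/x/evm/statedb"
)

// BankKeeper is a hypothetical dependency, declared locally for illustration.
type BankKeeper interface {
	SendCoins(ctx sdk.Context, from, to sdk.AccAddress, amt sdk.Coins) error
}

// transferViaNativeAction routes a bank transfer through the StateDB so the
// native write is buffered in the branched cache multistore.
func transferViaNativeAction(
	db *statedb.StateDB, bk BankKeeper,
	from, to common.Address, amt sdk.Coins,
) error {
	if len(amt) == 0 {
		return errors.New("empty amount")
	}
	// The action runs against the branched cache context: if it returns an
	// error the pre-action snapshot is restored immediately, and the journaled
	// nativeChange entry rolls it back again if the EVM call later reverts.
	return db.ExecuteNativeAction(func(ctx sdk.Context) error {
		return bk.SendCoins(ctx, from.Bytes(), to.Bytes(), amt)
	})
}
```

If the action fails, the cloned multistore snapshot is restored on the spot; if it succeeds but the surrounding message call later reverts, `RevertToSnapshot` replays the journal and the recorded `nativeChange` entry restores the same snapshot. Both paths rely on `Clone` being a cheap copy-on-write of the btree-backed cachekv stores.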