diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 24ca58bfb2a4..8236052e83cf 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -291,7 +291,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.20" + go-version: "1.21" check-latest: true cache: true cache-dependency-path: core/go.sum @@ -322,7 +322,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.20" + go-version: "1.21" check-latest: true cache: true cache-dependency-path: depinject/go.sum @@ -351,7 +351,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.20" + go-version: "1.21" check-latest: true cache: true cache-dependency-path: errors/go.sum @@ -382,7 +382,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.20" + go-version: "1.21" check-latest: true cache: true cache-dependency-path: math/go.sum @@ -480,7 +480,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.20" + go-version: "1.21" check-latest: true cache: true cache-dependency-path: orm/go.sum @@ -604,7 +604,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.20" + go-version: "1.21" check-latest: true cache: true cache-dependency-path: store/go.sum @@ -635,7 +635,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.20" + go-version: "1.21" check-latest: true cache: true cache-dependency-path: log/go.sum diff --git a/store/cachekv/internal/btree.go b/store/cachekv/internal/btree.go deleted file mode 100644 index 209f7e58c4dd..000000000000 --- a/store/cachekv/internal/btree.go +++ /dev/null @@ -1,91 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "github.com/tidwall/btree" - - "cosmossdk.io/store/types" -) - -const ( - // The approximate number of items and children per B-tree node. Tuned with benchmarks. - // copied from memdb. - bTreeDegree = 32 -) - -var errKeyEmpty = errors.New("key cannot be empty") - -// BTree implements the sorted cache for cachekv store, -// we don't use MemDB here because cachekv is used extensively in sdk core path, -// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. -// -// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. -type BTree struct { - tree *btree.BTreeG[item] -} - -// NewBTree creates a wrapper around `btree.BTreeG`. -func NewBTree() BTree { - return BTree{ - tree: btree.NewBTreeGOptions(byKeys, btree.Options{ - Degree: bTreeDegree, - NoLocks: false, - }), - } -} - -func (bt BTree) Set(key, value []byte) { - bt.tree.Set(newItem(key, value)) -} - -func (bt BTree) Get(key []byte) []byte { - i, found := bt.tree.Get(newItem(key, nil)) - if !found { - return nil - } - return i.value -} - -func (bt BTree) Delete(key []byte) { - bt.tree.Delete(newItem(key, nil)) -} - -func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, true), nil -} - -func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, false), nil -} - -// Copy the tree. 
This is a copy-on-write operation and is very fast because -// it only performs a shadowed copy. -func (bt BTree) Copy() BTree { - return BTree{ - tree: bt.tree.Copy(), - } -} - -// item is a btree item with byte slices as keys and values -type item struct { - key []byte - value []byte -} - -// byKeys compares the items by key -func byKeys(a, b item) bool { - return bytes.Compare(a.key, b.key) == -1 -} - -// newItem creates a new pair item. -func newItem(key, value []byte) item { - return item{key: key, value: value} -} diff --git a/store/cachekv/internal/mergeiterator.go b/store/cachekv/internal/mergeiterator.go index 58e9497b3028..c9a1e7d3a677 100644 --- a/store/cachekv/internal/mergeiterator.go +++ b/store/cachekv/internal/mergeiterator.go @@ -14,21 +14,24 @@ import ( // cache shadows (overrides) the parent. // // TODO: Optimize by memoizing. -type cacheMergeIterator struct { - parent types.Iterator - cache types.Iterator +type cacheMergeIterator[V any] struct { + parent types.GIterator[V] + cache types.GIterator[V] ascending bool valid bool + + isZero func(V) bool } -var _ types.Iterator = (*cacheMergeIterator)(nil) +var _ types.Iterator = (*cacheMergeIterator[[]byte])(nil) -func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.Iterator { - iter := &cacheMergeIterator{ +func NewCacheMergeIterator[V any](parent, cache types.GIterator[V], ascending bool, isZero func(V) bool) types.GIterator[V] { + iter := &cacheMergeIterator[V]{ parent: parent, cache: cache, ascending: ascending, + isZero: isZero, } iter.valid = iter.skipUntilExistsOrInvalid() @@ -37,17 +40,17 @@ func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.I // Domain implements Iterator. // Returns parent domain because cache and parent domains are the same. -func (iter *cacheMergeIterator) Domain() (start, end []byte) { +func (iter *cacheMergeIterator[V]) Domain() (start, end []byte) { return iter.parent.Domain() } // Valid implements Iterator. -func (iter *cacheMergeIterator) Valid() bool { +func (iter *cacheMergeIterator[V]) Valid() bool { return iter.valid } // Next implements Iterator -func (iter *cacheMergeIterator) Next() { +func (iter *cacheMergeIterator[V]) Next() { iter.assertValid() switch { @@ -74,7 +77,7 @@ func (iter *cacheMergeIterator) Next() { } // Key implements Iterator -func (iter *cacheMergeIterator) Key() []byte { +func (iter *cacheMergeIterator[V]) Key() []byte { iter.assertValid() // If parent is invalid, get the cache key. @@ -104,7 +107,7 @@ func (iter *cacheMergeIterator) Key() []byte { } // Value implements Iterator -func (iter *cacheMergeIterator) Value() []byte { +func (iter *cacheMergeIterator[V]) Value() V { iter.assertValid() // If parent is invalid, get the cache value. @@ -134,7 +137,7 @@ func (iter *cacheMergeIterator) Value() []byte { } // Close implements Iterator -func (iter *cacheMergeIterator) Close() error { +func (iter *cacheMergeIterator[V]) Close() error { err1 := iter.cache.Close() if err := iter.parent.Close(); err != nil { return err @@ -145,7 +148,7 @@ func (iter *cacheMergeIterator) Close() error { // Error returns an error if the cacheMergeIterator is invalid defined by the // Valid method. -func (iter *cacheMergeIterator) Error() error { +func (iter *cacheMergeIterator[V]) Error() error { if !iter.Valid() { return errors.New("invalid cacheMergeIterator") } @@ -155,14 +158,14 @@ func (iter *cacheMergeIterator) Error() error { // If not valid, panics. // NOTE: May have side-effect of iterating over cache. 
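// Usage sketch for the generic merge iterator above (illustrative only; it assumes the
// snippet lives under the cachekv package tree so that both the
// "cosmossdk.io/store/cachekv/internal" and "cosmossdk.io/store/internal/btree" packages
// resolve). For a plain []byte store, a cache entry whose value is nil is a deletion
// ("tombstone") that shadows the matching parent entry, so isZero is a simple nil check.
func exampleMergeIterator() {
	parent, cache := btree.NewBTree[[]byte](), btree.NewBTree[[]byte]()
	parent.Set([]byte("a"), []byte{1})
	parent.Set([]byte("b"), []byte{2})
	cache.Set([]byte("a"), nil) // nil value = tombstone for "a"

	pit, _ := parent.Iterator(nil, nil)
	cit, _ := cache.Iterator(nil, nil)
	it := internal.NewCacheMergeIterator(pit, cit, true, func(v []byte) bool { return v == nil })
	defer it.Close()

	for ; it.Valid(); it.Next() {
		// only "b" is visited: "a" is hidden by the cache tombstone
		_, _ = it.Key(), it.Value()
	}
}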
-func (iter *cacheMergeIterator) assertValid() { +func (iter *cacheMergeIterator[V]) assertValid() { if err := iter.Error(); err != nil { panic(err) } } // Like bytes.Compare but opposite if not ascending. -func (iter *cacheMergeIterator) compare(a, b []byte) int { +func (iter *cacheMergeIterator[V]) compare(a, b []byte) int { if iter.ascending { return bytes.Compare(a, b) } @@ -175,9 +178,9 @@ func (iter *cacheMergeIterator) compare(a, b []byte) int { // If the current cache item is not a delete item, does nothing. // If `until` is nil, there is no limit, and cache may end up invalid. // CONTRACT: cache is valid. -func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { +func (iter *cacheMergeIterator[V]) skipCacheDeletes(until []byte) { for iter.cache.Valid() && - iter.cache.Value() == nil && + iter.isZero(iter.cache.Value()) && (until == nil || iter.compare(iter.cache.Key(), until) < 0) { iter.cache.Next() } @@ -186,7 +189,7 @@ func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { // Fast forwards cache (or parent+cache in case of deleted items) until current // item exists, or until iterator becomes invalid. // Returns whether the iterator is valid. -func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { +func (iter *cacheMergeIterator[V]) skipUntilExistsOrInvalid() bool { for { // If parent is invalid, fast-forward cache. if !iter.parent.Valid() { @@ -211,7 +214,7 @@ func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { case 0: // parent == cache. // Skip over if cache item is a delete. valueC := iter.cache.Value() - if valueC == nil { + if iter.isZero(valueC) { iter.parent.Next() iter.cache.Next() @@ -223,7 +226,7 @@ func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { case 1: // cache < parent // Skip over if cache item is a delete. 
valueC := iter.cache.Value() - if valueC == nil { + if iter.isZero(valueC) { iter.skipCacheDeletes(keyP) continue } diff --git a/store/cachekv/search_benchmark_test.go b/store/cachekv/search_benchmark_test.go deleted file mode 100644 index ecdc86a8e43a..000000000000 --- a/store/cachekv/search_benchmark_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package cachekv - -import ( - "strconv" - "testing" - - "cosmossdk.io/store/cachekv/internal" -) - -func BenchmarkLargeUnsortedMisses(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - store := generateStore() - b.StartTimer() - - for k := 0; k < 10000; k++ { - // cache has A + Z values - // these are within range, but match nothing - store.dirtyItems([]byte("B1"), []byte("B2")) - } - } -} - -func generateStore() *Store { - cache := map[string]*cValue{} - unsorted := map[string]struct{}{} - for i := 0; i < 5000; i++ { - key := "A" + strconv.Itoa(i) - unsorted[key] = struct{}{} - cache[key] = &cValue{} - } - - for i := 0; i < 5000; i++ { - key := "Z" + strconv.Itoa(i) - unsorted[key] = struct{}{} - cache[key] = &cValue{} - } - - return &Store{ - cache: cache, - unsortedCache: unsorted, - sortedCache: internal.NewBTree(), - } -} diff --git a/store/cachekv/search_test.go b/store/cachekv/search_test.go deleted file mode 100644 index 41321c076eae..000000000000 --- a/store/cachekv/search_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package cachekv - -import "testing" - -func TestFindStartIndex(t *testing.T) { - tests := []struct { - name string - sortedL []string - query string - want int - }{ - { - name: "non-existent value", - sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "o", - want: 8, - }, - { - name: "dupes start at index 0", - sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "a", - want: 0, - }, - { - name: "dupes start at non-index 0", - sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "c", - want: 1, - }, - { - name: "at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"}, - query: "z", - want: 7, - }, - { - name: "dupes at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, - query: "z", - want: 7, - }, - { - name: "entirely dupes", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "z", - want: 0, - }, - { - name: "non-existent but within >=start", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "p", - want: 0, - }, - { - name: "non-existent and out of range", - sortedL: []string{"d", "e", "f", "g", "h"}, - query: "z", - want: -1, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - body := tt.sortedL - got := findStartIndex(body, tt.query) - if got != tt.want { - t.Fatalf("Got: %d, want: %d", got, tt.want) - } - }) - } -} - -func TestFindEndIndex(t *testing.T) { - tests := []struct { - name string - sortedL []string - query string - want int - }{ - { - name: "non-existent value", - sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "o", - want: 7, - }, - { - name: "dupes start at index 0", - sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "a", - want: 0, - }, - { - name: "dupes start at non-index 0", - sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"}, - query: "c", - want: 1, - }, - { - name: "at end", - sortedL: []string{"a", 
"e", "u", "v", "w", "x", "y", "z"}, - query: "z", - want: 7, - }, - { - name: "dupes at end", - sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"}, - query: "z", - want: 7, - }, - { - name: "entirely dupes", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "z", - want: 0, - }, - { - name: "non-existent and out of range", - sortedL: []string{"z", "z", "z", "z", "z"}, - query: "p", - want: -1, - }, - { - name: "non-existent and out of range", - sortedL: []string{"d", "e", "f", "g", "h"}, - query: "z", - want: 4, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - body := tt.sortedL - got := findEndIndex(body, tt.query) - if got != tt.want { - t.Fatalf("Got: %d, want: %d", got, tt.want) - } - }) - } -} diff --git a/store/cachekv/store.go b/store/cachekv/store.go index 08cfc2b325f6..5d174acaaeb8 100644 --- a/store/cachekv/store.go +++ b/store/cachekv/store.go @@ -1,199 +1,152 @@ package cachekv import ( - "bytes" - "io" - "sort" - "sync" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/math" "cosmossdk.io/store/cachekv/internal" - "cosmossdk.io/store/internal/conv" - "cosmossdk.io/store/internal/kv" - "cosmossdk.io/store/tracekv" + "cosmossdk.io/store/internal/btree" "cosmossdk.io/store/types" ) -// cValue represents a cached value. -// If dirty is true, it indicates the cached value is different from the underlying value. -type cValue struct { - value []byte - dirty bool -} +type Store = GStore[[]byte] -// Store wraps an in-memory cache around an underlying types.KVStore. -type Store struct { - mtx sync.Mutex - cache map[string]*cValue - unsortedCache map[string]struct{} - sortedCache internal.BTree // always ascending sorted - parent types.KVStore +var ( + _ types.CacheKVStore = (*Store)(nil) + _ types.CacheWrap = (*Store)(nil) + _ types.BranchStore = (*Store)(nil) +) + +func NewStore(parent types.KVStore) *Store { + return NewGStore( + parent, + func(v []byte) bool { return v == nil }, + func(v []byte) int { return len(v) }, + ) } -var _ types.CacheKVStore = (*Store)(nil) +// GStore wraps an in-memory cache around an underlying types.KVStore. +type GStore[V any] struct { + writeSet btree.BTree[V] // always ascending sorted + parent types.GKVStore[V] + + // isZero is a function that returns true if the value is considered "zero", for []byte and pointers the zero value + // is `nil`, zero value is not allowed to set to a key, and it's returned if the key is not found. + isZero func(V) bool + zeroValue V + // valueLen validates the value before it's set + valueLen func(V) int +} // NewStore creates a new Store object -func NewStore(parent types.KVStore) *Store { - return &Store{ - cache: make(map[string]*cValue), - unsortedCache: make(map[string]struct{}), - sortedCache: internal.NewBTree(), - parent: parent, +func NewGStore[V any](parent types.GKVStore[V], isZero func(V) bool, valueLen func(V) int) *GStore[V] { + return &GStore[V]{ + writeSet: btree.NewBTree[V](), + parent: parent, + isZero: isZero, + valueLen: valueLen, } } // GetStoreType implements Store. -func (store *Store) GetStoreType() types.StoreType { +func (store *GStore[V]) GetStoreType() types.StoreType { return store.parent.GetStoreType() } -// Get implements types.KVStore. -func (store *Store) Get(key []byte) (value []byte) { - store.mtx.Lock() - defer store.mtx.Unlock() +// Clone creates a copy-on-write snapshot of the cache store, +// it only performs a shallow copy so is very fast. 
+func (store *GStore[V]) Clone() types.BranchStore { + v := *store + v.writeSet = store.writeSet.Copy() + return &v +} + +// swapCache swap out the internal cache store and leave the current store unusable. +func (store *GStore[V]) swapCache() btree.BTree[V] { + cache := store.writeSet + store.writeSet = btree.BTree[V]{} + return cache +} + +// Restore restores the store cache to a given snapshot, leaving the snapshot unusable. +func (store *GStore[V]) Restore(s types.BranchStore) { + store.writeSet = s.(*GStore[V]).swapCache() +} +// Get implements types.KVStore. +func (store *GStore[V]) Get(key []byte) V { types.AssertValidKey(key) - cacheValue, ok := store.cache[conv.UnsafeBytesToStr(key)] - if !ok { - value = store.parent.Get(key) - store.setCacheValue(key, value, false) - } else { - value = cacheValue.value + value, found := store.writeSet.Get(key) + if !found { + return store.parent.Get(key) } - return value } // Set implements types.KVStore. -func (store *Store) Set(key, value []byte) { +func (store *GStore[V]) Set(key []byte, value V) { types.AssertValidKey(key) - types.AssertValidValue(value) + types.AssertValidValueGeneric(value, store.isZero, store.valueLen) - store.mtx.Lock() - defer store.mtx.Unlock() - store.setCacheValue(key, value, true) + store.writeSet.Set(key, value) } // Has implements types.KVStore. -func (store *Store) Has(key []byte) bool { - value := store.Get(key) - return value != nil -} - -// Delete implements types.KVStore. -func (store *Store) Delete(key []byte) { +func (store *GStore[V]) Has(key []byte) bool { types.AssertValidKey(key) - store.mtx.Lock() - defer store.mtx.Unlock() - - store.setCacheValue(key, nil, true) + value, found := store.writeSet.Get(key) + if !found { + return store.parent.Has(key) + } + return !store.isZero(value) } -func (store *Store) resetCaches() { - if len(store.cache) > 100_000 { - // Cache is too large. We likely did something linear time - // (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated. - // TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation. - // 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem. - store.cache = make(map[string]*cValue) - store.unsortedCache = make(map[string]struct{}) - } else { - // Clear the cache using the map clearing idiom - // and not allocating fresh objects. - // Please see https://bencher.orijtech.com/perfclinic/mapclearing/ - for key := range store.cache { - delete(store.cache, key) - } - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } - store.sortedCache = internal.NewBTree() +// Delete implements types.KVStore. +func (store *GStore[V]) Delete(key []byte) { + types.AssertValidKey(key) + store.writeSet.Set(key, store.zeroValue) } // Implements Cachetypes.KVStore. -func (store *Store) Write() { - store.mtx.Lock() - defer store.mtx.Unlock() - - if len(store.cache) == 0 && len(store.unsortedCache) == 0 { - store.sortedCache = internal.NewBTree() - return - } - - type cEntry struct { - key string - val *cValue - } - - // We need a copy of all of the keys. - // Not the best. To reduce RAM pressure, we copy the values as well - // and clear out the old caches right after the copy. 
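// Branch-and-restore sketch (it mirrors the TestBranchStore case added below in
// store_test.go; the parent KVStore is assumed to be supplied by the caller). Clone is
// cheap because the write set is a copy-on-write btree, and Restore swaps the branch's
// write set back into the original store.
func exampleBranch(parent types.KVStore) {
	store := cachekv.NewStore(parent)
	store.Set([]byte("k"), []byte("v1"))

	branch := store.Clone().(types.CacheKVStore)
	branch.Set([]byte("k"), []byte("v2"))
	_ = store.Get([]byte("k")) // still "v1": the branch shadows, it does not mutate

	store.Restore(branch.(types.BranchStore))
	_ = store.Get([]byte("k")) // now "v2"
}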
- sortedCache := make([]cEntry, 0, len(store.cache)) - - for key, dbValue := range store.cache { - if dbValue.dirty { - sortedCache = append(sortedCache, cEntry{key, dbValue}) - } - } - store.resetCaches() - sort.Slice(sortedCache, func(i, j int) bool { - return sortedCache[i].key < sortedCache[j].key - }) - - // TODO: Consider allowing usage of Batch, which would allow the write to - // at least happen atomically. - for _, obj := range sortedCache { - // We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot - // be sure if the underlying store might do a save with the byteslice or - // not. Once we get confirmation that .Delete is guaranteed not to - // save the byteslice, then we can assume only a read-only copy is sufficient. - if obj.val.value != nil { - // It already exists in the parent, hence update it. - store.parent.Set([]byte(obj.key), obj.val.value) +func (store *GStore[V]) Write() { + store.writeSet.Scan(func(key []byte, value V) bool { + if store.isZero(value) { + store.parent.Delete(key) } else { - store.parent.Delete([]byte(obj.key)) + store.parent.Set(key, value) } - } + return true + }) + store.writeSet.Clear() } -// CacheWrap implements CacheWrapper. -func (store *Store) CacheWrap() types.CacheWrap { - return NewStore(store) +func (store *GStore[V]) Discard() { + store.writeSet.Clear() } -// CacheWrapWithTrace implements the CacheWrapper interface. -func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return NewStore(tracekv.NewStore(store, w, tc)) +// CacheWrap implements CacheWrapper. +func (store *GStore[V]) CacheWrap() types.CacheWrap { + return NewGStore(store, store.isZero, store.valueLen) } //---------------------------------------- // Iteration // Iterator implements types.KVStore. -func (store *Store) Iterator(start, end []byte) types.Iterator { +func (store *GStore[V]) Iterator(start, end []byte) types.GIterator[V] { return store.iterator(start, end, true) } // ReverseIterator implements types.KVStore. -func (store *Store) ReverseIterator(start, end []byte) types.Iterator { +func (store *GStore[V]) ReverseIterator(start, end []byte) types.GIterator[V] { return store.iterator(start, end, false) } -func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { - store.mtx.Lock() - defer store.mtx.Unlock() - - store.dirtyItems(start, end) - isoSortedCache := store.sortedCache.Copy() +func (store *GStore[V]) iterator(start, end []byte, ascending bool) types.GIterator[V] { + isoSortedCache := store.writeSet.Copy() var ( err error - parent, cache types.Iterator + parent, cache types.GIterator[V] ) if ascending { @@ -207,202 +160,5 @@ func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { panic(err) } - return internal.NewCacheMergeIterator(parent, cache, ascending) -} - -func findStartIndex(strL []string, startQ string) int { - // Modified binary search to find the very first element in >=startQ. - if len(strL) == 0 { - return -1 - } - - var left, right, mid int - right = len(strL) - 1 - for left <= right { - mid = (left + right) >> 1 - midStr := strL[mid] - if midStr == startQ { - // Handle condition where there might be multiple values equal to startQ. - // We are looking for the very first value < midStL, that i+1 will be the first - // element >= midStr. 
- for i := mid - 1; i >= 0; i-- { - if strL[i] != midStr { - return i + 1 - } - } - return 0 - } - if midStr < startQ { - left = mid + 1 - } else { // midStrL > startQ - right = mid - 1 - } - } - if left >= 0 && left < len(strL) && strL[left] >= startQ { - return left - } - return -1 -} - -func findEndIndex(strL []string, endQ string) int { - if len(strL) == 0 { - return -1 - } - - // Modified binary search to find the very first element > 1 - midStr := strL[mid] - if midStr == endQ { - // Handle condition where there might be multiple values equal to startQ. - // We are looking for the very first value < midStL, that i+1 will be the first - // element >= midStr. - for i := mid - 1; i >= 0; i-- { - if strL[i] < midStr { - return i + 1 - } - } - return 0 - } - if midStr < endQ { - left = mid + 1 - } else { // midStrL > startQ - right = mid - 1 - } - } - - // Binary search failed, now let's find a value less than endQ. - for i := right; i >= 0; i-- { - if strL[i] < endQ { - return i - } - } - - return -1 -} - -type sortState int - -const ( - stateUnsorted sortState = iota - stateAlreadySorted -) - -const minSortSize = 1024 - -// Constructs a slice of dirty items, to use w/ memIterator. -func (store *Store) dirtyItems(start, end []byte) { - startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end) - if end != nil && startStr > endStr { - // Nothing to do here. - return - } - - n := len(store.unsortedCache) - unsorted := make([]*kv.Pair, 0) - // If the unsortedCache is too big, its costs too much to determine - // whats in the subset we are concerned about. - // If you are interleaving iterator calls with writes, this can easily become an - // O(N^2) overhead. - // Even without that, too many range checks eventually becomes more expensive - // than just not having the cache. - if n < minSortSize { - for key := range store.unsortedCache { - // dbm.IsKeyInDomain is nil safe and returns true iff key is greater than start - if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) { - cacheValue := store.cache[key] - unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) - } - } - store.clearUnsortedCacheSubset(unsorted, stateUnsorted) - return - } - - // Otherwise it is large so perform a modified binary search to find - // the target ranges for the keys that we should be looking for. 
- strL := make([]string, 0, n) - for key := range store.unsortedCache { - strL = append(strL, key) - } - sort.Strings(strL) - - // Now find the values within the domain - // [start, end) - startIndex := findStartIndex(strL, startStr) - if startIndex < 0 { - startIndex = 0 - } - - var endIndex int - if end == nil { - endIndex = len(strL) - 1 - } else { - endIndex = findEndIndex(strL, endStr) - } - if endIndex < 0 { - endIndex = len(strL) - 1 - } - - // Since we spent cycles to sort the values, we should process and remove a reasonable amount - // ensure start to end is at least minSortSize in size - // if below minSortSize, expand it to cover additional values - // this amortizes the cost of processing elements across multiple calls - if endIndex-startIndex < minSortSize { - endIndex = math.Min(startIndex+minSortSize, len(strL)-1) - if endIndex-startIndex < minSortSize { - startIndex = math.Max(endIndex-minSortSize, 0) - } - } - - kvL := make([]*kv.Pair, 0, 1+endIndex-startIndex) - for i := startIndex; i <= endIndex; i++ { - key := strL[i] - cacheValue := store.cache[key] - kvL = append(kvL, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) - } - - // kvL was already sorted so pass it in as is. - store.clearUnsortedCacheSubset(kvL, stateAlreadySorted) -} - -func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) { - n := len(store.unsortedCache) - if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map. - for key := range store.unsortedCache { - delete(store.unsortedCache, key) - } - } else { // Otherwise, normally delete the unsorted keys from the map. - for _, kv := range unsorted { - delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key)) - } - } - - if sortState == stateUnsorted { - sort.Slice(unsorted, func(i, j int) bool { - return bytes.Compare(unsorted[i].Key, unsorted[j].Key) < 0 - }) - } - - for _, item := range unsorted { - // sortedCache is able to store `nil` value to represent deleted items. - store.sortedCache.Set(item.Key, item.Value) - } -} - -//---------------------------------------- -// etc - -// Only entrypoint to mutate store.cache. -// A `nil` value means a deletion. 
-func (store *Store) setCacheValue(key, value []byte, dirty bool) { - keyStr := conv.UnsafeBytesToStr(key) - store.cache[keyStr] = &cValue{ - value: value, - dirty: dirty, - } - if dirty { - store.unsortedCache[keyStr] = struct{}{} - } + return internal.NewCacheMergeIterator(parent, cache, ascending, store.isZero) } diff --git a/store/cachekv/store_test.go b/store/cachekv/store_test.go index 3c5622355403..140df66e3b94 100644 --- a/store/cachekv/store_test.go +++ b/store/cachekv/store_test.go @@ -447,6 +447,20 @@ func TestIteratorDeadlock(t *testing.T) { defer it2.Close() } +func TestBranchStore(t *testing.T) { + mem := dbadapter.Store{DB: dbm.NewMemDB()} + store := cachekv.NewStore(mem) + + store.Set([]byte("key1"), []byte("value1")) + + branch := store.Clone().(types.CacheKVStore) + branch.Set([]byte("key1"), []byte("value2")) + + require.Equal(t, []byte("value1"), store.Get([]byte("key1"))) + store.Restore(branch.(types.BranchStore)) + require.Equal(t, []byte("value2"), store.Get([]byte("key1"))) +} + //------------------------------------------------------------------------------------------- // do some random ops diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go index 696911370c5d..8a578477d705 100644 --- a/store/cachemulti/store.go +++ b/store/cachemulti/store.go @@ -6,8 +6,6 @@ import ( dbm "github.com/cosmos/cosmos-db" - "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/dbadapter" "cosmossdk.io/store/tracekv" "cosmossdk.io/store/types" ) @@ -24,12 +22,13 @@ const storeNameCtxKey = "store_name" // NOTE: a Store (and MultiStores in general) should never expose the // keys for the substores. type Store struct { - db types.CacheKVStore stores map[types.StoreKey]types.CacheWrap - keys map[string]types.StoreKey traceWriter io.Writer traceContext types.TraceContext + parentStore func(types.StoreKey) types.CacheWrapper + + branched bool } var _ types.CacheMultiStore = Store{} @@ -38,26 +37,17 @@ var _ types.CacheMultiStore = Store{} // CacheWrapper objects and a KVStore as the database. Each CacheWrapper store // is a branched store. func NewFromKVStore( - store types.KVStore, stores map[types.StoreKey]types.CacheWrapper, - keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, + stores map[types.StoreKey]types.CacheWrapper, + traceWriter io.Writer, traceContext types.TraceContext, ) Store { cms := Store{ - db: cachekv.NewStore(store), stores: make(map[types.StoreKey]types.CacheWrap, len(stores)), - keys: keys, traceWriter: traceWriter, traceContext: traceContext, } for key, store := range stores { - if cms.TracingEnabled() { - tctx := cms.traceContext.Clone().Merge(types.TraceContext{ - storeNameCtxKey: key.Name(), - }) - - store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx) - } - cms.stores[key] = cachekv.NewStore(store.(types.KVStore)) + cms.initStore(key, store) } return cms @@ -66,19 +56,40 @@ func NewFromKVStore( // NewStore creates a new Store object from a mapping of store keys to // CacheWrapper objects. Each CacheWrapper store is a branched store. 
func NewStore( - db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey, + _ dbm.DB, stores map[types.StoreKey]types.CacheWrapper, _ map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext, ) Store { - return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext) + return NewFromKVStore(stores, traceWriter, traceContext) } -func newCacheMultiStoreFromCMS(cms Store) Store { - stores := make(map[types.StoreKey]types.CacheWrapper) - for k, v := range cms.stores { - stores[k] = v +// NewFromParent constructs a cache multistore with a parent store lazily, +// the parent is usually another cache multistore or the block-stm multiversion store. +func NewFromParent( + parentStore func(types.StoreKey) types.CacheWrapper, + traceWriter io.Writer, traceContext types.TraceContext, +) Store { + return Store{ + stores: make(map[types.StoreKey]types.CacheWrap), + traceWriter: traceWriter, + traceContext: traceContext, + parentStore: parentStore, } +} + +func (cms Store) initStore(key types.StoreKey, store types.CacheWrapper) types.CacheWrap { + if cms.TracingEnabled() { + // only support tracing on KVStore. + if kvstore, ok := store.(types.KVStore); ok { + tctx := cms.traceContext.Clone().Merge(types.TraceContext{ + storeNameCtxKey: key.Name(), + }) - return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext) + store = tracekv.NewStore(kvstore, cms.traceWriter, tctx) + } + } + cache := store.CacheWrap() + cms.stores[key] = cache + return cache } // SetTracer sets the tracer for the MultiStore that the underlying @@ -109,11 +120,6 @@ func (cms Store) TracingEnabled() bool { return cms.traceWriter != nil } -// LatestVersion returns the branch version of the store -func (cms Store) LatestVersion() int64 { - panic("cannot get latest version from branch cached multi-store") -} - // GetStoreType returns the type of the store. func (cms Store) GetStoreType() types.StoreType { return types.StoreTypeMulti @@ -121,50 +127,114 @@ func (cms Store) GetStoreType() types.StoreType { // Write calls Write on each underlying store. func (cms Store) Write() { - cms.db.Write() + if cms.branched { + panic("cannot Write on branched store") + } for _, store := range cms.stores { store.Write() } } +func (cms Store) Discard() { + for _, store := range cms.stores { + store.Discard() + } +} + // Implements CacheWrapper. func (cms Store) CacheWrap() types.CacheWrap { return cms.CacheMultiStore().(types.CacheWrap) } -// CacheWrapWithTrace implements the CacheWrapper interface. -func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - return cms.CacheWrap() -} - // Implements MultiStore. func (cms Store) CacheMultiStore() types.CacheMultiStore { - return newCacheMultiStoreFromCMS(cms) + return NewFromParent(cms.getCacheWrapper, cms.traceWriter, cms.traceContext) } -// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic -// as an already cached multi-store cannot load previous versions. -// -// TODO: The store implementation can possibly be modified to support this as it -// seems safe to load previous versions (heights). 
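// Lazy-branching sketch (the lookup function and store key are assumed to be supplied by
// the caller): NewFromParent defers cache-wrapping each substore until it is first
// accessed through getCacheWrapper, instead of materializing every substore up front.
func exampleLazyBranch(lookup func(types.StoreKey) types.CacheWrapper, key types.StoreKey) {
	cms := cachemulti.NewFromParent(lookup, nil, nil)
	cms.GetKVStore(key).Set([]byte("k"), []byte("v"))
	cms.Write() // flushes every touched substore back to its parent
}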
-func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) { - panic("cannot branch cached multi-store with a version") +func (cms Store) getCacheWrapper(key types.StoreKey) types.CacheWrapper { + store, ok := cms.stores[key] + if !ok && cms.parentStore != nil { + // load on demand + store = cms.initStore(key, cms.parentStore(key)) + } + if key == nil || store == nil { + panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + } + return store } // GetStore returns an underlying Store by key. func (cms Store) GetStore(key types.StoreKey) types.Store { - s := cms.stores[key] - if key == nil || s == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + store, ok := cms.getCacheWrapper(key).(types.Store) + if !ok { + panic(fmt.Sprintf("store with key %v is not Store", key)) } - return s.(types.Store) + return store } // GetKVStore returns an underlying KVStore by key. func (cms Store) GetKVStore(key types.StoreKey) types.KVStore { - store := cms.stores[key] - if key == nil || store == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) + store, ok := cms.getCacheWrapper(key).(types.KVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not KVStore", key)) + } + return store +} + +// GetObjKVStore returns an underlying KVStore by key. +func (cms Store) GetObjKVStore(key types.StoreKey) types.ObjKVStore { + store, ok := cms.getCacheWrapper(key).(types.ObjKVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not ObjKVStore", key)) } - return store.(types.KVStore) + return store +} + +func (cms Store) Clone() Store { + stores := make(map[types.StoreKey]types.CacheWrap, len(cms.stores)) + for k, v := range cms.stores { + stores[k] = v.(types.BranchStore).Clone().(types.CacheWrap) + } + return Store{ + stores: stores, + traceWriter: cms.traceWriter, + traceContext: cms.traceContext, + parentStore: cms.parentStore, + + branched: true, + } +} + +func (cms Store) Restore(other Store) { + if !other.branched { + panic("cannot restore from non-branched store") + } + + // discard the non-exists stores + for k, v := range cms.stores { + if _, ok := other.stores[k]; !ok { + // clear the cache store if it's not in the other + v.Discard() + } + } + + // restore the other stores + for k, v := range other.stores { + store, ok := cms.stores[k] + if !ok { + store = cms.initStore(k, cms.parentStore(k)) + } + + store.(types.BranchStore).Restore(v.(types.BranchStore)) + } +} + +func (cms Store) RunAtomic(cb func(types.CacheMultiStore) error) error { + branch := cms.Clone() + if err := cb(branch); err != nil { + return err + } + + cms.Restore(branch) + return nil } diff --git a/store/cachemulti/store_test.go b/store/cachemulti/store_test.go index 0ea7785bff93..38d1182cdf7a 100644 --- a/store/cachemulti/store_test.go +++ b/store/cachemulti/store_test.go @@ -1,11 +1,16 @@ package cachemulti import ( + "errors" "fmt" "testing" + dbm "github.com/cosmos/cosmos-db" "github.com/stretchr/testify/require" + "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/internal" + "cosmossdk.io/store/internal/btree" "cosmossdk.io/store/types" ) @@ -22,3 +27,62 @@ func TestStoreGetKVStore(t *testing.T) { require.PanicsWithValue(errMsg, func() { s.GetKVStore(key) }) } + +func TestRunAtomic(t *testing.T) { + store := dbadapter.Store{DB: dbm.NewMemDB()} + objStore := internal.NewBTreeStore(btree.NewBTree[any](), + func(v any) bool { return v == nil }, + func(v any) int { return 1 }, + ) + keys := 
map[string]types.StoreKey{ + "abc": types.NewKVStoreKey("abc"), + "obj": types.NewObjectStoreKey("obj"), + } + parent := Store{stores: map[types.StoreKey]types.CacheWrap{ + keys["abc"]: store.CacheWrap(), + keys["obj"]: objStore.CacheWrap(), + }} + + s := Store{stores: map[types.StoreKey]types.CacheWrap{}, parentStore: parent.getCacheWrapper} + s.RunAtomic(func(ms types.CacheMultiStore) error { + ms.GetKVStore(keys["abc"]).Set([]byte("key"), []byte("value")) + ms.GetObjKVStore(keys["obj"]).Set([]byte("key"), "value") + return nil + }) + require.Equal(t, []byte("value"), s.GetKVStore(keys["abc"]).Get([]byte("key"))) + require.Equal(t, []byte(nil), s.GetKVStore(keys["abc"]).Get([]byte("key-non-exist"))) + require.Equal(t, "value", s.GetObjKVStore(keys["obj"]).Get([]byte("key")).(string)) + + require.Error(t, s.RunAtomic(func(ms types.CacheMultiStore) error { + ms.GetKVStore(keys["abc"]).Set([]byte("key"), []byte("value2")) + ms.GetObjKVStore(keys["obj"]).Set([]byte("key"), "value2") + return errors.New("failure") + })) + require.Equal(t, []byte("value"), s.GetKVStore(keys["abc"]).Get([]byte("key"))) + require.Equal(t, "value", s.GetObjKVStore(keys["obj"]).Get([]byte("key")).(string)) +} + +func TestBranchStore(t *testing.T) { + store := dbadapter.Store{DB: dbm.NewMemDB()} + objStore := internal.NewBTreeStore(btree.NewBTree[any](), + func(v any) bool { return v == nil }, + func(v any) int { return 1 }, + ) + keys := map[string]types.StoreKey{ + "abc": types.NewKVStoreKey("abc"), + "obj": types.NewObjectStoreKey("obj"), + } + parent := Store{stores: map[types.StoreKey]types.CacheWrap{ + keys["abc"]: store.CacheWrap(), + keys["obj"]: objStore.CacheWrap(), + }} + + s := Store{stores: map[types.StoreKey]types.CacheWrap{}, parentStore: parent.getCacheWrapper} + s.GetKVStore(keys["abc"]).Set([]byte("key"), []byte("value")) + snapshot := s.Clone() + s.GetKVStore(keys["abc"]).Set([]byte("key"), []byte("value2")) + s.GetObjKVStore(keys["obj"]).Set([]byte("key"), "value") + s.Restore(snapshot) + require.Equal(t, []byte("value"), s.GetKVStore(keys["abc"]).Get([]byte("key"))) + require.Equal(t, nil, s.GetObjKVStore(keys["obj"]).Get([]byte("key"))) +} diff --git a/store/dbadapter/store.go b/store/dbadapter/store.go index 013e26df2030..804ed4d38d8b 100644 --- a/store/dbadapter/store.go +++ b/store/dbadapter/store.go @@ -1,12 +1,9 @@ package dbadapter import ( - "io" - dbm "github.com/cosmos/cosmos-db" "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/tracekv" "cosmossdk.io/store/types" ) @@ -81,10 +78,5 @@ func (dsa Store) CacheWrap() types.CacheWrap { return cachekv.NewStore(dsa) } -// CacheWrapWithTrace implements KVStore. -func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(dsa, w, tc)) -} - // dbm.DB implements KVStore so we can CacheKVStore it. 
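// RunAtomic sketch (the key and values are illustrative): the callback receives a clone
// of the multistore, and its writes are restored into cms only when the callback returns
// nil; a non-nil error discards the branch, as exercised by TestRunAtomic above.
func exampleRunAtomic(cms cachemulti.Store, key types.StoreKey) error {
	return cms.RunAtomic(func(ms types.CacheMultiStore) error {
		ms.GetKVStore(key).Set([]byte("k"), []byte("v"))
		return nil // returning an error here would drop the write above
	})
}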
var _ types.KVStore = Store{} diff --git a/store/dbadapter/store_test.go b/store/dbadapter/store_test.go index 9685887f9126..515fd38c2bab 100644 --- a/store/dbadapter/store_test.go +++ b/store/dbadapter/store_test.go @@ -80,7 +80,4 @@ func TestCacheWraps(t *testing.T) { cacheWrapper := store.CacheWrap() require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) } diff --git a/store/gaskv/store.go b/store/gaskv/store.go index e0f96af7151e..377417b5b738 100644 --- a/store/gaskv/store.go +++ b/store/gaskv/store.go @@ -1,67 +1,97 @@ package gaskv import ( - "io" - "cosmossdk.io/store/types" ) +// ObjectValueLength is the emulated number of bytes for storing transient objects in gas accounting. +const ObjectValueLength = 16 + var _ types.KVStore = &Store{} -// Store applies gas tracking to an underlying KVStore. It implements the +type Store = GStore[[]byte] + +func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store { + return NewGStore(parent, gasMeter, gasConfig, + func(v []byte) bool { return v == nil }, + func(v []byte) int { return len(v) }, + ) +} + +type ObjStore = GStore[any] + +func NewObjStore(parent types.ObjKVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *ObjStore { + return NewGStore(parent, gasMeter, gasConfig, + func(v any) bool { return v == nil }, + func(v any) int { return ObjectValueLength }, + ) +} + +// GStore applies gas tracking to an underlying KVStore. It implements the // KVStore interface. -type Store struct { +type GStore[V any] struct { gasMeter types.GasMeter gasConfig types.GasConfig - parent types.KVStore + parent types.GKVStore[V] + + isZero func(V) bool + valueLen func(V) int } -// NewStore returns a reference to a new GasKVStore. -func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store { - kvs := &Store{ +// NewGStore returns a reference to a new GasKVStore. +func NewGStore[V any]( + parent types.GKVStore[V], + gasMeter types.GasMeter, + gasConfig types.GasConfig, + isZero func(V) bool, + valueLen func(V) int, +) *GStore[V] { + kvs := &GStore[V]{ gasMeter: gasMeter, gasConfig: gasConfig, parent: parent, + isZero: isZero, + valueLen: valueLen, } return kvs } // Implements Store. -func (gs *Store) GetStoreType() types.StoreType { +func (gs *GStore[V]) GetStoreType() types.StoreType { return gs.parent.GetStoreType() } // Implements KVStore. -func (gs *Store) Get(key []byte) (value []byte) { +func (gs *GStore[V]) Get(key []byte) (value V) { gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostFlat, types.GasReadCostFlatDesc) value = gs.parent.Get(key) // TODO overflow-safe math? gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasReadPerByteDesc) - gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasReadPerByteDesc) + gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(gs.valueLen(value)), types.GasReadPerByteDesc) return value } // Implements KVStore. -func (gs *Store) Set(key, value []byte) { +func (gs *GStore[V]) Set(key []byte, value V) { types.AssertValidKey(key) - types.AssertValidValue(value) + types.AssertValidValueGeneric(value, gs.isZero, gs.valueLen) gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc) // TODO overflow-safe math? 
gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(key)), types.GasWritePerByteDesc) - gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(value)), types.GasWritePerByteDesc) + gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(gs.valueLen(value)), types.GasWritePerByteDesc) gs.parent.Set(key, value) } // Implements KVStore. -func (gs *Store) Has(key []byte) bool { +func (gs *GStore[V]) Has(key []byte) bool { gs.gasMeter.ConsumeGas(gs.gasConfig.HasCost, types.GasHasDesc) return gs.parent.Has(key) } // Implements KVStore. -func (gs *Store) Delete(key []byte) { +func (gs *GStore[V]) Delete(key []byte) { // charge gas to prevent certain attack vectors even though space is being freed gs.gasMeter.ConsumeGas(gs.gasConfig.DeleteCost, types.GasDeleteDesc) gs.parent.Delete(key) @@ -70,7 +100,7 @@ func (gs *Store) Delete(key []byte) { // Iterator implements the KVStore interface. It returns an iterator which // incurs a flat gas cost for seeking to the first key/value pair and a variable // gas cost based on the current value's length if the iterator is valid. -func (gs *Store) Iterator(start, end []byte) types.Iterator { +func (gs *GStore[V]) Iterator(start, end []byte) types.GIterator[V] { return gs.iterator(start, end, true) } @@ -78,99 +108,95 @@ func (gs *Store) Iterator(start, end []byte) types.Iterator { // iterator which incurs a flat gas cost for seeking to the first key/value pair // and a variable gas cost based on the current value's length if the iterator // is valid. -func (gs *Store) ReverseIterator(start, end []byte) types.Iterator { +func (gs *GStore[V]) ReverseIterator(start, end []byte) types.GIterator[V] { return gs.iterator(start, end, false) } // Implements KVStore. -func (gs *Store) CacheWrap() types.CacheWrap { +func (gs *GStore[V]) CacheWrap() types.CacheWrap { panic("cannot CacheWrap a GasKVStore") } -// CacheWrapWithTrace implements the KVStore interface. -func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - panic("cannot CacheWrapWithTrace a GasKVStore") -} - -func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator { - var parent types.Iterator +func (gs *GStore[V]) iterator(start, end []byte, ascending bool) types.GIterator[V] { + var parent types.GIterator[V] if ascending { parent = gs.parent.Iterator(start, end) } else { parent = gs.parent.ReverseIterator(start, end) } - gi := newGasIterator(gs.gasMeter, gs.gasConfig, parent) - gi.(*gasIterator).consumeSeekGas() + gi := newGasIterator(gs.gasMeter, gs.gasConfig, parent, gs.valueLen) + gi.consumeSeekGas() return gi } -type gasIterator struct { +type gasIterator[V any] struct { gasMeter types.GasMeter gasConfig types.GasConfig - parent types.Iterator + parent types.GIterator[V] + valueLen func(V) int } -func newGasIterator(gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.Iterator) types.Iterator { - return &gasIterator{ +func newGasIterator[V any](gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.GIterator[V], valueLen func(V) int) *gasIterator[V] { + return &gasIterator[V]{ gasMeter: gasMeter, gasConfig: gasConfig, parent: parent, + valueLen: valueLen, } } // Implements Iterator. -func (gi *gasIterator) Domain() (start, end []byte) { +func (gi *gasIterator[V]) Domain() (start, end []byte) { return gi.parent.Domain() } // Implements Iterator. -func (gi *gasIterator) Valid() bool { +func (gi *gasIterator[V]) Valid() bool { return gi.parent.Valid() } // Next implements the Iterator interface. 
It seeks to the next key/value pair // in the iterator. It incurs a flat gas cost for seeking and a variable gas // cost based on the current value's length if the iterator is valid. -func (gi *gasIterator) Next() { +func (gi *gasIterator[V]) Next() { gi.consumeSeekGas() gi.parent.Next() } // Key implements the Iterator interface. It returns the current key and it does // not incur any gas cost. -func (gi *gasIterator) Key() (key []byte) { +func (gi *gasIterator[V]) Key() (key []byte) { key = gi.parent.Key() return key } // Value implements the Iterator interface. It returns the current value and it // does not incur any gas cost. -func (gi *gasIterator) Value() (value []byte) { - value = gi.parent.Value() - return value +func (gi *gasIterator[V]) Value() (value V) { + return gi.parent.Value() } // Implements Iterator. -func (gi *gasIterator) Close() error { +func (gi *gasIterator[V]) Close() error { return gi.parent.Close() } // Error delegates the Error call to the parent iterator. -func (gi *gasIterator) Error() error { +func (gi *gasIterator[V]) Error() error { return gi.parent.Error() } // consumeSeekGas consumes on each iteration step a flat gas cost and a variable gas cost // based on the current value's length. -func (gi *gasIterator) consumeSeekGas() { +func (gi *gasIterator[V]) consumeSeekGas() { if gi.Valid() { key := gi.Key() value := gi.Value() gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasValuePerByteDesc) - gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasValuePerByteDesc) + gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(gi.valueLen(value)), types.GasValuePerByteDesc) } gi.gasMeter.ConsumeGas(gi.gasConfig.IterNextCostFlat, types.GasIterNextCostFlatDesc) } diff --git a/store/gaskv/store_test.go b/store/gaskv/store_test.go index 354832d17c40..d4fd70778cd9 100644 --- a/store/gaskv/store_test.go +++ b/store/gaskv/store_test.go @@ -24,7 +24,6 @@ func TestGasKVStoreBasic(t *testing.T) { require.Equal(t, types.StoreTypeDB, st.GetStoreType()) require.Panics(t, func() { st.CacheWrap() }) - require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) }) require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") diff --git a/store/iavl/store.go b/store/iavl/store.go index 7bdcb1df9177..e73dcd6c1fdd 100644 --- a/store/iavl/store.go +++ b/store/iavl/store.go @@ -3,7 +3,6 @@ package iavl import ( "errors" "fmt" - "io" cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" dbm "github.com/cosmos/cosmos-db" @@ -16,7 +15,6 @@ import ( "cosmossdk.io/store/internal/kv" "cosmossdk.io/store/metrics" pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/tracekv" "cosmossdk.io/store/types" "cosmossdk.io/store/wrapper" ) @@ -179,11 +177,6 @@ func (st *Store) CacheWrap() types.CacheWrap { return cachekv.NewStore(st) } -// CacheWrapWithTrace implements the Store interface. -func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(st, w, tc)) -} - // Implements types.KVStore. 
func (st *Store) Set(key, value []byte) { types.AssertValidKey(key) diff --git a/store/iavl/store_test.go b/store/iavl/store_test.go index 7ad24d7fe33d..46d9fa62371c 100644 --- a/store/iavl/store_test.go +++ b/store/iavl/store_test.go @@ -663,9 +663,6 @@ func TestCacheWraps(t *testing.T) { cacheWrapper := store.CacheWrap() require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) } func TestChangeSets(t *testing.T) { diff --git a/store/internal/btree/btree.go b/store/internal/btree/btree.go new file mode 100644 index 000000000000..920b373ff491 --- /dev/null +++ b/store/internal/btree/btree.go @@ -0,0 +1,126 @@ +package btree + +import ( + "bytes" + "errors" + + "github.com/tidwall/btree" + + "cosmossdk.io/store/types" +) + +const ( + // The approximate number of items and children per B-tree node. Tuned with benchmarks. + // copied from memdb. + bTreeDegree = 32 +) + +var errKeyEmpty = errors.New("key cannot be empty") + +// BTree implements the sorted cache for cachekv store, +// we don't use MemDB here because cachekv is used extensively in sdk core path, +// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. +// +// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. +type BTree[V any] struct { + tree *btree.BTreeG[item[V]] +} + +// NewBTree creates a wrapper around `btree.BTreeG`. +func NewBTree[V any]() BTree[V] { + return BTree[V]{} +} + +func (bt *BTree[V]) init() { + if bt.tree != nil { + return + } + bt.tree = btree.NewBTreeGOptions(byKeys[V], btree.Options{ + Degree: bTreeDegree, + NoLocks: false, + }) +} + +// Set supports nil as value when used as overlay +func (bt *BTree[V]) Set(key []byte, value V) { + bt.init() + bt.tree.Set(newItem(key, value)) +} + +func (bt BTree[V]) Get(key []byte) (V, bool) { + if bt.tree == nil { + var zero V + return zero, false + } + i, found := bt.tree.Get(newItemWithKey[V](key)) + return i.value, found +} + +func (bt *BTree[V]) Delete(key []byte) { + if bt.tree == nil { + return + } + bt.tree.Delete(newItemWithKey[V](key)) +} + +func (bt BTree[V]) Iterator(start, end []byte) (types.GIterator[V], error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, errKeyEmpty + } + return newMemIterator(start, end, bt, true), nil +} + +func (bt BTree[V]) ReverseIterator(start, end []byte) (types.GIterator[V], error) { + if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { + return nil, errKeyEmpty + } + return newMemIterator(start, end, bt, false), nil +} + +// Copy the tree. This is a copy-on-write operation and is very fast because +// it only performs a shadowed copy. +func (bt BTree[V]) Copy() BTree[V] { + if bt.tree == nil { + return BTree[V]{} + } + return BTree[V]{ + tree: bt.tree.Copy(), + } +} + +func (bt BTree[V]) Clear() { + if bt.tree == nil { + return + } + bt.tree.Clear() +} + +func (bt BTree[V]) Scan(cb func(key []byte, value V) bool) { + if bt.tree == nil { + return + } + bt.tree.Scan(func(i item[V]) bool { + return cb(i.key, i.value) + }) +} + +// item is a btree item with byte slices as keys and values +type item[V any] struct { + key []byte + value V +} + +// byKeys compares the items by key +func byKeys[V any](a, b item[V]) bool { + return bytes.Compare(a.key, b.key) == -1 +} + +// newItem creates a new pair item. 
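// Usage sketch for the generic tree above (values here are []byte, but any V works; it
// assumes the snippet compiles under cosmossdk.io/store so the internal btree package is
// importable). The zero BTree is usable as an empty tree thanks to the lazy init, Copy is
// an O(1) copy-on-write snapshot, and a nil value is stored as-is so overlays can record
// deletions.
func exampleBTree() {
	bt := btree.NewBTree[[]byte]()
	bt.Set([]byte("a"), []byte{1})
	bt.Set([]byte("b"), nil) // tombstone entry; Get reports found == true

	snapshot := bt.Copy() // shares nodes until either side writes
	bt.Set([]byte("a"), []byte{2})

	if v, found := snapshot.Get([]byte("a")); found {
		_ = v // still {1} in the snapshot
	}
	bt.Scan(func(key, value []byte) bool {
		return true // visits every entry in ascending key order
	})
}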
+func newItem[V any](key []byte, value V) item[V] { + return item[V]{key: key, value: value} +} + +// newItem creates a new pair item with empty value. +func newItemWithKey[V any](key []byte) item[V] { + return item[V]{key: key} +} diff --git a/store/cachekv/internal/btree_test.go b/store/internal/btree/btree_test.go similarity index 91% rename from store/cachekv/internal/btree_test.go rename to store/internal/btree/btree_test.go index 06437997f636..75613ebbb69c 100644 --- a/store/cachekv/internal/btree_test.go +++ b/store/internal/btree/btree_test.go @@ -1,4 +1,4 @@ -package internal +package btree import ( "testing" @@ -9,20 +9,23 @@ import ( ) func TestGetSetDelete(t *testing.T) { - db := NewBTree() + db := NewBTree[[]byte]() // A nonexistent key should return nil. - value := db.Get([]byte("a")) + value, found := db.Get([]byte("a")) require.Nil(t, value) + require.False(t, found) // Set and get a value. db.Set([]byte("a"), []byte{0x01}) db.Set([]byte("b"), []byte{0x02}) - value = db.Get([]byte("a")) + value, found = db.Get([]byte("a")) require.Equal(t, []byte{0x01}, value) + require.True(t, found) - value = db.Get([]byte("b")) + value, found = db.Get([]byte("b")) require.Equal(t, []byte{0x02}, value) + require.True(t, found) // Deleting a non-existent value is fine. db.Delete([]byte("x")) @@ -30,17 +33,27 @@ func TestGetSetDelete(t *testing.T) { // Delete a value. db.Delete([]byte("a")) - value = db.Get([]byte("a")) + value, found = db.Get([]byte("a")) require.Nil(t, value) + require.False(t, found) db.Delete([]byte("b")) - value = db.Get([]byte("b")) + value, found = db.Get([]byte("b")) require.Nil(t, value) + require.False(t, found) +} + +func TestNilValue(t *testing.T) { + db := NewBTree[[]byte]() + db.Set([]byte("a"), nil) + value, found := db.Get([]byte("a")) + require.Nil(t, value) + require.True(t, found) } func TestDBIterator(t *testing.T) { - db := NewBTree() + db := NewBTree[[]byte]() for i := 0; i < 10; i++ { if i != 6 { // but skip 6. @@ -171,7 +184,7 @@ func TestDBIterator(t *testing.T) { []int64(nil), "reverse iterator from 2 (ex) to 4") // Ensure that the iterators don't panic with an empty database. - db2 := NewBTree() + db2 := NewBTree[[]byte]() itr, err = db2.Iterator(nil, nil) require.NoError(t, err) diff --git a/store/cachekv/internal/memiterator.go b/store/internal/btree/memiterator.go similarity index 60% rename from store/cachekv/internal/memiterator.go rename to store/internal/btree/memiterator.go index 9dbba7587071..b3caabd4584b 100644 --- a/store/cachekv/internal/memiterator.go +++ b/store/internal/btree/memiterator.go @@ -1,4 +1,4 @@ -package internal +package btree import ( "bytes" @@ -9,13 +9,13 @@ import ( "cosmossdk.io/store/types" ) -var _ types.Iterator = (*memIterator)(nil) +var _ types.Iterator = (*memIterator[[]byte])(nil) // memIterator iterates over iterKVCache items. // if value is nil, means it was deleted. // Implements Iterator. 
-type memIterator struct { - iter btree.IterG[item] +type memIterator[V any] struct { + iter btree.IterG[item[V]] start []byte end []byte @@ -23,18 +23,30 @@ type memIterator struct { valid bool } -func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator { +func newMemIterator[V any](start, end []byte, items BTree[V], ascending bool) *memIterator[V] { + if items.tree == nil { + return &memIterator[V]{ + start: start, + end: end, + ascending: ascending, + valid: false, + } + } + + var ( + valid bool + empty V + ) iter := items.tree.Iter() - var valid bool if ascending { if start != nil { - valid = iter.Seek(newItem(start, nil)) + valid = iter.Seek(newItem(start, empty)) } else { valid = iter.First() } } else { if end != nil { - valid = iter.Seek(newItem(end, nil)) + valid = iter.Seek(newItem(end, empty)) if !valid { valid = iter.Last() } else { @@ -46,7 +58,7 @@ func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator } } - mi := &memIterator{ + mi := &memIterator[V]{ iter: iter, start: start, end: end, @@ -61,27 +73,27 @@ func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator return mi } -func (mi *memIterator) Domain() (start, end []byte) { +func (mi *memIterator[V]) Domain() (start, end []byte) { return mi.start, mi.end } -func (mi *memIterator) Close() error { +func (mi *memIterator[V]) Close() error { mi.iter.Release() return nil } -func (mi *memIterator) Error() error { +func (mi *memIterator[V]) Error() error { if !mi.Valid() { return errors.New("invalid memIterator") } return nil } -func (mi *memIterator) Valid() bool { +func (mi *memIterator[V]) Valid() bool { return mi.valid } -func (mi *memIterator) Next() { +func (mi *memIterator[V]) Next() { mi.assertValid() if mi.ascending { @@ -95,7 +107,7 @@ func (mi *memIterator) Next() { } } -func (mi *memIterator) keyInRange(key []byte) bool { +func (mi *memIterator[V]) keyInRange(key []byte) bool { if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 { return false } @@ -105,15 +117,15 @@ func (mi *memIterator) keyInRange(key []byte) bool { return true } -func (mi *memIterator) Key() []byte { +func (mi *memIterator[V]) Key() []byte { return mi.iter.Item().key } -func (mi *memIterator) Value() []byte { +func (mi *memIterator[V]) Value() V { return mi.iter.Item().value } -func (mi *memIterator) assertValid() { +func (mi *memIterator[V]) assertValid() { if err := mi.Error(); err != nil { panic(err) } diff --git a/store/internal/btreeadaptor.go b/store/internal/btreeadaptor.go new file mode 100644 index 000000000000..dbc212779177 --- /dev/null +++ b/store/internal/btreeadaptor.go @@ -0,0 +1,61 @@ +package internal + +import ( + "cosmossdk.io/store/cachekv" + "cosmossdk.io/store/internal/btree" + "cosmossdk.io/store/types" +) + +var ( + _ types.KVStore = (*BTreeStore[[]byte])(nil) + _ types.ObjKVStore = (*BTreeStore[any])(nil) +) + +// BTreeStore is a wrapper for a BTree with GKVStore[V] implementation +type BTreeStore[V any] struct { + btree.BTree[V] + isZero func(V) bool + valueLen func(V) int +} + +// NewBTreeStore constructs new BTree adapter +func NewBTreeStore[V any](btree btree.BTree[V], isZero func(V) bool, valueLen func(V) int) *BTreeStore[V] { + return &BTreeStore[V]{btree, isZero, valueLen} +} + +func (ts *BTreeStore[V]) Get(key []byte) (value V) { + value, _ = ts.BTree.Get(key) + return +} + +// Hash Implements GKVStore. 
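// Adaptor sketch (it mirrors the construction used by the cachemulti tests above and
// assumes imports of "cosmossdk.io/store/internal" and "cosmossdk.io/store/internal/btree"):
// a bare generic btree becomes a types.ObjKVStore by supplying the zero-value predicate
// and a nominal per-value length used for validation and gas accounting.
func exampleObjStore() types.ObjKVStore {
	return internal.NewBTreeStore(btree.NewBTree[any](),
		func(v any) bool { return v == nil }, // nil marks "unset"/deleted
		func(v any) int { return 1 },         // objects are not serialized; use a flat size
	)
}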
+func (ts *BTreeStore[V]) Has(key []byte) bool { + _, found := ts.BTree.Get(key) + return found +} + +func (ts *BTreeStore[V]) Iterator(start, end []byte) types.GIterator[V] { + it, err := ts.BTree.Iterator(start, end) + if err != nil { + panic(err) + } + return it +} + +func (ts *BTreeStore[V]) ReverseIterator(start, end []byte) types.GIterator[V] { + it, err := ts.BTree.ReverseIterator(start, end) + if err != nil { + panic(err) + } + return it +} + +// GetStoreType returns the type of the store. +func (ts *BTreeStore[V]) GetStoreType() types.StoreType { + return types.StoreTypeDB +} + +// CacheWrap branches the underlying store. +func (ts *BTreeStore[V]) CacheWrap() types.CacheWrap { + return cachekv.NewGStore(ts, ts.isZero, ts.valueLen) +} diff --git a/store/listenkv/store.go b/store/listenkv/store.go index b08a6e395071..43d69fa663fc 100644 --- a/store/listenkv/store.go +++ b/store/listenkv/store.go @@ -1,8 +1,7 @@ package listenkv import ( - "io" - + "cosmossdk.io/store/cachekv" "cosmossdk.io/store/types" ) @@ -132,11 +131,5 @@ func (s *Store) GetStoreType() types.StoreType { // CacheWrap implements the KVStore interface. It panics as a Store // cannot be cache wrapped. func (s *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a ListenKVStore") -} - -// CacheWrapWithTrace implements the KVStore interface. It panics as a -// Store cannot be cache wrapped. -func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - panic("cannot CacheWrapWithTrace a ListenKVStore") + return cachekv.NewStore(s) } diff --git a/store/listenkv/store_test.go b/store/listenkv/store_test.go index 51b88912c2e1..f688f5c4c0d9 100644 --- a/store/listenkv/store_test.go +++ b/store/listenkv/store_test.go @@ -272,10 +272,5 @@ func TestListenKVStoreGetStoreType(t *testing.T) { func TestListenKVStoreCacheWrap(t *testing.T) { store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrap() }) -} - -func TestListenKVStoreCacheWrapWithTrace(t *testing.T) { - store := newEmptyListenKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) + store.CacheWrap() } diff --git a/store/mem/mem_test.go b/store/mem/mem_test.go index 6595b45dce17..23eb731fe648 100644 --- a/store/mem/mem_test.go +++ b/store/mem/mem_test.go @@ -30,9 +30,6 @@ func TestStore(t *testing.T) { cacheWrapper := db.CacheWrap() require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) } func TestCommit(t *testing.T) { diff --git a/store/mem/store.go b/store/mem/store.go index b819d7536302..2b2bc228ddcd 100644 --- a/store/mem/store.go +++ b/store/mem/store.go @@ -1,14 +1,11 @@ package mem import ( - "io" - dbm "github.com/cosmos/cosmos-db" "cosmossdk.io/store/cachekv" "cosmossdk.io/store/dbadapter" pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/tracekv" "cosmossdk.io/store/types" ) @@ -41,11 +38,6 @@ func (s Store) CacheWrap() types.CacheWrap { return cachekv.NewStore(s) } -// CacheWrapWithTrace implements KVStore. -func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(s, w, tc)) -} - // Commit performs a no-op as entries are persistent between commitments. 
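With CacheWrapWithTrace removed from the store interfaces, tracing is composed explicitly by the caller rather than being a method on every store. The removed mem implementation was simply cachekv layered over tracekv, so code that relied on it can recreate the same wiring by hand; a rough sketch (traceWrap is an illustrative helper name):

package main

import (
	"bytes"

	"cosmossdk.io/store/cachekv"
	"cosmossdk.io/store/mem"
	"cosmossdk.io/store/tracekv"
	"cosmossdk.io/store/types"
)

// traceWrap mirrors the removed CacheWrapWithTrace: operations are traced
// to w and buffered in a cachekv branch until Write is called.
func traceWrap(s types.KVStore, w *bytes.Buffer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}

func main() {
	var buf bytes.Buffer
	branch := traceWrap(mem.NewStore(), &buf, types.TraceContext{"blockHeight": 1})
	_ = branch
}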
func (s *Store) Commit() (id types.CommitID) { return } diff --git a/store/prefix/store.go b/store/prefix/store.go index 32b9e8247e2c..908f70210b55 100644 --- a/store/prefix/store.go +++ b/store/prefix/store.go @@ -3,27 +3,58 @@ package prefix import ( "bytes" "errors" - "io" "cosmossdk.io/store/cachekv" - "cosmossdk.io/store/tracekv" "cosmossdk.io/store/types" ) -var _ types.KVStore = Store{} +type ( + Store = GStore[[]byte] + ObjStore = GStore[any] +) + +var ( + _ types.KVStore = Store{} + _ types.ObjKVStore = ObjStore{} +) -// Store is similar with cometbft/cometbft/libs/db/prefix_db +func NewStore(parent types.KVStore, prefix []byte) Store { + return NewGStore( + parent, prefix, + func(v []byte) bool { return v == nil }, + func(v []byte) int { return len(v) }, + ) +} + +func NewObjStore(parent types.ObjKVStore, prefix []byte) ObjStore { + return NewGStore( + parent, prefix, + func(v any) bool { return v == nil }, + func(v any) int { return 1 }, + ) +} + +// GStore is similar with cometbft/cometbft/libs/db/prefix_db // both gives access only to the limited subset of the store // for convinience or safety -type Store struct { - parent types.KVStore +type GStore[V any] struct { + parent types.GKVStore[V] prefix []byte + + isZero func(V) bool + valueLen func(V) int } -func NewStore(parent types.KVStore, prefix []byte) Store { - return Store{ +func NewGStore[V any]( + parent types.GKVStore[V], prefix []byte, + isZero func(V) bool, valueLen func(V) int, +) GStore[V] { + return GStore[V]{ parent: parent, prefix: prefix, + + isZero: isZero, + valueLen: valueLen, } } @@ -34,7 +65,7 @@ func cloneAppend(bz, tail []byte) (res []byte) { return } -func (s Store) key(key []byte) (res []byte) { +func (s GStore[V]) key(key []byte) (res []byte) { if key == nil { panic("nil key on Store") } @@ -43,46 +74,41 @@ func (s Store) key(key []byte) (res []byte) { } // Implements Store -func (s Store) GetStoreType() types.StoreType { +func (s GStore[V]) GetStoreType() types.StoreType { return s.parent.GetStoreType() } // Implements CacheWrap -func (s Store) CacheWrap() types.CacheWrap { - return cachekv.NewStore(s) -} - -// CacheWrapWithTrace implements the KVStore interface. 
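prefix.Store is now just GStore[[]byte], and the new NewObjStore constructor gives the same key-prefixing behaviour over object-valued stores. A rough usage sketch, assuming the transient.NewObjStore constructor introduced later in this patch (the counter type and the "mod/" prefix are made up for illustration):

package main

import (
	"cosmossdk.io/store/prefix"
	"cosmossdk.io/store/transient"
)

type counter struct{ n int }

func main() {
	parent := transient.NewObjStore()                 // ObjKVStore holding arbitrary Go values
	sub := prefix.NewObjStore(parent, []byte("mod/")) // every key is transparently prefixed

	sub.Set([]byte("hits"), &counter{n: 1})
	c := sub.Get([]byte("hits")).(*counter) // Get returns any; the caller asserts the concrete type
	_ = c.n

	// The parent sees the fully prefixed key.
	_ = parent.Get([]byte("mod/hits"))
}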
-func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return cachekv.NewStore(tracekv.NewStore(s, w, tc)) +func (s GStore[V]) CacheWrap() types.CacheWrap { + return cachekv.NewGStore(s, s.isZero, s.valueLen) } // Implements KVStore -func (s Store) Get(key []byte) []byte { +func (s GStore[V]) Get(key []byte) V { res := s.parent.Get(s.key(key)) return res } // Implements KVStore -func (s Store) Has(key []byte) bool { +func (s GStore[V]) Has(key []byte) bool { return s.parent.Has(s.key(key)) } // Implements KVStore -func (s Store) Set(key, value []byte) { +func (s GStore[V]) Set(key []byte, value V) { types.AssertValidKey(key) - types.AssertValidValue(value) + types.AssertValidValueGeneric(value, s.isZero, s.valueLen) s.parent.Set(s.key(key), value) } // Implements KVStore -func (s Store) Delete(key []byte) { +func (s GStore[V]) Delete(key []byte) { s.parent.Delete(s.key(key)) } // Implements KVStore // Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L106 -func (s Store) Iterator(start, end []byte) types.Iterator { +func (s GStore[V]) Iterator(start, end []byte) types.GIterator[V] { newstart := cloneAppend(s.prefix, start) var newend []byte @@ -99,7 +125,7 @@ func (s Store) Iterator(start, end []byte) types.Iterator { // ReverseIterator implements KVStore // Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L129 -func (s Store) ReverseIterator(start, end []byte) types.Iterator { +func (s GStore[V]) ReverseIterator(start, end []byte) types.GIterator[V] { newstart := cloneAppend(s.prefix, start) var newend []byte @@ -114,18 +140,18 @@ func (s Store) ReverseIterator(start, end []byte) types.Iterator { return newPrefixIterator(s.prefix, start, end, iter) } -var _ types.Iterator = (*prefixIterator)(nil) +var _ types.Iterator = (*prefixIterator[[]byte])(nil) -type prefixIterator struct { +type prefixIterator[V any] struct { prefix []byte start []byte end []byte - iter types.Iterator + iter types.GIterator[V] valid bool } -func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefixIterator { - return &prefixIterator{ +func newPrefixIterator[V any](prefix, start, end []byte, parent types.GIterator[V]) *prefixIterator[V] { + return &prefixIterator[V]{ prefix: prefix, start: start, end: end, @@ -135,17 +161,17 @@ func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefix } // Implements Iterator -func (pi *prefixIterator) Domain() ([]byte, []byte) { +func (pi *prefixIterator[V]) Domain() ([]byte, []byte) { return pi.start, pi.end } // Implements Iterator -func (pi *prefixIterator) Valid() bool { +func (pi *prefixIterator[V]) Valid() bool { return pi.valid && pi.iter.Valid() } // Implements Iterator -func (pi *prefixIterator) Next() { +func (pi *prefixIterator[V]) Next() { if !pi.valid { panic("prefixIterator invalid, cannot call Next()") } @@ -157,7 +183,7 @@ func (pi *prefixIterator) Next() { } // Implements Iterator -func (pi *prefixIterator) Key() (key []byte) { +func (pi *prefixIterator[V]) Key() (key []byte) { if !pi.valid { panic("prefixIterator invalid, cannot call Key()") } @@ -169,7 +195,7 @@ func (pi *prefixIterator) Key() (key []byte) { } // Implements Iterator -func (pi *prefixIterator) Value() []byte { +func (pi *prefixIterator[V]) Value() V { if !pi.valid { panic("prefixIterator invalid, cannot call Value()") } @@ -178,13 +204,13 @@ func (pi *prefixIterator) Value() []byte { } // Implements Iterator -func (pi *prefixIterator) Close() error { +func (pi 
*prefixIterator[V]) Close() error { return pi.iter.Close() } // Error returns an error if the prefixIterator is invalid defined by the Valid // method. -func (pi *prefixIterator) Error() error { +func (pi *prefixIterator[V]) Error() error { if !pi.Valid() { return errors.New("invalid prefixIterator") } diff --git a/store/prefix/store_test.go b/store/prefix/store_test.go index 738835770425..818733033e09 100644 --- a/store/prefix/store_test.go +++ b/store/prefix/store_test.go @@ -445,7 +445,4 @@ func TestCacheWraps(t *testing.T) { cacheWrapper := store.CacheWrap() require.IsType(t, &cachekv.Store{}, cacheWrapper) - - cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil) - require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace) } diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go index 94336bc5faa8..bb3bc04ac4f2 100644 --- a/store/rootmulti/store.go +++ b/store/rootmulti/store.go @@ -63,17 +63,18 @@ type Store struct { iavlCacheSize int iavlDisableFastNode bool storesParams map[types.StoreKey]storeParams - stores map[types.StoreKey]types.CommitKVStore - keysByName map[string]types.StoreKey - initialVersion int64 - removalMap map[types.StoreKey]bool - traceWriter io.Writer - traceContext types.TraceContext - traceContextMutex sync.Mutex - interBlockCache types.MultiStorePersistentCache - listeners map[types.StoreKey]*types.MemoryListener - metrics metrics.StoreMetrics - commitHeader cmtproto.Header + // CommitStore is a common interface to unify generic CommitKVStore of different value types + stores map[types.StoreKey]types.CommitStore + keysByName map[string]types.StoreKey + initialVersion int64 + removalMap map[types.StoreKey]bool + traceWriter io.Writer + traceContext types.TraceContext + traceContextMutex sync.Mutex + interBlockCache types.MultiStorePersistentCache + listeners map[types.StoreKey]*types.MemoryListener + metrics metrics.StoreMetrics + commitHeader cmtproto.Header } var ( @@ -92,7 +93,7 @@ func NewStore(db dbm.DB, logger log.Logger, metricGatherer metrics.StoreMetrics) iavlCacheSize: iavl.DefaultIAVLCacheSize, iavlDisableFastNode: iavlDisablefastNodeDefault, storesParams: make(map[types.StoreKey]storeParams), - stores: make(map[types.StoreKey]types.CommitKVStore), + stores: make(map[types.StoreKey]types.CommitStore), keysByName: make(map[string]types.StoreKey), listeners: make(map[types.StoreKey]*types.MemoryListener), removalMap: make(map[types.StoreKey]bool), @@ -155,12 +156,6 @@ func (rs *Store) MountStoreWithDB(key types.StoreKey, typ types.StoreType, db db // GetCommitStore returns a mounted CommitStore for a given StoreKey. If the // store is wrapped in an inter-block cache, it will be unwrapped before returning. func (rs *Store) GetCommitStore(key types.StoreKey) types.CommitStore { - return rs.GetCommitKVStore(key) -} - -// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the -// store is wrapped in an inter-block cache, it will be unwrapped before returning. -func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { // If the Store has an inter-block cache, first attempt to lookup and unwrap // the underlying CommitKVStore by StoreKey. If it does not exist, fallback to // the main mapping of CommitKVStores. @@ -173,6 +168,17 @@ func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { return rs.stores[key] } +// GetCommitKVStore returns a mounted CommitKVStore for a given StoreKey. If the +// store is wrapped in an inter-block cache, it will be unwrapped before returning. 
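The rootmulti map is widened from CommitKVStore to CommitStore because an object store commits like any other store but is not byte-valued, so it cannot satisfy CommitKVStore. A compile-time sketch of the distinction, assuming the store module import paths used in this patch (the blank assertions only check interface satisfaction):

package main

import (
	"cosmossdk.io/store/iavl"
	"cosmossdk.io/store/transient"
	"cosmossdk.io/store/types"
)

// Both kinds of store fit in a map[types.StoreKey]types.CommitStore.
var (
	_ types.CommitStore = (*iavl.Store)(nil)
	_ types.CommitStore = (*transient.ObjStore)(nil)
)

// Only byte-valued stores are CommitKVStores.
var _ types.CommitKVStore = (*iavl.Store)(nil)

// A transient.ObjStore is not a CommitKVStore: its Get/Set work on `any`,
// not []byte, so the assertion below would not compile.
// var _ types.CommitKVStore = (*transient.ObjStore)(nil)

func main() {}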
+func (rs *Store) GetCommitKVStore(key types.StoreKey) types.CommitKVStore { + store, ok := rs.GetCommitStore(key).(types.CommitKVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not CommitKVStore", key)) + } + + return store +} + // StoreKeysByName returns mapping storeNames -> StoreKeys func (rs *Store) StoreKeysByName() map[string]types.StoreKey { return rs.keysByName @@ -221,7 +227,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error { } // load each Store (note this doesn't panic on unmounted keys now) - newStores := make(map[types.StoreKey]types.CommitKVStore) + newStores := make(map[types.StoreKey]types.CommitStore) storesKeys := make([]types.StoreKey, 0, len(rs.storesParams)) @@ -544,25 +550,22 @@ func (rs *Store) CacheWrap() types.CacheWrap { return rs.CacheMultiStore().(types.CacheWrap) } -// CacheWrapWithTrace implements the CacheWrapper interface. -func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - return rs.CacheWrap() -} - // CacheMultiStore creates ephemeral branch of the multi-store and returns a CacheMultiStore. // It implements the MultiStore interface. func (rs *Store) CacheMultiStore() types.CacheMultiStore { stores := make(map[types.StoreKey]types.CacheWrapper) for k, v := range rs.stores { - store := types.KVStore(v) - // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, - // set same listeners on cache store will observe duplicated writes. - if rs.ListeningEnabled(k) { - store = listenkv.NewStore(store, k, rs.listeners[k]) + store := types.CacheWrapper(v) + if kv, ok := store.(types.KVStore); ok { + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, + // set same listeners on cache store will observe duplicated writes. + if rs.ListeningEnabled(k) { + store = listenkv.NewStore(kv, k, rs.listeners[k]) + } } stores[k] = store } - return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext()) + return cachemulti.NewFromKVStore(stores, rs.traceWriter, rs.getTracingContext()) } // CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it @@ -574,7 +577,7 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor var commitInfo *types.CommitInfo storeInfos := map[string]bool{} for key, store := range rs.stores { - var cacheStore types.KVStore + var cacheStore types.CacheWrapper switch store.GetStoreType() { case types.StoreTypeIAVL: // If the store is wrapped with an inter-block cache, we must first unwrap @@ -613,16 +616,18 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor cacheStore = store } - // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, - // set same listeners on cache store will observe duplicated writes. - if rs.ListeningEnabled(key) { - cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key]) + if kv, ok := cacheStore.(types.KVStore); ok { + // Wire the listenkv.Store to allow listeners to observe the writes from the cache store, + // set same listeners on cache store will observe duplicated writes. 
+ if rs.ListeningEnabled(key) { + cacheStore = listenkv.NewStore(kv, key, rs.listeners[key]) + } } cachedStores[key] = cacheStore } - return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil + return cachemulti.NewFromKVStore(cachedStores, rs.traceWriter, rs.getTracingContext()), nil } // GetStore returns a mounted Store for a given StoreKey. If the StoreKey does @@ -632,7 +637,7 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor // TODO: This isn't used directly upstream. Consider returning the Store as-is // instead of unwrapping. func (rs *Store) GetStore(key types.StoreKey) types.Store { - store := rs.GetCommitKVStore(key) + store := rs.GetCommitStore(key) if store == nil { panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) } @@ -651,7 +656,10 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { if s == nil { panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) } - store := types.KVStore(s) + store, ok := s.(types.KVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not KVStore", key)) + } if rs.TracingEnabled() { store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext()) @@ -663,6 +671,20 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { return store } +// GetObjKVStore returns a mounted ObjKVStore for a given StoreKey. +func (rs *Store) GetObjKVStore(key types.StoreKey) types.ObjKVStore { + s := rs.stores[key] + if s == nil { + panic(fmt.Sprintf("store does not exist for key: %s", key.Name())) + } + store, ok := s.(types.ObjKVStore) + if !ok { + panic(fmt.Sprintf("store with key %v is not ObjKVStore", key)) + } + + return store +} + func (rs *Store) handlePruning(version int64) error { pruneHeight := rs.pruningManager.GetPruningHeight(version) rs.logger.Debug("prune start", "height", version) @@ -714,7 +736,7 @@ func (rs *Store) GetStoreByName(name string) types.Store { return nil } - return rs.GetCommitKVStore(key) + return rs.GetCommitStore(key) } // Query calls substore.Query with the same `req` where `req.Path` is @@ -829,10 +851,10 @@ func (rs *Store) Snapshot(height uint64, protoWriter protoio.Writer) error { stores := []namedStore{} keys := keysFromStoreKeyMap(rs.stores) for _, key := range keys { - switch store := rs.GetCommitKVStore(key).(type) { + switch store := rs.GetCommitStore(key).(type) { case *iavl.Store: stores = append(stores, namedStore{name: key.Name(), Store: store}) - case *transient.Store, *mem.Store: + case *transient.Store, *mem.Store, *transient.ObjStore: // Non-persisted stores shouldn't be snapshotted continue default: @@ -993,7 +1015,7 @@ loop: return snapshotItem, rs.LoadLatestVersion() } -func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitKVStore, error) { +func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitStore, error) { var db dbm.DB if params.db != nil { @@ -1034,20 +1056,26 @@ func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID return commitDBStoreAdapter{Store: dbadapter.Store{DB: db}}, nil case types.StoreTypeTransient: - _, ok := key.(*types.TransientStoreKey) - if !ok { - return nil, fmt.Errorf("invalid StoreKey for StoreTypeTransient: %s", key.String()) + if _, ok := key.(*types.TransientStoreKey); !ok { + return nil, fmt.Errorf("unexpected key type for a TransientStoreKey; got: %s, %T", key.String(), key) } return 
transient.NewStore(), nil case types.StoreTypeMemory: if _, ok := key.(*types.MemoryStoreKey); !ok { - return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s", key.String()) + return nil, fmt.Errorf("unexpected key type for a MemoryStoreKey; got: %s, %T", key.String(), key) } return mem.NewStore(), nil + case types.StoreTypeObject: + if _, ok := key.(*types.ObjectStoreKey); !ok { + return nil, fmt.Errorf("unexpected key type for a ObjectStoreKey; got: %s, %T", key.String(), key) + } + + return transient.NewObjStore(), nil + default: panic(fmt.Sprintf("unrecognized store type %v", params.typ)) } @@ -1059,7 +1087,7 @@ func (rs *Store) buildCommitInfo(version int64) *types.CommitInfo { for _, key := range keys { store := rs.stores[key] storeType := store.GetStoreType() - if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { + if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory || storeType == types.StoreTypeObject { continue } storeInfos = append(storeInfos, types.StoreInfo{ @@ -1177,7 +1205,7 @@ func GetLatestVersion(db dbm.DB) int64 { } // Commits each store and returns a new commitInfo. -func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore, removalMap map[types.StoreKey]bool) *types.CommitInfo { +func commitStores(version int64, storeMap map[types.StoreKey]types.CommitStore, removalMap map[types.StoreKey]bool) *types.CommitInfo { storeInfos := make([]types.StoreInfo, 0, len(storeMap)) storeKeys := keysFromStoreKeyMap(storeMap) @@ -1197,7 +1225,7 @@ func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore } storeType := store.GetStoreType() - if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory { + if storeType == types.StoreTypeTransient || storeType == types.StoreTypeMemory || storeType == types.StoreTypeObject { continue } diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go index 2702f3e08623..3d7dca809d2f 100644 --- a/store/rootmulti/store_test.go +++ b/store/rootmulti/store_test.go @@ -17,6 +17,7 @@ import ( sdkmaps "cosmossdk.io/store/internal/maps" "cosmossdk.io/store/metrics" pruningtypes "cosmossdk.io/store/pruning/types" + "cosmossdk.io/store/tracekv" "cosmossdk.io/store/types" ) @@ -690,9 +691,6 @@ func TestCacheWraps(t *testing.T) { cacheWrapper := multi.CacheWrap() require.IsType(t, cachemulti.Store{}, cacheWrapper) - - cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil) - require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace) } func TestTraceConcurrency(t *testing.T) { @@ -710,7 +708,7 @@ func TestTraceConcurrency(t *testing.T) { cms := multi.CacheMultiStore() store1 := cms.GetKVStore(key) - cw := store1.CacheWrapWithTrace(b, tc) + cw := tracekv.NewStore(store1.CacheWrap().(types.KVStore), b, tc) _ = cw require.NotNil(t, store1) @@ -788,6 +786,7 @@ var ( testStoreKey1 = types.NewKVStoreKey("store1") testStoreKey2 = types.NewKVStoreKey("store2") testStoreKey3 = types.NewKVStoreKey("store3") + testStoreKey4 = types.NewKVStoreKey("store4") ) func newMultiStoreWithMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) *Store { @@ -860,7 +859,7 @@ func getExpectedCommitID(store *Store, ver int64) types.CommitID { } } -func hashStores(stores map[types.StoreKey]types.CommitKVStore) []byte { +func hashStores(stores map[types.StoreKey]types.CommitStore) []byte { m := make(map[string][]byte, len(stores)) for key, store := range stores { name := key.Name() @@ -915,35 +914,40 @@ func 
TestStateListeners(t *testing.T) { require.Empty(t, ms.PopStateCache()) } -type commitKVStoreStub struct { - types.CommitKVStore +type commitStoreStub struct { + types.CommitStore Committed int } -func (stub *commitKVStoreStub) Commit() types.CommitID { - commitID := stub.CommitKVStore.Commit() +func (stub *commitStoreStub) Commit() types.CommitID { + commitID := stub.CommitStore.Commit() stub.Committed++ return commitID } -func prepareStoreMap() (map[types.StoreKey]types.CommitKVStore, error) { +func prepareStoreMap() (map[types.StoreKey]types.CommitStore, error) { var db dbm.DB = dbm.NewMemDB() store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil) store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil) store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil) + store.MountStoreWithDB(types.NewMemoryStoreKey("mem1"), types.StoreTypeMemory, nil) + store.MountStoreWithDB(types.NewObjectStoreKey("obj1"), types.StoreTypeObject, nil) if err := store.LoadLatestVersion(); err != nil { return nil, err } - return map[types.StoreKey]types.CommitKVStore{ - testStoreKey1: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("iavl1").(types.CommitKVStore), + return map[types.StoreKey]types.CommitStore{ + testStoreKey1: &commitStoreStub{ + CommitStore: store.GetStoreByName("iavl1").(types.CommitStore), + }, + testStoreKey2: &commitStoreStub{ + CommitStore: store.GetStoreByName("iavl2").(types.CommitStore), }, - testStoreKey2: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("iavl2").(types.CommitKVStore), + testStoreKey3: &commitStoreStub{ + CommitStore: store.GetStoreByName("trans1").(types.CommitStore), }, - testStoreKey3: &commitKVStoreStub{ - CommitKVStore: store.GetStoreByName("trans1").(types.CommitKVStore), + testStoreKey4: &commitStoreStub{ + CommitStore: store.GetStoreByName("obj1").(types.CommitStore), }, }, nil } @@ -974,7 +978,7 @@ func TestCommitStores(t *testing.T) { t.Run(tc.name, func(t *testing.T) { storeMap, err := prepareStoreMap() require.NoError(t, err) - store := storeMap[testStoreKey1].(*commitKVStoreStub) + store := storeMap[testStoreKey1].(*commitStoreStub) for i := tc.committed; i > 0; i-- { store.Commit() } diff --git a/store/tracekv/store.go b/store/tracekv/store.go index ba6df431da16..9fbc1bb4a3ef 100644 --- a/store/tracekv/store.go +++ b/store/tracekv/store.go @@ -6,6 +6,7 @@ import ( "io" "cosmossdk.io/errors" + "cosmossdk.io/store/cachekv" "cosmossdk.io/store/types" ) @@ -164,13 +165,7 @@ func (tkv *Store) GetStoreType() types.StoreType { // CacheWrap implements the KVStore interface. It panics because a Store // cannot be branched. func (tkv *Store) CacheWrap() types.CacheWrap { - panic("cannot CacheWrap a TraceKVStore") -} - -// CacheWrapWithTrace implements the KVStore interface. It panics as a -// Store cannot be branched. 
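Object stores plug into the multistore the same way as other transient stores: mount them with an ObjectStoreKey and StoreTypeObject, then fetch them through the new GetObjKVStore accessor. A rough end-to-end sketch (the store name and value are illustrative):

package main

import (
	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"
	"cosmossdk.io/store/metrics"
	"cosmossdk.io/store/rootmulti"
	"cosmossdk.io/store/types"
)

func main() {
	rs := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics())

	objKey := types.NewObjectStoreKey("objects")
	rs.MountStoreWithDB(objKey, types.StoreTypeObject, nil)
	if err := rs.LoadLatestVersion(); err != nil {
		panic(err)
	}

	// GetObjKVStore panics if the key is not mounted as an object store.
	objStore := rs.GetObjKVStore(objKey)
	objStore.Set([]byte("pending"), map[string]int{"txs": 3})

	// Object stores never contribute to the commit hash (buildCommitInfo and
	// commitStores skip StoreTypeObject), and Commit resets their contents
	// like other transient stores.
	rs.Commit()
}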
-func (tkv *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - panic("cannot CacheWrapWithTrace a TraceKVStore") + return cachekv.NewStore(tkv) } // writeOperation writes a KVStore operation to the underlying io.Writer as diff --git a/store/tracekv/store_test.go b/store/tracekv/store_test.go index 2c42734baefd..00e4406c7574 100644 --- a/store/tracekv/store_test.go +++ b/store/tracekv/store_test.go @@ -283,10 +283,5 @@ func TestTraceKVStoreGetStoreType(t *testing.T) { func TestTraceKVStoreCacheWrap(t *testing.T) { store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrap() }) -} - -func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) { - store := newEmptyTraceKVStore(nil) - require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) }) + store.CacheWrap() } diff --git a/store/transient/store.go b/store/transient/store.go index 6f393279f571..53332e9f33c1 100644 --- a/store/transient/store.go +++ b/store/transient/store.go @@ -1,9 +1,8 @@ package transient import ( - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/dbadapter" + "cosmossdk.io/store/internal" + "cosmossdk.io/store/internal/btree" pruningtypes "cosmossdk.io/store/pruning/types" "cosmossdk.io/store/types" ) @@ -11,43 +10,73 @@ import ( var ( _ types.Committer = (*Store)(nil) _ types.KVStore = (*Store)(nil) + + _ types.Committer = (*ObjStore)(nil) + _ types.ObjKVStore = (*ObjStore)(nil) ) // Store is a wrapper for a MemDB with Commiter implementation +type GStore[V any] struct { + internal.BTreeStore[V] +} + +// NewGStore constructs new generic transient store +func NewGStore[V any](isZero func(V) bool, valueLen func(V) int) *GStore[V] { + return &GStore[V]{*internal.NewBTreeStore(btree.NewBTree[V](), isZero, valueLen)} +} + +// Store specializes GStore for []byte type Store struct { - dbadapter.Store + GStore[[]byte] } -// Constructs new MemDB adapter func NewStore() *Store { - return &Store{Store: dbadapter.Store{DB: dbm.NewMemDB()}} + return &Store{*NewGStore( + func(v []byte) bool { return v == nil }, + func(v []byte) int { return len(v) }, + )} +} + +func (*Store) GetStoreType() types.StoreType { + return types.StoreTypeTransient +} + +// ObjStore specializes GStore for any +type ObjStore struct { + GStore[any] +} + +func NewObjStore() *ObjStore { + return &ObjStore{*NewGStore( + func(v any) bool { return v == nil }, + func(v any) int { return 1 }, // for value length validation + )} +} + +func (*ObjStore) GetStoreType() types.StoreType { + return types.StoreTypeObject } // Implements CommitStore // Commit cleans up Store. -func (ts *Store) Commit() (id types.CommitID) { - ts.Store = dbadapter.Store{DB: dbm.NewMemDB()} +func (ts *GStore[V]) Commit() (id types.CommitID) { + ts.Clear() return } -func (ts *Store) SetPruning(_ pruningtypes.PruningOptions) {} +func (ts *GStore[V]) SetPruning(_ pruningtypes.PruningOptions) {} // GetPruning is a no-op as pruning options cannot be directly set on this store. // They must be set on the root commit multi-store. -func (ts *Store) GetPruning() pruningtypes.PruningOptions { +func (ts *GStore[V]) GetPruning() pruningtypes.PruningOptions { return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined) } // Implements CommitStore -func (ts *Store) LastCommitID() types.CommitID { +func (ts *GStore[V]) LastCommitID() types.CommitID { return types.CommitID{} } -func (ts *Store) WorkingHash() []byte { +func (ts *GStore[V]) WorkingHash() []byte { return []byte{} } - -// Implements Store. 
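Both the byte-valued Store and the new ObjStore are thin specializations of the same in-memory GStore, so they share the ephemeral Commit-clears-everything semantics. A small usage sketch:

package main

import (
	"fmt"

	"cosmossdk.io/store/transient"
)

func main() {
	ts := transient.NewObjStore()
	ts.Set([]byte("inflight"), []string{"msg1", "msg2"})
	fmt.Println(ts.Has([]byte("inflight"))) // true

	// Commit does not persist anything; it resets the backing btree,
	// so transient/object data never outlives the block.
	ts.Commit()
	fmt.Println(ts.Has([]byte("inflight"))) // false
}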
-func (ts *Store) GetStoreType() types.StoreType { - return types.StoreTypeTransient -} diff --git a/store/types/store.go b/store/types/store.go index 8980179950e2..67bd140f5e75 100644 --- a/store/types/store.go +++ b/store/types/store.go @@ -128,14 +128,11 @@ type MultiStore interface { // call CacheMultiStore.Write(). CacheMultiStore() CacheMultiStore - // CacheMultiStoreWithVersion branches the underlying MultiStore where - // each stored is loaded at a specific version (height). - CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error) - // Convenience for fetching substores. // If the store does not exist, panics. GetStore(StoreKey) Store GetKVStore(StoreKey) KVStore + GetObjKVStore(StoreKey) ObjKVStore // TracingEnabled returns if tracing is enabled for the MultiStore. TracingEnabled() bool @@ -149,6 +146,14 @@ type MultiStore interface { // implied that the caller should update the context when necessary between // tracing operations. The modified MultiStore is returned. SetTracingContext(TraceContext) MultiStore +} + +type RootMultiStore interface { + MultiStore + + // CacheMultiStoreWithVersion branches the underlying MultiStore where + // each stored is loaded at a specific version (height). + CacheMultiStoreWithVersion(version int64) (CacheMultiStore, error) // LatestVersion returns the latest version in the store LatestVersion() int64 @@ -158,12 +163,14 @@ type MultiStore interface { type CacheMultiStore interface { MultiStore Write() // Writes operations to underlying KVStore + + RunAtomic(func(CacheMultiStore) error) error } // CommitMultiStore is an interface for a MultiStore without cache capabilities. type CommitMultiStore interface { Committer - MultiStore + RootMultiStore snapshottypes.Snapshotter // Mount a store of type using the given db. @@ -229,25 +236,25 @@ type CommitMultiStore interface { //---------subsp------------------------------- // KVStore -// BasicKVStore is a simple interface to get/set data -type BasicKVStore interface { +// GBasicKVStore is a simple interface to get/set data +type GBasicKVStore[V any] interface { // Get returns nil if key doesn't exist. Panics on nil key. - Get(key []byte) []byte + Get(key []byte) V // Has checks if a key exists. Panics on nil key. Has(key []byte) bool // Set sets the key. Panics on nil key or value. - Set(key, value []byte) + Set(key []byte, value V) // Delete deletes the key. Panics on nil key. Delete(key []byte) } -// KVStore additionally provides iteration and deletion -type KVStore interface { +// GKVStore additionally provides iteration and deletion +type GKVStore[V any] interface { Store - BasicKVStore + GBasicKVStore[V] // Iterator over a domain of keys in ascending order. End is exclusive. // Start must be less than end, or the Iterator is invalid. @@ -255,18 +262,54 @@ type KVStore interface { // To iterate over entire domain, use store.Iterator(nil, nil) // CONTRACT: No writes may happen within a domain while an iterator exists over it. // Exceptionally allowed for cachekv.Store, safe to write in the modules. - Iterator(start, end []byte) Iterator + Iterator(start, end []byte) GIterator[V] // Iterator over a domain of keys in descending order. End is exclusive. // Start must be less than end, or the Iterator is invalid. // Iterator must be closed by caller. // CONTRACT: No writes may happen within a domain while an iterator exists over it. // Exceptionally allowed for cachekv.Store, safe to write in the modules. 
- ReverseIterator(start, end []byte) Iterator + ReverseIterator(start, end []byte) GIterator[V] +} + +// GIterator is the generic version of dbm's Iterator +type GIterator[V any] interface { + // Domain returns the start (inclusive) and end (exclusive) limits of the iterator. + // CONTRACT: start, end readonly []byte + Domain() (start, end []byte) + + // Valid returns whether the current iterator is valid. Once invalid, the Iterator remains + // invalid forever. + Valid() bool + + // Next moves the iterator to the next key in the database, as defined by order of iteration. + // If Valid returns false, this method will panic. + Next() + + // Key returns the key at the current position. Panics if the iterator is invalid. + // CONTRACT: key readonly []byte + Key() (key []byte) + + // Value returns the value at the current position. Panics if the iterator is invalid. + // CONTRACT: value readonly []byte + Value() (value V) + + // Error returns the last error encountered by the iterator, if any. + Error() error + + // Close closes the iterator, relasing any allocated resources. + Close() error } -// Iterator is an alias db's Iterator for convenience. -type Iterator = dbm.Iterator +type ( + Iterator = GIterator[[]byte] + BasicKVStore = GBasicKVStore[[]byte] + KVStore = GKVStore[[]byte] + + ObjIterator = GIterator[any] + ObjBasicKVStore = GBasicKVStore[any] + ObjKVStore = GKVStore[any] +) // CacheKVStore branches a KVStore and provides read cache functionality. // After calling .Write() on the CacheKVStore, all previously created @@ -292,22 +335,18 @@ type CommitKVStore interface { // a Committer, since Commit ephemeral store make no sense. It can return KVStore, // HeapStore, SpaceStore, etc. type CacheWrap interface { + CacheWrapper + // Write syncs with the underlying store. Write() - // CacheWrap recursively wraps again. - CacheWrap() CacheWrap - - // CacheWrapWithTrace recursively wraps again with tracing enabled. - CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap + // Discard the write set + Discard() } type CacheWrapper interface { // CacheWrap branches a store. CacheWrap() CacheWrap - - // CacheWrapWithTrace branches a store with tracing enabled. 
- CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap } func (cid CommitID) IsZero() bool { @@ -318,6 +357,12 @@ func (cid CommitID) String() string { return fmt.Sprintf("CommitID{%v:%X}", cid.Hash, cid.Version) } +// BranchStore +type BranchStore interface { + Clone() BranchStore + Restore(BranchStore) +} + //---------------------------------------- // Store types @@ -332,6 +377,7 @@ const ( StoreTypeMemory StoreTypeSMT StoreTypePersistent + StoreTypeObject ) func (st StoreType) String() string { @@ -356,6 +402,9 @@ func (st StoreType) String() string { case StoreTypePersistent: return "StoreTypePersistent" + + case StoreTypeObject: + return "StoreTypeObject" } return "unknown store type" @@ -435,6 +484,29 @@ func (key *TransientStoreKey) String() string { return fmt.Sprintf("TransientStoreKey{%p, %s}", key, key.name) } +// ObjectStoreKey is used for indexing transient stores in a MultiStore +type ObjectStoreKey struct { + name string +} + +// Constructs new ObjectStoreKey +// Must return a pointer according to the ocap principle +func NewObjectStoreKey(name string) *ObjectStoreKey { + return &ObjectStoreKey{ + name: name, + } +} + +// Implements StoreKey +func (key *ObjectStoreKey) Name() string { + return key.name +} + +// Implements StoreKey +func (key *ObjectStoreKey) String() string { + return fmt.Sprintf("ObjectStoreKey{%p, %s}", key, key.name) +} + // MemoryStoreKey defines a typed key to be used with an in-memory KVStore. type MemoryStoreKey struct { name string @@ -532,3 +604,17 @@ func NewMemoryStoreKeys(names ...string) map[string]*MemoryStoreKey { return keys } + +// NewObjectStoreKeys constructs a new map matching store key names to their +// respective ObjectStoreKey references. +// The function will panic if there is a potential conflict in names (see `assertNoPrefix` +// function for more details). +func NewObjectStoreKeys(names ...string) map[string]*ObjectStoreKey { + assertNoCommonPrefix(names) + keys := make(map[string]*ObjectStoreKey) + for _, n := range names { + keys[n] = NewObjectStoreKey(n) + } + + return keys +} diff --git a/store/types/validity.go b/store/types/validity.go index a1fbaba999c7..cfb6088c8a32 100644 --- a/store/types/validity.go +++ b/store/types/validity.go @@ -1,5 +1,7 @@ package types +import "errors" + var ( // 128K - 1 MaxKeyLength = (1 << 17) - 1 @@ -22,7 +24,20 @@ func AssertValidValue(value []byte) { if value == nil { panic("value is nil") } - if len(value) > MaxValueLength { - panic("value is too large") + AssertValidValueLength(len(value)) +} + +// AssertValidValueGeneric checks if the value is valid(value is not nil and within length limit) +func AssertValidValueGeneric[V any](value V, isZero func(V) bool, valueLen func(V) int) { + if isZero(value) { + panic("value is nil") + } + AssertValidValueLength(valueLen(value)) +} + +// AssertValidValueLength checks if the value length is within length limit +func AssertValidValueLength(l int) { + if l > MaxValueLength { + panic(errors.New("value is too large")) } }
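Because KVStore and ObjKVStore are now instantiations of the same GKVStore[V] interface, helpers can be written once against the generic form and reused for both value types. A sketch of such a helper (collectKeys is an illustrative name, not part of the patch):

package main

import (
	"cosmossdk.io/store/mem"
	"cosmossdk.io/store/transient"
	"cosmossdk.io/store/types"
)

// collectKeys works for any value type V, e.g. a KVStore (V = []byte)
// or an ObjKVStore (V = any).
func collectKeys[V any](s types.GKVStore[V]) [][]byte {
	var keys [][]byte
	it := s.Iterator(nil, nil)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		keys = append(keys, it.Key())
	}
	return keys
}

func main() {
	kv := mem.NewStore()
	kv.Set([]byte("a"), []byte{1})

	obj := transient.NewObjStore()
	obj.Set([]byte("b"), struct{}{})

	_ = collectKeys[[]byte](kv) // keys of the byte-valued store
	_ = collectKeys[any](obj)   // keys of the object store
}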