From 098647e1358a8ee0b5206b14348376783347c5c7 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Mon, 19 Aug 2024 19:46:57 +0700
Subject: [PATCH 01/41] core,trie,eth,cmd: rework preimage store (#533)
* core,trie,eth,cmd: rework preimage store
* ci: trigger unittest path-base-implementing
---
.github/workflows/test-pr.yml | 1 +
cmd/evm/internal/t8ntool/execution.go | 2 +-
core/state/state_test.go | 3 +-
eth/api_test.go | 3 +-
trie/database.go | 97 ++++++---------------------
trie/preimages.go | 91 +++++++++++++++++++++++++
trie/secure_trie.go | 25 ++++---
trie/trie.go | 9 +++
8 files changed, 143 insertions(+), 88 deletions(-)
create mode 100644 trie/preimages.go
diff --git a/.github/workflows/test-pr.yml b/.github/workflows/test-pr.yml
index 89ad036616..063ea26483 100644
--- a/.github/workflows/test-pr.yml
+++ b/.github/workflows/test-pr.yml
@@ -7,6 +7,7 @@ on:
pull_request:
branches:
- master
+ - path-base-implementing
concurrency:
group: ${{ github.head_ref || github.run_id }}
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index 5a75d142b4..8781ccc2a3 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -262,7 +262,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
}
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
- sdb := state.NewDatabase(db)
+ sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
statedb, _ := state.New(common.Hash{}, sdb, nil)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 6868a78b15..8f19a5ff2c 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie"
)
type stateTest struct {
@@ -40,7 +41,7 @@ func newStateTest() *stateTest {
func TestDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
- sdb, _ := New(common.Hash{}, NewDatabaseWithConfig(db, nil), nil)
+ sdb, _ := New(common.Hash{}, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
s := &stateTest{db: db, state: sdb}
// generate a few entries
diff --git a/eth/api_test.go b/eth/api_test.go
index 39a1d58460..e1bfa48bc9 100644
--- a/eth/api_test.go
+++ b/eth/api_test.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/trie"
)
var dumper = spew.ConfigState{Indent: " "}
@@ -66,7 +67,7 @@ func TestAccountRange(t *testing.T) {
t.Parallel()
var (
- statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), nil)
+ statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true})
state, _ = state.New(common.Hash{}, statedb, nil)
addrs = [AccountRangeMaxResults * 2]common.Address{}
m = map[common.Address]bool{}
diff --git a/trie/database.go b/trie/database.go
index 58ca4e6f3c..744c2b0f81 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -74,8 +74,6 @@ type Database struct {
oldest common.Hash // Oldest tracked node, flush-list head
newest common.Hash // Newest tracked node, flush-list tail
- preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
-
gctime time.Duration // Time spent on garbage collection since last commit
gcnodes uint64 // Nodes garbage collected since last commit
gcsize common.StorageSize // Data storage garbage collected since last commit
@@ -84,11 +82,10 @@ type Database struct {
flushnodes uint64 // Nodes flushed since last commit
flushsize common.StorageSize // Data storage flushed since last commit
- dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata)
- childrenSize common.StorageSize // Storage size of the external children tracking
- preimagesSize common.StorageSize // Storage size of the preimages cache
-
- lock sync.RWMutex
+ dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata)
+ childrenSize common.StorageSize // Storage size of the external children tracking
+ preimages *preimageStore // Store for caching preimages of trie nodes
+ lock sync.RWMutex
}
// rawNode is a simple binary blob used to differentiate between collapsed trie
@@ -298,15 +295,18 @@ func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database
cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
}
}
+ var preimage *preimageStore
+ if config != nil && config.Preimages {
+ preimage = newPreimageStore(diskdb)
+ }
+
db := &Database{
diskdb: diskdb,
cleans: cleans,
dirties: map[common.Hash]*cachedNode{{}: {
children: make(map[common.Hash]uint16),
}},
- }
- if config == nil || config.Preimages { // TODO(karalabe): Flip to default off in the future
- db.preimages = make(map[common.Hash][]byte)
+ preimages: preimage,
}
return db
}
@@ -349,24 +349,6 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}
-// insertPreimage writes a new trie node pre-image to the memory database if it's
-// yet unknown. The method will NOT make a copy of the slice,
-// only use if the preimage will NOT be changed later on.
-//
-// Note, this method assumes that the database's lock is held!
-func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
- // Short circuit if preimage collection is disabled
- if db.preimages == nil {
- return
- }
- // Track the preimage if a yet unknown one
- if _, ok := db.preimages[hash]; ok {
- return
- }
- db.preimages[hash] = preimage
- db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
-}
-
// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
func (db *Database) node(hash common.Hash) node {
@@ -443,24 +425,6 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
return nil, errors.New("not found")
}
-// preimage retrieves a cached trie node pre-image from memory. If it cannot be
-// found cached, the method queries the persistent database for the content.
-func (db *Database) preimage(hash common.Hash) []byte {
- // Short circuit if preimage collection is disabled
- if db.preimages == nil {
- return nil
- }
- // Retrieve the node from cache if available
- db.lock.RLock()
- preimage := db.preimages[hash]
- db.lock.RUnlock()
-
- if preimage != nil {
- return preimage
- }
- return rawdb.ReadPreimage(db.diskdb, hash)
-}
-
// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
// states in test code.
@@ -605,20 +569,10 @@ func (db *Database) Cap(limit common.StorageSize) error {
// If the preimage cache got large enough, push to disk. If it's still small
// leave for later to deduplicate writes.
- flushPreimages := db.preimagesSize > 4*1024*1024
- if flushPreimages {
- if db.preimages == nil {
- log.Error("Attempted to write preimages whilst disabled")
- } else {
- rawdb.WritePreimages(batch, db.preimages)
- if batch.ValueSize() > ethdb.IdealBatchSize {
- if err := batch.Write(); err != nil {
- return err
- }
- batch.Reset()
- }
- }
+ if db.preimages != nil {
+ db.preimages.commit(false)
}
+
// Keep committing nodes from the flush-list until we're below allowance
oldest := db.oldest
for size > limit && oldest != (common.Hash{}) {
@@ -652,13 +606,6 @@ func (db *Database) Cap(limit common.StorageSize) error {
db.lock.Lock()
defer db.lock.Unlock()
- if flushPreimages {
- if db.preimages == nil {
- log.Error("Attempted to reset preimage cache whilst disabled")
- } else {
- db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
- }
- }
for db.oldest != oldest {
node := db.dirties[db.oldest]
delete(db.dirties, db.oldest)
@@ -702,13 +649,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
// Move all of the accumulated preimages into a write batch
if db.preimages != nil {
- rawdb.WritePreimages(batch, db.preimages)
- // Since we're going to replay trie node writes into the clean cache, flush out
- // any batched pre-images before continuing.
- if err := batch.Write(); err != nil {
- return err
- }
- batch.Reset()
+ db.preimages.commit(true)
}
// Move the trie itself into the batch, flushing if enough data is accumulated
nodes, storage := len(db.dirties), db.dirtiesSize
@@ -731,9 +672,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
batch.Reset()
// Reset the storage counters and bumped metrics
- if db.preimages != nil {
- db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
- }
+
memcacheCommitTimeTimer.Update(time.Since(start))
memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
@@ -845,7 +784,11 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
// counted.
var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
- return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize
+ var preimageSize common.StorageSize
+ if db.preimages != nil {
+ preimageSize = db.preimages.size()
+ }
+ return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
}
// saveCache saves clean state cache to given directory path
diff --git a/trie/preimages.go b/trie/preimages.go
new file mode 100644
index 0000000000..f5b1291a23
--- /dev/null
+++ b/trie/preimages.go
@@ -0,0 +1,91 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// preimageStore is the store for caching preimages of node key.
+type preimageStore struct {
+ lock sync.RWMutex
+ disk ethdb.KeyValueStore
+ preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
+ preimagesSize common.StorageSize // Storage size of the preimages cache
+}
+
+// newPreimageStore initializes the store for caching preimages.
+func newPreimageStore(disk ethdb.KeyValueStore) *preimageStore {
+ return &preimageStore{
+ disk: disk,
+ preimages: make(map[common.Hash][]byte),
+ }
+}
+
+func (store *preimageStore) insertPreimage(preimages map[common.Hash][]byte) {
+ store.lock.Lock()
+ defer store.lock.Unlock()
+
+ for hash, preimage := range preimages {
+ if _, ok := store.preimages[hash]; ok {
+ continue
+ }
+ store.preimages[hash] = preimage
+ store.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
+ }
+}
+
+func (store *preimageStore) preimage(hash common.Hash) []byte {
+ // Lock the store for reading
+ store.lock.RLock()
+ preimage := store.preimages[hash]
+ store.lock.RUnlock()
+ if preimage != nil {
+ return preimage
+ }
+ // Incase preimage is not existed in memory, then read from disk.
+ return rawdb.ReadPreimage(store.disk, hash)
+}
+
+func (store *preimageStore) commit(force bool) error {
+ store.lock.Lock()
+ defer store.lock.Unlock()
+
+ // If preimages size is less than 4MB and not forced to commit, then return.
+ if store.preimagesSize <= 4*1024*1024 && !force {
+ return nil
+ }
+
+ batch := store.disk.NewBatch()
+ rawdb.WritePreimages(batch, store.preimages)
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ store.preimages, store.preimagesSize = make(map[common.Hash][]byte), 0
+ return nil
+}
+
+func (store *preimageStore) size() common.StorageSize {
+ store.lock.RLock()
+ defer store.lock.RUnlock()
+
+ return store.preimagesSize
+}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 18be12d34a..7a16a09898 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -37,6 +37,7 @@ import (
// SecureTrie is not safe for concurrent use.
type SecureTrie struct {
trie Trie
+ preimages *preimageStore
hashKeyBuf [common.HashLength]byte
secKeyCache map[string][]byte
secKeyCacheOwner *SecureTrie // Pointer to self, replace the key cache on mismatch
@@ -61,7 +62,7 @@ func NewSecure(root common.Hash, db *Database) (*SecureTrie, error) {
if err != nil {
return nil, err
}
- return &SecureTrie{trie: *trie}, nil
+ return &SecureTrie{trie: *trie, preimages: db.preimages}, nil
}
// Get returns the value for key stored in the trie.
@@ -153,7 +154,11 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte {
if key, ok := t.getSecKeyCache()[string(shaKey)]; ok {
return key
}
- return t.trie.db.preimage(common.BytesToHash(shaKey))
+ if t.preimages == nil {
+ return nil
+ }
+
+ return t.preimages.preimage(common.BytesToHash(shaKey))
}
// Commit writes all nodes and the secure hash pre-images to the trie's database.
@@ -164,12 +169,13 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte {
func (t *SecureTrie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
// Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 {
- if t.trie.db.preimages != nil { // Ugly direct check but avoids the below write lock
- t.trie.db.lock.Lock()
+ if t.preimages != nil { // Ugly direct check but avoids the below write lock
+ preimages := make(map[common.Hash][]byte)
+
for hk, key := range t.secKeyCache {
- t.trie.db.insertPreimage(common.BytesToHash([]byte(hk)), key)
+ preimages[common.BytesToHash([]byte(hk))] = key
}
- t.trie.db.lock.Unlock()
+ t.preimages.insertPreimage(preimages)
}
t.secKeyCache = make(map[string][]byte)
}
@@ -185,8 +191,11 @@ func (t *SecureTrie) Hash() common.Hash {
// Copy returns a copy of SecureTrie.
func (t *SecureTrie) Copy() *SecureTrie {
- cpy := *t
- return &cpy
+ return &SecureTrie{
+ trie: *t.trie.Copy(),
+ preimages: t.preimages,
+ secKeyCache: t.secKeyCache,
+ }
}
// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration
diff --git a/trie/trie.go b/trie/trie.go
index 13343112b8..eb53258700 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -587,3 +587,12 @@ func (t *Trie) Reset() {
t.root = nil
t.unhashed = 0
}
+
+// Copy returns a copy of Trie.
+func (t *Trie) Copy() *Trie {
+ return &Trie{
+ db: t.db,
+ root: t.root,
+ unhashed: t.unhashed,
+ }
+}
From 9b7810e790127fbb31f7d1bbb9b4681e67aa8090 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Fri, 30 Aug 2024 12:13:34 +0700
Subject: [PATCH 02/41] trie, les, tests, core: implement trie tracer: Trie
tracer is an auxiliary tool to capture all deleted node wwhich can't be
captured by trie.Committer. The deleted nodes (#552)
can be removed from the disk later. Implement traverse and rework init Trie
---
core/types/hashing_test.go | 13 +-
tests/fuzzers/rangeproof/rangeproof-fuzzer.go | 9 +-
trie/committer.go | 2 +-
trie/iterator_test.go | 3 +-
trie/proof.go | 23 ++-
trie/proof_test.go | 19 +--
trie/secure_trie_test.go | 3 +-
trie/trie.go | 68 +++++++++
trie/utils.go | 134 ++++++++++++++++++
trie/utils_test.go | 122 ++++++++++++++++
10 files changed, 364 insertions(+), 32 deletions(-)
create mode 100644 trie/utils.go
create mode 100644 trie/utils_test.go
diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go
index 6d1ebf897c..de71ee41a4 100644
--- a/core/types/hashing_test.go
+++ b/core/types/hashing_test.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
@@ -38,7 +39,8 @@ func TestDeriveSha(t *testing.T) {
t.Fatal(err)
}
for len(txs) < 1000 {
- exp := types.DeriveSha(txs, new(trie.Trie))
+ tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ exp := types.DeriveSha(txs, tr)
got := types.DeriveSha(txs, trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp)
@@ -85,7 +87,8 @@ func BenchmarkDeriveSha200(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- exp = types.DeriveSha(txs, new(trie.Trie))
+ tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ exp = types.DeriveSha(txs, tr)
}
})
@@ -106,7 +109,8 @@ func TestFuzzDeriveSha(t *testing.T) {
rndSeed := mrand.Int()
for i := 0; i < 10; i++ {
seed := rndSeed + i
- exp := types.DeriveSha(newDummy(i), new(trie.Trie))
+ tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ exp := types.DeriveSha(newDummy(i), tr)
got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
printList(newDummy(seed))
@@ -134,7 +138,8 @@ func TestDerivableList(t *testing.T) {
},
}
for i, tc := range tcs[1:] {
- exp := types.DeriveSha(flatList(tc), new(trie.Trie))
+ tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ exp := types.DeriveSha(flatList(tc), tr)
got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("case %d: got %x exp %x", i, got, exp)
diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
index 09ee6bb9c7..5d7097b137 100644
--- a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
+++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
@@ -24,6 +24,7 @@ import (
"sort"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/trie"
)
@@ -62,7 +63,7 @@ func (f *fuzzer) readInt() uint64 {
func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
- trie := new(trie.Trie)
+ trie, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
size := f.readInt()
// Fill it with some fluff
@@ -182,8 +183,10 @@ func (f *fuzzer) fuzz() int {
// The function must return
// 1 if the fuzzer should increase priority of the
-// given input during subsequent fuzzing (for example, the input is lexically
-// correct and was parsed successfully);
+//
+// given input during subsequent fuzzing (for example, the input is lexically
+// correct and was parsed successfully);
+//
// -1 if the input must not be added to corpus even if gives new coverage; and
// 0 otherwise; other values are reserved for future use.
func Fuzz(input []byte) int {
diff --git a/trie/committer.go b/trie/committer.go
index 0721990a21..b74572ee27 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -91,7 +91,7 @@ func (c *committer) commit(n node, db *Database) (node, int, error) {
if hash != nil && !dirty {
return hash, 0, nil
}
- // Commit children, then parent, and remove remove the dirty flag.
+ // Commit children, then parent, and remove the dirty flag.
switch cn := n.(type) {
case *shortNode:
// Commit child
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 1f984c0f4b..679ae2cdcc 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -24,6 +24,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
@@ -296,7 +297,7 @@ func TestUnionIterator(t *testing.T) {
}
func TestIteratorNoDups(t *testing.T) {
- var tr Trie
+ tr, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
for _, val := range testdata1 {
tr.Update([]byte(val.k), []byte(val.v))
}
diff --git a/trie/proof.go b/trie/proof.go
index 51ecea0c39..2c2da9cb82 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -23,7 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -335,9 +334,9 @@ findFork:
// unset removes all internal node references either the left most or right most.
// It can meet these scenarios:
//
-// - The given path is existent in the trie, unset the associated nodes with the
-// specific direction
-// - The given path is non-existent in the trie
+// - The given path is existent in the trie, unset the associated nodes with the
+// specific direction
+// - The given path is non-existent in the trie
// - the fork point is a fullnode, the corresponding child pointed by path
// is nil, return
// - the fork point is a shortnode, the shortnode is included in the range,
@@ -452,15 +451,15 @@ func hasRightElement(node node, key []byte) bool {
// Expect the normal case, this function can also be used to verify the following
// range proofs:
//
-// - All elements proof. In this case the proof can be nil, but the range should
-// be all the leaves in the trie.
+// - All elements proof. In this case the proof can be nil, but the range should
+// be all the leaves in the trie.
//
-// - One element proof. In this case no matter the edge proof is a non-existent
-// proof or not, we can always verify the correctness of the proof.
+// - One element proof. In this case no matter the edge proof is a non-existent
+// proof or not, we can always verify the correctness of the proof.
//
-// - Zero element proof. In this case a single non-existent proof is enough to prove.
-// Besides, if there are still some other leaves available on the right side, then
-// an error will be returned.
+// - Zero element proof. In this case a single non-existent proof is enough to prove.
+// Besides, if there are still some other leaves available on the right side, then
+// an error will be returned.
//
// Except returning the error to indicate the proof is valid or not, the function will
// also return a flag to indicate whether there exists more accounts/slots in the trie.
@@ -553,7 +552,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
}
// Rebuild the trie with the leaf stream, the shape of trie
// should be same with the original one.
- tr := &Trie{root: root, db: NewDatabase(memorydb.New())}
+ tr := newWithRootNode(root)
if empty {
tr.root = nil
}
diff --git a/trie/proof_test.go b/trie/proof_test.go
index 95ad6169c3..19ca51e259 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
@@ -79,7 +80,7 @@ func TestProof(t *testing.T) {
}
func TestOneElementProof(t *testing.T) {
- trie := new(Trie)
+ trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
updateString(trie, "k", "v")
for i, prover := range makeProvers(trie) {
proof := prover([]byte("k"))
@@ -130,7 +131,7 @@ func TestBadProof(t *testing.T) {
// Tests that missing keys can also be proven. The test explicitly uses a single
// entry trie and checks for missing keys both before and after the single entry.
func TestMissingKeyProof(t *testing.T) {
- trie := new(Trie)
+ trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
updateString(trie, "k", "v")
for i, key := range []string{"a", "j", "l", "z"} {
@@ -386,7 +387,7 @@ func TestOneElementRangeProof(t *testing.T) {
}
// Test the mini trie with only a single element.
- tinyTrie := new(Trie)
+ tinyTrie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
entry := &kv{randBytes(32), randBytes(20), false}
tinyTrie.Update(entry.k, entry.v)
@@ -458,7 +459,7 @@ func TestAllElementsProof(t *testing.T) {
// TestSingleSideRangeProof tests the range starts from zero.
func TestSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
- trie := new(Trie)
+ trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -493,7 +494,7 @@ func TestSingleSideRangeProof(t *testing.T) {
// TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff.
func TestReverseSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
- trie := new(Trie)
+ trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -600,7 +601,7 @@ func TestBadRangeProof(t *testing.T) {
// TestGappedRangeProof focuses on the small trie with embedded nodes.
// If the gapped node is embedded in the trie, it should be detected too.
func TestGappedRangeProof(t *testing.T) {
- trie := new(Trie)
+ trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
var entries []*kv // Sorted entries
for i := byte(0); i < 10; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -674,7 +675,7 @@ func TestSameSideProofs(t *testing.T) {
}
func TestHasRightElement(t *testing.T) {
- trie := new(Trie)
+ trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -1027,7 +1028,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
}
func randomTrie(n int) (*Trie, map[string]*kv) {
- trie := new(Trie)
+ trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
for i := byte(0); i < 100; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -1052,7 +1053,7 @@ func randBytes(n int) []byte {
}
func nonRandomTrie(n int) (*Trie, map[string]*kv) {
- trie := new(Trie)
+ trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
max := uint64(0xffffffffffffffff)
for i := uint64(0); i < uint64(n); i++ {
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index fb6c38ee22..a3ece84b57 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -112,8 +112,7 @@ func TestSecureTrieConcurrency(t *testing.T) {
threads := runtime.NumCPU()
tries := make([]*SecureTrie, threads)
for i := 0; i < threads; i++ {
- cpy := *trie
- tries[i] = &cpy
+ tries[i] = trie.Copy()
}
// Start a batch of goroutines interactng with the trie
pend := new(sync.WaitGroup)
diff --git a/trie/trie.go b/trie/trie.go
index eb53258700..79ed3176f0 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -24,6 +24,7 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -66,6 +67,8 @@ type Trie struct {
// hashing operation. This number will not directly map to the number of
// actually unhashed nodes
unhashed int
+
+ tracer *tracer
}
// newFlag returns the cache flag value for a newly created node.
@@ -73,6 +76,16 @@ func (t *Trie) newFlag() nodeFlag {
return nodeFlag{dirty: true}
}
+// newWithRootNode initializes the trie with the given root node.
+// It's only used by range prover.
+func newWithRootNode(root node) *Trie {
+ return &Trie{
+ root: root,
+ //tracer: newTracer(),
+ db: NewDatabase(rawdb.NewMemoryDatabase()),
+ }
+}
+
// New creates a trie with an existing root node from db.
//
// If root is the zero hash or the sha3 hash of an empty string, the
@@ -85,6 +98,7 @@ func New(root common.Hash, db *Database) (*Trie, error) {
}
trie := &Trie{
db: db,
+ //tracer: newTracer(),
}
if root != (common.Hash{}) && root != emptyRoot {
rootnode, err := trie.resolveHash(root[:], nil)
@@ -317,6 +331,11 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
if matchlen == 0 {
return true, branch, nil
}
+
+ // New branch node is created as a child of the original short node.
+ // Track the newly inserted node in the tracer. The node identifier
+ // passed is the path from the root node.
+ t.tracer.onInsert(append(prefix, key[:matchlen]...))
// Otherwise, replace it with a short node leading up to the branch.
return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil
@@ -331,6 +350,10 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
return true, n, nil
case nil:
+ // New short node is created and track it in the tracer. The node identifier
+ // passed is the path from the root node. Note the valueNode won't be tracked
+ // since it's always embedded in its parent.
+ t.tracer.onInsert(prefix)
return true, &shortNode{key, value, t.newFlag()}, nil
case hashNode:
@@ -372,6 +395,33 @@ func (t *Trie) TryDelete(key []byte) error {
return nil
}
+// traverse method mostly for learning and testing purposes.
+func (t *Trie) traverse(n node, prefix []byte) {
+ switch n := n.(type) {
+ case *shortNode:
+ // If it's a short node, print the prefix and key
+ newPrefix := append(prefix, n.Key...)
+ fmt.Printf("[Traverse] Short Node: %+v\n", n)
+ t.traverse(n.Val, newPrefix)
+ case *fullNode:
+ // If it's a full node, print the prefix and each child
+
+ for i, _ := range n.Children {
+ if n.Children[i] != nil {
+ fmt.Printf("[Traverse] Full Node: %+v\n", n.Children[i])
+ newPrefix := append(prefix, byte(i))
+ t.traverse(n.Children[i], newPrefix)
+ }
+ }
+ case valueNode:
+ // If it's a value node, print the prefix and value
+ fmt.Printf("Value Node: %s -> %s\n", string(prefix), string(n))
+ case hashNode:
+ // If it's a hash node, resolve it and traverse the result
+ fmt.Printf("Hash Node: %s -> %s \n", string(prefix), string(n))
+ }
+}
+
// delete returns the new root of the trie with key deleted.
// It reduces the trie to minimal form by simplifying
// nodes on the way up after deleting recursively.
@@ -383,6 +433,10 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
return false, n, nil // don't replace n on mismatch
}
if matchlen == len(key) {
+ // It means that matched short node is deleted entirely, and track
+ // it in the deletion set. The same the valueNode doesn't need
+ // to be tracked at all since it's always be embedded in its parent.
+ t.tracer.onDelete(prefix)
return true, nil, nil // remove n entirely for whole matches
}
// The key is longer than n.Key. Remove the remaining suffix
@@ -395,6 +449,10 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
}
switch child := child.(type) {
case *shortNode:
+ // The child shortNode is merged into its parent, track
+ // is deleted as well.
+ t.tracer.onDelete(append(prefix, n.Key...))
+
// Deleting from the subtrie reduced it to another
// short node. Merge the nodes to avoid creating a
// shortNode{..., shortNode{...}}. Use concat (which
@@ -456,6 +514,11 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
return false, nil, err
}
if cnode, ok := cnode.(*shortNode); ok {
+ // Replace the entire full node with the short node.
+ // Mark the original short nodes as delete since the value
+ // is embedded in its parent now.
+ t.tracer.onDelete(append(prefix, byte(pos)))
+
k := append([]byte{byte(pos)}, cnode.Key...)
return true, &shortNode{k, cnode.Val, t.newFlag()}, nil
}
@@ -528,6 +591,9 @@ func (t *Trie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
if t.db == nil {
panic("commit called on trie with nil database")
}
+
+ defer t.tracer.reset()
+
if t.root == nil {
return emptyRoot, 0, nil
}
@@ -586,6 +652,7 @@ func (t *Trie) hashRoot() (node, node, error) {
func (t *Trie) Reset() {
t.root = nil
t.unhashed = 0
+ t.tracer.reset()
}
// Copy returns a copy of Trie.
@@ -594,5 +661,6 @@ func (t *Trie) Copy() *Trie {
db: t.db,
root: t.root,
unhashed: t.unhashed,
+ tracer: t.tracer.copy(),
}
}
diff --git a/trie/utils.go b/trie/utils.go
new file mode 100644
index 0000000000..be5e491bd8
--- /dev/null
+++ b/trie/utils.go
@@ -0,0 +1,134 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+// tracer tracks the changes of trie nodes. During the trie operations,
+// some nodes can be deleted from the trie, while these deleted nodes
+// won't be captured by trie.Hasher or trie.Commiter. Thus, these deleted
+// nodes won't be removed from the disk at all. Tracer is an auxiliary tool
+// used to track all insert and delete operations on trie and capture all
+// deleted nodes eventually.
+//
+// The changed nodes can be mainly divided into two categories: the leaf
+// nodes and intermediate nodes. The fromer is inserted/deleted by callers
+// white the latter is iserted/deleted in order to follow the rule of trie.
+// This tool can track all of them no matter is embedded in its
+// parent or nit, but the valueNode is never tracked.
+//
+// Note tracer is not thread-safe, callers should be responsible for handling
+// the concurrency issues by themselves.
+type tracer struct {
+ insert map[string]struct{}
+ delete map[string]struct{}
+}
+
+// newTracer initlializes tride node diff tracer.
+func newTracer() *tracer {
+ return &tracer{
+ insert: make(map[string]struct{}),
+ delete: make(map[string]struct{}),
+ }
+}
+
+// onInsert tracks the newly inserted trie node. If it's already
+// in the delete set(resurrected node), then just wipe it from
+// the deletion set as it's untouched.
+func (t *tracer) onInsert(key []byte) {
+ // Tracer isn't used right now, remove this check latter.
+ if t == nil {
+ return
+ }
+ // If the key is in the delete set, then it's a resurrected node, then wipe it.
+ if _, present := t.delete[string(key)]; present {
+ delete(t.delete, string(key))
+ return
+ }
+ t.insert[string(key)] = struct{}{}
+}
+
+// OnDelete tracks the newly deleted trie node. If it's already
+// in the addition set, then just wipe it from the addtion set
+// as it's untouched.
+func (t *tracer) onDelete(key []byte) {
+ // Tracer isn't used right now, remove this check latter.
+ if t == nil {
+ return
+ }
+ if _, present := t.insert[string(key)]; present {
+ delete(t.insert, string(key))
+ return
+ }
+ t.delete[string(key)] = struct{}{}
+}
+
+// insertList returns the tracked inserted trie nodes in list format.
+func (t *tracer) insertList() [][]byte {
+ // Tracer isn't used right now, remove this check later.
+ if t == nil {
+ return nil
+ }
+ var ret [][]byte
+ for key := range t.insert {
+ ret = append(ret, []byte(key))
+ }
+ return ret
+}
+
+// deleteList returns the tracked deleted trie nodes in list format.
+func (t *tracer) deleteList() [][]byte {
+ // Tracer isn't used right now, remove this check later.
+ if t == nil {
+ return nil
+ }
+ var ret [][]byte
+ for key := range t.delete {
+ ret = append(ret, []byte(key))
+ }
+ return ret
+}
+
+// reset clears the content tracked by tracer.
+func (t *tracer) reset() {
+ // Tracer isn't used right now, remove this check later.
+ if t == nil {
+ return
+ }
+ t.insert = make(map[string]struct{})
+ t.delete = make(map[string]struct{})
+}
+
+// copy returns a deep copied tracer instance.
+func (t *tracer) copy() *tracer {
+ // Tracer isn't used right now, remove this check later.
+ if t == nil {
+ return nil
+ }
+ var (
+ insert = make(map[string]struct{})
+ delete = make(map[string]struct{})
+ )
+ for key := range t.insert {
+ insert[key] = struct{}{}
+ }
+ for key := range t.delete {
+ delete[key] = struct{}{}
+ }
+ return &tracer{
+ insert: insert,
+ delete: delete,
+ }
+}
diff --git a/trie/utils_test.go b/trie/utils_test.go
new file mode 100644
index 0000000000..fadb0553b5
--- /dev/null
+++ b/trie/utils_test.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+)
+
+// Tests if the trie diffs are tracked correctly.
+func TestTrieTracer(t *testing.T) {
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie, _ := New(common.Hash{}, db)
+ trie.tracer = newTracer()
+
+ // Insert a batch of entries, all the nodes should be marked as inserted
+ vals := []struct{ k, v string }{
+ {"do", "verb"},
+ {"ether", "wookiedoo"},
+ {"horse", "stallion"},
+ {"shaman", "horse"},
+ {"doge", "coin"},
+ {"dog", "puppy"},
+ {"somethingveryoddindeedthis is", "myothernodedata"},
+ }
+ for _, val := range vals {
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ trie.Hash()
+
+ seen := make(map[string]struct{})
+ it := trie.NodeIterator(nil)
+ for it.Next(true) {
+ if it.Leaf() {
+ continue
+ }
+ seen[string(it.Path())] = struct{}{}
+ }
+ inserted := trie.tracer.insertList()
+ if len(inserted) != len(seen) {
+ t.Fatalf("Unexpected inserted node tracked want %d got %d", len(seen), len(inserted))
+ }
+ for _, k := range inserted {
+ _, ok := seen[string(k)]
+ if !ok {
+ t.Fatalf("Unexpected inserted node")
+ }
+ }
+ deleted := trie.tracer.deleteList()
+ if len(deleted) != 0 {
+ t.Fatalf("Unexpected deleted node tracked %d", len(deleted))
+ }
+
+ // Commit the changes
+ trie.Commit(nil)
+
+ // Delete all the elements, check deletion set
+ for _, val := range vals {
+ trie.Delete([]byte(val.k))
+ }
+ trie.Hash()
+
+ inserted = trie.tracer.insertList()
+ if len(inserted) != 0 {
+ t.Fatalf("Unexpected inserted node tracked %d", len(inserted))
+ }
+ deleted = trie.tracer.deleteList()
+ if len(deleted) != len(seen) {
+ t.Fatalf("Unexpected deleted node tracked want %d got %d", len(seen), len(deleted))
+ }
+ for _, k := range deleted {
+ _, ok := seen[string(k)]
+ if !ok {
+ t.Fatalf("Unexpected inserted node")
+ }
+ }
+}
+
+func TestTrieTracerNoop(t *testing.T) {
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie, _ := New(common.Hash{}, db)
+ trie.tracer = newTracer()
+
+ // Insert a batch of entries, all the nodes should be marked as inserted
+ vals := []struct{ k, v string }{
+ {"do", "verb"},
+ {"ether", "wookiedoo"},
+ {"horse", "stallion"},
+ {"shaman", "horse"},
+ {"doge", "coin"},
+ {"dog", "puppy"},
+ {"somethingveryoddindeedthis is", "myothernodedata"},
+ }
+ for _, val := range vals {
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ for _, val := range vals {
+ trie.Delete([]byte(val.k))
+ }
+ if len(trie.tracer.insertList()) != 0 {
+ t.Fatalf("Unexpected inserted node tracked %d", len(trie.tracer.insertList()))
+ }
+ if len(trie.tracer.deleteList()) != 0 {
+ t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList()))
+ }
+}
From 8a8f0e49e3035d1b89adcf6f73d9df18fc71cc86 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Wed, 4 Sep 2024 13:46:30 +0700
Subject: [PATCH 03/41] all: introduce trie owner notion (#24750) (#553)
* cmd, core/state, light, trie, eth: add trie owner notion
* all: refactor
* tests: fix goimports
* core/state/snapshot: fix ineffasigns
Co-authored-by: rjl493456442
Co-authored-by: Martin Holst Swende
---
cmd/ronin/dbcmd.go | 2 +-
cmd/ronin/snapshot.go | 8 +-
core/blockchain.go | 2 +-
core/state/database.go | 4 +-
core/state/pruner/pruner.go | 4 +-
core/state/snapshot/conversion.go | 8 +-
core/state/snapshot/generate.go | 18 +-
core/state/snapshot/generate_test.go | 452 ++++++++----------
core/state/state_object.go | 6 +-
core/state/statedb.go | 21 +-
core/state/sync_test.go | 6 +-
core/state/trie_prefetcher.go | 88 ++--
core/state/trie_prefetcher_test.go | 36 +-
core/types/hashing_test.go | 12 +-
eth/api.go | 4 +-
eth/downloader/downloader_test.go | 2 +-
eth/protocols/snap/handler.go | 10 +-
eth/protocols/snap/sync.go | 10 +-
eth/protocols/snap/sync_test.go | 55 +--
les/downloader/downloader_test.go | 2 +-
les/handler_test.go | 8 +-
les/server_handler.go | 4 +-
light/odr_test.go | 2 +-
light/postprocess.go | 13 +-
light/trie.go | 12 +-
tests/fuzzers/les/les-fuzzer.go | 4 +-
tests/fuzzers/rangeproof/rangeproof-fuzzer.go | 3 +-
tests/fuzzers/stacktrie/trie_fuzzer.go | 3 +-
tests/fuzzers/trie/trie-fuzzer.go | 6 +-
trie/errors.go | 13 +-
trie/iterator_test.go | 12 +-
trie/proof_test.go | 18 +-
trie/secure_trie.go | 4 +-
trie/secure_trie_test.go | 4 +-
trie/stacktrie.go | 56 ++-
trie/stacktrie_test.go | 14 +-
trie/sync_test.go | 10 +-
trie/trie.go | 40 +-
trie/trie_test.go | 61 +--
trie/utils.go | 31 ++
trie/utils_test.go | 7 +-
41 files changed, 556 insertions(+), 519 deletions(-)
diff --git a/cmd/ronin/dbcmd.go b/cmd/ronin/dbcmd.go
index 56fe03a3bd..bfc3a405d3 100644
--- a/cmd/ronin/dbcmd.go
+++ b/cmd/ronin/dbcmd.go
@@ -500,7 +500,7 @@ func dbDumpTrie(ctx *cli.Context) error {
return err
}
}
- theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
+ theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
if err != nil {
return err
}
diff --git a/cmd/ronin/snapshot.go b/cmd/ronin/snapshot.go
index 904bb72b99..78b398a212 100644
--- a/cmd/ronin/snapshot.go
+++ b/cmd/ronin/snapshot.go
@@ -283,7 +283,7 @@ func traverseState(ctx *cli.Context) error {
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
}
triedb := trie.NewDatabase(chaindb)
- t, err := trie.NewSecure(root, triedb)
+ t, err := trie.NewSecure(common.Hash{}, root, triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "err", err)
return err
@@ -304,7 +304,7 @@ func traverseState(ctx *cli.Context) error {
return err
}
if acc.Root != emptyRoot {
- storageTrie, err := trie.NewSecure(acc.Root, triedb)
+ storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.Key), acc.Root, triedb)
if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return err
@@ -373,7 +373,7 @@ func traverseRawState(ctx *cli.Context) error {
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
}
triedb := trie.NewDatabase(chaindb)
- t, err := trie.NewSecure(root, triedb)
+ t, err := trie.NewSecure(common.Hash{}, root, triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "err", err)
return err
@@ -410,7 +410,7 @@ func traverseRawState(ctx *cli.Context) error {
return errors.New("invalid account")
}
if acc.Root != emptyRoot {
- storageTrie, err := trie.NewSecure(acc.Root, triedb)
+ storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.LeafKey()), acc.Root, triedb)
if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return errors.New("missing storage trie")
diff --git a/core/blockchain.go b/core/blockchain.go
index e8688443be..0f02aece1e 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -785,7 +785,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
if block == nil {
return fmt.Errorf("non existent block [%x..]", hash[:4])
}
- if _, err := trie.NewSecure(block.Root(), bc.stateCache.TrieDB()); err != nil {
+ if _, err := trie.NewSecure(common.Hash{}, block.Root(), bc.stateCache.TrieDB()); err != nil {
return err
}
diff --git a/core/state/database.go b/core/state/database.go
index 50f96593d7..47748f3a0c 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -133,7 +133,7 @@ type cachingDB struct {
// OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
- tr, err := trie.NewSecure(root, db.db)
+ tr, err := trie.NewSecure(common.Hash{}, root, db.db)
if err != nil {
return nil, err
}
@@ -142,7 +142,7 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
// OpenStorageTrie opens the storage trie of an account.
func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
- tr, err := trie.NewSecure(root, db.db)
+ tr, err := trie.NewSecure(addrHash, root, db.db)
if err != nil {
return nil, err
}
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 37772ca35c..96fbbd26b9 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -410,7 +410,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if genesis == nil {
return errors.New("missing genesis block")
}
- t, err := trie.NewSecure(genesis.Root(), trie.NewDatabase(db))
+ t, err := trie.NewSecure(common.Hash{}, genesis.Root(), trie.NewDatabase(db))
if err != nil {
return err
}
@@ -430,7 +430,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
return err
}
if acc.Root != emptyRoot {
- storageTrie, err := trie.NewSecure(acc.Root, trie.NewDatabase(db))
+ storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.LeafKey()), acc.Root, trie.NewDatabase(db))
if err != nil {
return err
}
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index f70cbf1e68..0f3934cb42 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -43,7 +43,7 @@ type trieKV struct {
type (
// trieGeneratorFn is the interface of trie generation which can
// be implemented by different trie algorithm.
- trieGeneratorFn func(db ethdb.KeyValueWriter, in chan (trieKV), out chan (common.Hash))
+ trieGeneratorFn func(db ethdb.KeyValueWriter, owner common.Hash, in chan (trieKV), out chan (common.Hash))
// leafCallbackFn is the callback invoked at the leaves of the trie,
// returns the subtrie root with the specified subtrie identifier.
@@ -253,7 +253,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash,
wg.Add(1)
go func() {
defer wg.Done()
- generatorFn(db, in, out)
+ generatorFn(db, account, in, out)
}()
// Spin up a go-routine for progress logging
if report && stats != nil {
@@ -360,8 +360,8 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash,
return stop(nil)
}
-func stackTrieGenerate(db ethdb.KeyValueWriter, in chan trieKV, out chan common.Hash) {
- t := trie.NewStackTrie(db)
+func stackTrieGenerate(db ethdb.KeyValueWriter, owner common.Hash, in chan trieKV, out chan common.Hash) {
+ t := trie.NewStackTrieWithOwner(db, owner)
for leaf := range in {
t.TryUpdate(leaf.key[:], leaf.value)
}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 9d74ca4d9b..049f0e0f80 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -248,7 +248,7 @@ func (result *proofResult) forEach(callback func(key []byte, val []byte) error)
//
// The proof result will be returned if the range proving is finished, otherwise
// the error will be returned to abort the entire procedure.
-func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
+func (dl *diskLayer) proveRange(stats *generatorStats, owner common.Hash, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
var (
keys [][]byte
vals [][]byte
@@ -306,7 +306,7 @@ func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix
// The snap state is exhausted, pass the entire key/val set for verification
if origin == nil && !diskMore {
- stackTr := trie.NewStackTrie(nil)
+ stackTr := trie.NewStackTrieWithOwner(nil, owner)
for i, key := range keys {
stackTr.TryUpdate(key, vals[i])
}
@@ -320,7 +320,7 @@ func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix
return &proofResult{keys: keys, vals: vals}, nil
}
// Snap state is chunked, generate edge proofs for verification.
- tr, err := trie.New(root, dl.triedb)
+ tr, err := trie.New(owner, root, dl.triedb)
if err != nil {
stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
return nil, errMissingTrie
@@ -381,9 +381,9 @@ type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
// generateRange generates the state segment with particular prefix. Generation can
// either verify the correctness of existing state through rangeproof and skip
// generation, or iterate trie to regenerate state on demand.
-func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string, origin []byte, max int, stats *generatorStats, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
+func (dl *diskLayer) generateRange(owner common.Hash, root common.Hash, prefix []byte, kind string, origin []byte, max int, stats *generatorStats, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
// Use range prover to check the validity of the flat state in the range
- result, err := dl.proveRange(stats, root, prefix, kind, origin, max, valueConvertFn)
+ result, err := dl.proveRange(stats, owner, root, prefix, kind, origin, max, valueConvertFn)
if err != nil {
return false, nil, err
}
@@ -432,7 +432,7 @@ func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string,
if len(result.keys) > 0 {
snapNodeCache = memorydb.New()
snapTrieDb := trie.NewDatabase(snapNodeCache)
- snapTrie, _ := trie.New(common.Hash{}, snapTrieDb)
+ snapTrie, _ := trie.New(owner, common.Hash{}, snapTrieDb)
for i, key := range result.keys {
snapTrie.Update(key, result.vals[i])
}
@@ -441,7 +441,7 @@ func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string,
}
tr := result.tr
if tr == nil {
- tr, err = trie.New(root, dl.triedb)
+ tr, err = trie.New(owner, root, dl.triedb)
if err != nil {
stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
return false, nil, errMissingTrie
@@ -698,7 +698,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
}
var storeOrigin = common.CopyBytes(storeMarker)
for {
- exhausted, last, err := dl.generateRange(acc.Root, append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...), "storage", storeOrigin, storageCheckRange, stats, onStorage, nil)
+ exhausted, last, err := dl.generateRange(common.Hash{}, acc.Root, append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...), "storage", storeOrigin, storageCheckRange, stats, onStorage, nil)
if err != nil {
return err
}
@@ -717,7 +717,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
// Global loop for regerating the entire state trie + all layered storage tries.
for {
- exhausted, last, err := dl.generateRange(dl.root, rawdb.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, FullAccountRLP)
+ exhausted, last, err := dl.generateRange(common.Hash{}, dl.root, rawdb.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, FullAccountRLP)
// The procedure it aborted, either by external signal or internal error
if err != nil {
if abort == nil { // aborted by internal error, wait the signal
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 582da6a2e7..42cef2df7e 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -26,47 +26,40 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)
+func hashData(input []byte) common.Hash {
+ var hasher = sha3.NewLegacyKeccak256()
+ var hash common.Hash
+ hasher.Reset()
+ hasher.Write(input)
+ hasher.Sum(hash[:0])
+ return hash
+}
+
// Tests that snapshot generation from an empty database.
func TestGeneration(t *testing.T) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- )
- stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- stTrie.Update([]byte("key-1"), []byte("val-1")) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
- stTrie.Update([]byte("key-2"), []byte("val-2")) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
- stTrie.Update([]byte("key-3"), []byte("val-3")) // 0x51c71a47af0695957647fb68766d0becee77e953df17c29b3c2f25436f055c78
- stTrie.Commit(nil) // Root: 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
-
- accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ := rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ var helper = newHelper()
+ stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)
- acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
- acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
- root, _, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
- triedb.Commit(root, false, nil)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ root, snap := helper.CommitAndGenerate()
if have, want := root, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"); have != want {
t.Fatalf("have %#x want %#x", have, want)
}
- snap := generateSnapshot(diskdb, triedb, 16, root)
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -75,63 +68,34 @@ func TestGeneration(t *testing.T) {
t.Errorf("Snapshot generation failed")
}
checkSnapRoot(t, snap, root)
+
// Signal abortion to the generator and wait for it to tear down
stop := make(chan *generatorStats)
snap.genAbort <- stop
<-stop
}
-func hashData(input []byte) common.Hash {
- var hasher = sha3.NewLegacyKeccak256()
- var hash common.Hash
- hasher.Reset()
- hasher.Write(input)
- hasher.Sum(hash[:0])
- return hash
-}
-
// Tests that snapshot generation with existent flat state.
func TestGenerateExistentState(t *testing.T) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also have the same 3-slot storage trie attached.
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- )
- stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- stTrie.Update([]byte("key-1"), []byte("val-1")) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
- stTrie.Update([]byte("key-2"), []byte("val-2")) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
- stTrie.Update([]byte("key-3"), []byte("val-3")) // 0x51c71a47af0695957647fb68766d0becee77e953df17c29b3c2f25436f055c78
- stTrie.Commit(nil) // Root: 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
-
- accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ := rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
- rawdb.WriteAccountSnapshot(diskdb, hashData([]byte("acc-1")), val)
- rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-1")), hashData([]byte("key-1")), []byte("val-1"))
- rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-1")), hashData([]byte("key-2")), []byte("val-2"))
- rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-1")), hashData([]byte("key-3")), []byte("val-3"))
-
- acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
- diskdb.Put(hashData([]byte("acc-2")).Bytes(), val)
- rawdb.WriteAccountSnapshot(diskdb, hashData([]byte("acc-2")), val)
-
- acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
- rawdb.WriteAccountSnapshot(diskdb, hashData([]byte("acc-3")), val)
- rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-1")), []byte("val-1"))
- rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-2")), []byte("val-2"))
- rawdb.WriteStorageSnapshot(diskdb, hashData([]byte("acc-3")), hashData([]byte("key-3")), []byte("val-3"))
-
- root, _, _ := accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
- triedb.Commit(root, false, nil)
-
- snap := generateSnapshot(diskdb, triedb, 16, root)
+ var helper = newHelper()
+
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+ helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+
+ stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ root, snap := helper.CommitAndGenerate()
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -140,6 +104,7 @@ func TestGenerateExistentState(t *testing.T) {
t.Errorf("Snapshot generation failed")
}
checkSnapRoot(t, snap, root)
+
// Signal abortion to the generator and wait for it to tear down
stop := make(chan *generatorStats)
snap.genAbort <- stop
@@ -161,7 +126,6 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
}
return hash, nil
}, newGenerateStats(), true)
-
if err != nil {
t.Fatal(err)
}
@@ -171,15 +135,15 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
}
type testHelper struct {
- diskdb *memorydb.Database
+ diskdb ethdb.Database
triedb *trie.Database
accTrie *trie.SecureTrie
}
func newHelper() *testHelper {
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := trie.NewDatabase(diskdb)
- accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
+ accTrie, _ := trie.NewSecure(common.Hash{}, common.Hash{}, triedb)
return &testHelper{
diskdb: diskdb,
triedb: triedb,
@@ -210,18 +174,28 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string)
}
}
-func (t *testHelper) makeStorageTrie(keys []string, vals []string) []byte {
- stTrie, _ := trie.NewSecure(common.Hash{}, t.triedb)
+func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) []byte {
+ stTrie, _ := trie.NewSecure(owner, common.Hash{}, t.triedb)
for i, k := range keys {
stTrie.Update([]byte(k), []byte(vals[i]))
}
- root, _, _ := stTrie.Commit(nil)
+ var root common.Hash
+ if !commit {
+ root = stTrie.Hash()
+ } else {
+ root, _, _ = stTrie.Commit(nil)
+ }
return root.Bytes()
}
-func (t *testHelper) Generate() (common.Hash, *diskLayer) {
+func (t *testHelper) Commit() common.Hash {
root, _, _ := t.accTrie.Commit(nil)
t.triedb.Commit(root, false, nil)
+ return root
+}
+
+func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) {
+ root := t.Commit()
snap := generateSnapshot(t.diskdb, t.triedb, 16, root)
return root, snap
}
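
The commit flag on makeStorageTrie decides whether the storage trie is merely hashed or actually committed into the trie database; a minimal sketch of the two modes, using the same helper:

	// commit == false: compute the root without flushing any nodes, which lets
	// a test reference a storage root that is deliberately absent from disk.
	hashOnly := helper.makeStorageTrie(common.Hash{}, common.Hash{},
		[]string{"key-1"}, []string{"val-1"}, false)

	// commit == true: the nodes are written into helper.triedb and survive
	// the later helper.Commit() flush.
	committed := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
		[]string{"key-1"}, []string{"val-1"}, true)
	_, _ = hashOnly, committed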
@@ -234,36 +208,41 @@ func (t *testHelper) Generate() (common.Hash, *diskLayer) {
// - miss in the beginning
// - miss in the middle
// - miss in the end
+//
// - the contract (non-empty storage) has wrong storage slots
// - wrong slots in the beginning
// - wrong slots in the middle
// - wrong slots in the end
+//
// - the contract (non-empty storage) has extra storage slots
// - extra slots in the beginning
// - extra slots in the middle
// - extra slots in the end
func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
helper := newHelper()
- stRoot := helper.makeStorageTrie([]string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
// Account one, empty root but non-empty database
helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
// Account two, non-empty root but empty database
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
// Miss slots
{
// Account three, non-empty root but misses slots in the beginning
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})
// Account four, non-empty root but misses slots in the middle
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})
// Account five, non-empty root but misses slots in the end
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
}
@@ -271,18 +250,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
// Wrong storage slots
{
// Account six, non-empty root but wrong slots in the beginning
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})
// Account seven, non-empty root but wrong slots in the middle
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})
// Account eight, non-empty root but wrong slots in the end
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})
// Account 9, non-empty root but rotated slots
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
}
@@ -290,19 +273,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
// Extra storage slots
{
// Account 10, non-empty root but extra slots in the beginning
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})
// Account 11, non-empty root but extra slots in the middle
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})
// Account 12, non-empty root but extra slots in the end
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
}
- root, snap := helper.Generate()
+ root, snap := helper.CommitAndGenerate()
t.Logf("Root: %#x\n", root) // Root = 0x8746cce9fd9c658b2cfd639878ed6584b7a2b3e73bb40f607fcfa156002429a0
select {
@@ -326,7 +312,12 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
// - extra accounts
func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
helper := newHelper()
- stRoot := helper.makeStorageTrie([]string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
+
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
// Trie accounts [acc-1, acc-2, acc-3, acc-4, acc-6]
// Extra accounts [acc-0, acc-5, acc-7]
@@ -354,7 +345,7 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyRoot.Bytes()}) // after the end
}
- root, snap := helper.Generate()
+ root, snap := helper.CommitAndGenerate()
t.Logf("Root: %#x\n", root) // Root = 0x825891472281463511e7ebcc7f109e4f9200c20fa384754e11fd605cd98464e8
select {
@@ -378,29 +369,19 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// without any storage slots to keep the test smaller.
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- )
- tr, _ := trie.NewSecure(common.Hash{}, triedb)
- acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ := rlp.EncodeToBytes(acc)
- tr.Update([]byte("acc-1"), val) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
+ helper := newHelper()
- acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- tr.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
- acc = &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- tr.Update([]byte("acc-3"), val) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
- tr.Commit(nil) // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
+ root, _, _ := helper.accTrie.Commit(nil) // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
// Delete an account trie leaf and ensure the generator chokes
- triedb.Commit(common.HexToHash("0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978"), false, nil)
- diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
+ helper.triedb.Commit(root, false, nil)
+ helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
- snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978"))
+ snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -422,45 +403,30 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also have the same 3-slot storage trie attached.
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- )
- stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- stTrie.Update([]byte("key-1"), []byte("val-1")) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
- stTrie.Update([]byte("key-2"), []byte("val-2")) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
- stTrie.Update([]byte("key-3"), []byte("val-3")) // 0x51c71a47af0695957647fb68766d0becee77e953df17c29b3c2f25436f055c78
- stTrie.Commit(nil) // Root: 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
-
- accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ := rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
-
- acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper := newHelper()
- acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
- accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+ root, _, _ := helper.accTrie.Commit(nil)
// We can only corrupt the disk database, so flush the tries out
- triedb.Reference(
- common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67"),
+ helper.triedb.Reference(
+ common.BytesToHash(stRoot),
common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
)
- triedb.Reference(
- common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67"),
+ helper.triedb.Reference(
+ common.BytesToHash(stRoot),
common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
)
- triedb.Commit(common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"), false, nil)
+ helper.triedb.Commit(root, false, nil)
// Delete a storage trie root and ensure the generator chokes
- diskdb.Delete(common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67").Bytes())
+ helper.diskdb.Delete(stRoot)
- snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"))
+ snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -481,45 +447,31 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also have the same 3-slot storage trie attached.
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- )
- stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- stTrie.Update([]byte("key-1"), []byte("val-1")) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0
- stTrie.Update([]byte("key-2"), []byte("val-2")) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371
- stTrie.Update([]byte("key-3"), []byte("val-3")) // 0x51c71a47af0695957647fb68766d0becee77e953df17c29b3c2f25436f055c78
- stTrie.Commit(nil) // Root: 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
-
- accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ := rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper := newHelper()
- acc = &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-2"), val) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
+ helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
+ helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
- acc = &Account{Balance: big.NewInt(3), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- val, _ = rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-3"), val) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
- accTrie.Commit(nil) // Root: 0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd
+ root, _, _ := helper.accTrie.Commit(nil)
// We can only corrupt the disk database, so flush the tries out
- triedb.Reference(
- common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67"),
+ helper.triedb.Reference(
+ common.BytesToHash(stRoot),
common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
)
- triedb.Reference(
- common.HexToHash("0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67"),
+ helper.triedb.Reference(
+ common.BytesToHash(stRoot),
common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
)
- triedb.Commit(common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"), false, nil)
+ helper.triedb.Commit(root, false, nil)
// Delete a storage trie leaf and ensure the generator chokes
- diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
+ helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
- snap := generateSnapshot(diskdb, triedb, 16, common.HexToHash("0xe3712f1a226f3782caca78ca770ccc19ee000552813a9f59d479f8611db9b1fd"))
+ snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -534,56 +486,51 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
<-stop
}
-func getStorageTrie(n int, triedb *trie.Database) *trie.SecureTrie {
- stTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- for i := 0; i < n; i++ {
- k := fmt.Sprintf("key-%d", i)
- v := fmt.Sprintf("val-%d", i)
- stTrie.Update([]byte(k), []byte(v))
- }
- stTrie.Commit(nil)
- return stTrie
-}
-
// Tests that snapshot generation works when an extra account with storage exists in the snap state.
func TestGenerateWithExtraAccounts(t *testing.T) {
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- stTrie = getStorageTrie(5, triedb)
- )
- accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- { // Account one in the trie
- acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+ helper := newHelper()
+ {
+ // Account one in the trie
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
+ []string{"key-1", "key-2", "key-3", "key-4", "key-5"},
+ []string{"val-1", "val-2", "val-3", "val-4", "val-5"},
+ true,
+ )
+ acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+
// Identical in the snap
key := hashData([]byte("acc-1"))
- rawdb.WriteAccountSnapshot(diskdb, key, val)
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-4")), []byte("val-4"))
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-5")), []byte("val-5"))
- }
- { // Account two exists only in the snapshot
- acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+ rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val)
+ rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-1")), []byte("val-1"))
+ rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-2")), []byte("val-2"))
+ rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-3")), []byte("val-3"))
+ rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-4")), []byte("val-4"))
+ rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-5")), []byte("val-5"))
+ }
+ {
+ // Account two exists only in the snapshot
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")),
+ []string{"key-1", "key-2", "key-3", "key-4", "key-5"},
+ []string{"val-1", "val-2", "val-3", "val-4", "val-5"},
+ true,
+ )
+ acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte("acc-2"))
- rawdb.WriteAccountSnapshot(diskdb, key, val)
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-1")), []byte("b-val-1"))
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2"))
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3"))
- }
- root, _, _ := accTrie.Commit(nil)
- t.Logf("root: %x", root)
- triedb.Commit(root, false, nil)
+ rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val)
+ rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-1")), []byte("b-val-1"))
+ rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-2")), []byte("b-val-2"))
+ rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-3")), []byte("b-val-3"))
+ }
+ root := helper.Commit()
+
// To verify the test: If we now inspect the snap db, there should exist extraneous storage items
- if data := rawdb.ReadStorageSnapshot(diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil {
+ if data := rawdb.ReadStorageSnapshot(helper.triedb.DiskDB(), hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil {
t.Fatalf("expected snap storage to exist")
}
-
- snap := generateSnapshot(diskdb, triedb, 16, root)
+ snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -592,12 +539,13 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
t.Errorf("Snapshot generation failed")
}
checkSnapRoot(t, snap, root)
+
// Signal abortion to the generator and wait for it to tear down
stop := make(chan *generatorStats)
snap.genAbort <- stop
<-stop
// If we now inspect the snap db, there should exist no extraneous storage items
- if data := rawdb.ReadStorageSnapshot(diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
+ if data := rawdb.ReadStorageSnapshot(helper.triedb.DiskDB(), hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
t.Fatalf("expected slot to be removed, got %v", string(data))
}
}
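
The seeding pattern above, restated as a sketch under the same package assumptions ("acc-ghost" is a hypothetical key): snapshot-only entries are written straight through rawdb, and the generator is expected to wipe them once it proves they have no counterpart in the trie.

	helper := newHelper()
	val, _ := rlp.EncodeToBytes(&Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
	key := hashData([]byte("acc-ghost"))
	// Present only in the snapshot, never in the account trie.
	rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
	rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("k")), []byte("v"))
	// After generation finishes, both reads are expected to return nil.
	_ = rawdb.ReadAccountSnapshot(helper.diskdb, key)
	_ = rawdb.ReadStorageSnapshot(helper.diskdb, key, hashData([]byte("k")))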
@@ -611,37 +559,36 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
if false {
enableLogging()
}
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- stTrie = getStorageTrie(3, triedb)
- )
- accTrie, _ := trie.NewSecure(common.Hash{}, triedb)
- { // Account one in the trie
- acc := &Account{Balance: big.NewInt(1), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+ helper := newHelper()
+ {
+ // Account one in the trie
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
+ []string{"key-1", "key-2", "key-3"},
+ []string{"val-1", "val-2", "val-3"},
+ true,
+ )
+ acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
- accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+
// Identical in the snap
key := hashData([]byte("acc-1"))
- rawdb.WriteAccountSnapshot(diskdb, key, val)
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
- rawdb.WriteStorageSnapshot(diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
+ rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-1")), []byte("val-1"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-2")), []byte("val-2"))
+ rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-3")), []byte("val-3"))
}
- { // 100 accounts exist only in snapshot
+ {
+ // 1000 accounts exist only in the snapshot
for i := 0; i < 1000; i++ {
//acc := &Account{Balance: big.NewInt(int64(i)), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
acc := &Account{Balance: big.NewInt(int64(i)), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
- rawdb.WriteAccountSnapshot(diskdb, key, val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
}
}
- root, _, _ := accTrie.Commit(nil)
- t.Logf("root: %x", root)
- triedb.Commit(root, false, nil)
-
- snap := generateSnapshot(diskdb, triedb, 16, root)
+ root, snap := helper.CommitAndGenerate()
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -670,31 +617,22 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
if false {
enableLogging()
}
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- )
- accTrie, _ := trie.New(common.Hash{}, triedb)
+ helper := newHelper()
{
acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
- accTrie.Update(common.HexToHash("0x03").Bytes(), val)
- accTrie.Update(common.HexToHash("0x07").Bytes(), val)
-
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x01"), val)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x02"), val)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x03"), val)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x04"), val)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x05"), val)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x06"), val)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x07"), val)
- }
-
- root, _, _ := accTrie.Commit(nil)
- t.Logf("root: %x", root)
- triedb.Commit(root, false, nil)
-
- snap := generateSnapshot(diskdb, triedb, 16, root)
+ helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
+ helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val)
+
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x01"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x03"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x04"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x05"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x06"), val)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x07"), val)
+ }
+ root, snap := helper.CommitAndGenerate()
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -716,29 +654,20 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
if false {
enableLogging()
}
- var (
- diskdb = memorydb.New()
- triedb = trie.NewDatabase(diskdb)
- )
- accTrie, _ := trie.New(common.Hash{}, triedb)
+ helper := newHelper()
{
acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
- accTrie.Update(common.HexToHash("0x03").Bytes(), val)
+ helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
junk := make([]byte, 100)
copy(junk, []byte{0xde, 0xad})
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x02"), junk)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x03"), junk)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x04"), junk)
- rawdb.WriteAccountSnapshot(diskdb, common.HexToHash("0x05"), junk)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x02"), junk)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x03"), junk)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x04"), junk)
+ rawdb.WriteAccountSnapshot(helper.diskdb, common.HexToHash("0x05"), junk)
}
-
- root, _, _ := accTrie.Commit(nil)
- t.Logf("root: %x", root)
- triedb.Commit(root, false, nil)
-
- snap := generateSnapshot(diskdb, triedb, 16, root)
+ root, snap := helper.CommitAndGenerate()
select {
case <-snap.genPending:
// Snapshot generation succeeded
@@ -752,7 +681,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
snap.genAbort <- stop
<-stop
// If we now inspect the snap db, there should exist no extraneous storage items
- if data := rawdb.ReadStorageSnapshot(diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
+ if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil {
t.Fatalf("expected slot to be removed, got %v", string(data))
}
}
@@ -762,13 +691,13 @@ func TestGenerateFromEmptySnap(t *testing.T) {
accountCheckRange = 10
storageCheckRange = 20
helper := newHelper()
- stRoot := helper.makeStorageTrie([]string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
// Add 400 accounts to the trie
for i := 0; i < 400; i++ {
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
&Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
}
- root, snap := helper.Generate()
+ root, snap := helper.CommitAndGenerate()
t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4
select {
@@ -797,12 +726,12 @@ func TestGenerateWithIncompleteStorage(t *testing.T) {
helper := newHelper()
stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"}
stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"}
- stRoot := helper.makeStorageTrie(stKeys, stVals)
// We add 8 accounts, each one is missing exactly one of the storage slots. This means
// we don't have to order the keys and figure out exactly which hash-key winds up
// on the sensitive spots at the boundaries
for i := 0; i < 8; i++ {
accKey := fmt.Sprintf("acc-%d", i)
+ stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(accKey)), stKeys, stVals, true)
helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: emptyCode.Bytes()})
var moddedKeys []string
var moddedVals []string
@@ -814,8 +743,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) {
}
helper.addSnapStorage(accKey, moddedKeys, moddedVals)
}
-
- root, snap := helper.Generate()
+ root, snap := helper.CommitAndGenerate()
t.Logf("Root: %#x\n", root) // Root: 0xca73f6f05ba4ca3024ef340ef3dfca8fdabc1b677ff13f5a9571fd49c16e67ff
select {
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 22d90b8420..3b0a53eb01 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -157,7 +157,7 @@ func (s *stateObject) getTrie(db Database) Trie {
if s.data.Root != emptyRoot && s.db.prefetcher != nil {
// When the miner is creating the pending state, there is no
// prefetcher
- s.trie = s.db.prefetcher.trie(s.data.Root)
+ s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root)
}
if s.trie == nil {
var err error
@@ -313,7 +313,7 @@ func (s *stateObject) finalise(prefetch bool) {
}
}
if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot {
- s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch)
+ s.db.prefetcher.prefetch(s.addrHash, s.data.Root, slotsToPrefetch)
}
if len(s.dirtyStorage) > 0 {
s.dirtyStorage = make(Storage)
@@ -370,7 +370,7 @@ func (s *stateObject) updateTrie(db Database) Trie {
usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
}
if s.db.prefetcher != nil {
- s.db.prefetcher.used(s.data.Root, usedStorage)
+ s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage)
}
if len(s.pendingStorage) > 0 {
s.pendingStorage = make(Storage)
diff --git a/core/state/statedb.go b/core/state/statedb.go
index de593428bc..a267962226 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -63,11 +63,14 @@ func (n *proofList) Delete(key []byte) error {
// * Contracts
// * Accounts
type StateDB struct {
- db Database
- prefetcher *triePrefetcher
- originalRoot common.Hash // The pre-state root, before any changes were made
- trie Trie
- hasher crypto.KeccakState
+ db Database
+ prefetcher *triePrefetcher
+ trie Trie
+ hasher crypto.KeccakState
+
+ // originalRoot is the pre-state root, before any changes were made.
+ // It will be updated when Commit is called.
+ originalRoot common.Hash
snaps *snapshot.Tree
snap snapshot.Snapshot
@@ -709,6 +712,7 @@ func (s *StateDB) Copy() *StateDB {
state := &StateDB{
db: s.db,
trie: s.db.CopyTrie(s.trie),
+ originalRoot: s.originalRoot,
stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
@@ -874,7 +878,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure
}
if s.prefetcher != nil && len(addressesToPrefetch) > 0 {
- s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch)
+ s.prefetcher.prefetch(common.Hash{}, s.originalRoot, addressesToPrefetch)
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()
@@ -915,7 +919,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// _untouched_. We can check with the prefetcher, if it can give us a trie
// which has the same root, but also has some content loaded into it.
if prefetcher != nil {
- if trie := prefetcher.trie(s.originalRoot); trie != nil {
+ if trie := prefetcher.trie(common.Hash{}, s.originalRoot); trie != nil {
s.trie = trie
}
}
@@ -931,7 +935,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
}
if prefetcher != nil {
- prefetcher.used(s.originalRoot, usedAddrs)
+ prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs)
}
if len(s.stateObjectsPending) > 0 {
s.stateObjectsPending = make(map[common.Address]struct{})
@@ -1045,6 +1049,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
}
+ s.originalRoot = root
return root, err
}
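
Since Copy now carries originalRoot over and Commit refreshes it, prefetcher lookups keyed on (owner, root) remain consistent across a commit boundary. A sketch of the dependent lookup, with common.Hash{} denoting the account trie owner (names as in the surrounding StateDB code):

	// After Commit has set s.originalRoot = root, this resolves against the
	// post-commit state rather than a stale pre-state root.
	if tr := prefetcher.trie(common.Hash{}, s.originalRoot); tr != nil {
		s.trie = tr // reuse the preloaded account trie instead of reopening it
	}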
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index beb8fcfd9c..fafe21dccb 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -105,7 +105,7 @@ func checkTrieConsistency(db ethdb.Database, root common.Hash) error {
if v, _ := db.Get(root[:]); v == nil {
return nil // Consider a non existent state consistent.
}
- trie, err := trie.New(root, trie.NewDatabase(db))
+ trie, err := trie.New(common.Hash{}, root, trie.NewDatabase(db))
if err != nil {
return err
}
@@ -167,7 +167,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
if commit {
srcDb.TrieDB().Commit(srcRoot, false, nil)
}
- srcTrie, _ := trie.New(srcRoot, srcDb.TrieDB())
+ srcTrie, _ := trie.New(common.Hash{}, srcRoot, srcDb.TrieDB())
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
@@ -208,7 +208,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
if err := rlp.DecodeBytes(srcTrie.Get(path[0]), &acc); err != nil {
t.Fatalf("failed to decode account on path %x: %v", path, err)
}
- stTrie, err := trie.New(acc.Root, srcDb.TrieDB())
+ stTrie, err := trie.New(common.BytesToHash(path[0]), acc.Root, srcDb.TrieDB())
if err != nil {
t.Fatalf("failed to retriev storage trie for path %x: %v", path, err)
}
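
The constructor convention after this change: the first argument names the trie's owner, the zero hash for the account trie and the account's hash for a storage trie. A sketch with placeholder roots (stateRoot, accountHash and storageRoot are stand-ins):

	triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
	// Account trie: owned by nobody, so the owner slot is the zero hash.
	accTrie, _ := trie.New(common.Hash{}, stateRoot, triedb)
	// Storage trie: owned by the account hashed into accountHash.
	stTrie, _ := trie.New(accountHash, storageRoot, triedb)
	_, _ = accTrie, stTrie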
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index 472c125b77..a81872cd32 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -25,7 +25,7 @@ import (
)
var (
- // triePrefetchMetricsPrefix is the prefix under which to publis the metrics.
+ // triePrefetchMetricsPrefix is the prefix under which to publish the metrics.
triePrefetchMetricsPrefix = "trie/prefetch/"
)
@@ -35,10 +35,10 @@ var (
//
// Note, the prefetcher's API is not thread safe.
type triePrefetcher struct {
- db Database // Database to fetch trie nodes through
- root common.Hash // Root hash of theaccount trie for metrics
- fetches map[common.Hash]Trie // Partially or fully fetcher tries
- fetchers map[common.Hash]*subfetcher // Subfetchers for each trie
+ db Database // Database to fetch trie nodes through
+ root common.Hash // Root hash of the account trie for metrics
+ fetches map[string]Trie // Partially or fully fetched tries
+ fetchers map[string]*subfetcher // Subfetchers for each trie
deliveryMissMeter metrics.Meter
accountLoadMeter metrics.Meter
@@ -51,13 +51,12 @@ type triePrefetcher struct {
storageWasteMeter metrics.Meter
}
-// newTriePrefetcher
func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher {
prefix := triePrefetchMetricsPrefix + namespace
p := &triePrefetcher{
db: db,
root: root,
- fetchers: make(map[common.Hash]*subfetcher), // Active prefetchers use the fetchers map
+ fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map
deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil),
accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil),
@@ -112,7 +111,7 @@ func (p *triePrefetcher) copy() *triePrefetcher {
copy := &triePrefetcher{
db: p.db,
root: p.root,
- fetches: make(map[common.Hash]Trie), // Active prefetchers use the fetches map
+ fetches: make(map[string]Trie), // Active prefetchers use the fetches map
deliveryMissMeter: p.deliveryMissMeter,
accountLoadMeter: p.accountLoadMeter,
@@ -135,33 +134,35 @@ func (p *triePrefetcher) copy() *triePrefetcher {
return copy
}
// Otherwise we're copying an active fetcher, retrieve the current states
- for root, fetcher := range p.fetchers {
- copy.fetches[root] = fetcher.peek()
+ for id, fetcher := range p.fetchers {
+ copy.fetches[id] = fetcher.peek()
}
return copy
}
// prefetch schedules a batch of trie items to prefetch.
-func (p *triePrefetcher) prefetch(root common.Hash, keys [][]byte) {
+func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]byte) {
// If the prefetcher is an inactive one, bail out
if p.fetches != nil {
return
}
// Active fetcher, schedule the retrievals
- fetcher := p.fetchers[root]
+ id := p.trieID(owner, root)
+ fetcher := p.fetchers[id]
if fetcher == nil {
- fetcher = newSubfetcher(p.db, root)
- p.fetchers[root] = fetcher
+ fetcher = newSubfetcher(p.db, owner, root)
+ p.fetchers[id] = fetcher
}
fetcher.schedule(keys)
}
// trie returns the trie matching the root hash, or nil if the prefetcher doesn't
// have it.
-func (p *triePrefetcher) trie(root common.Hash) Trie {
+func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie {
// If the prefetcher is inactive, return from existing deep copies
+ id := p.trieID(owner, root)
if p.fetches != nil {
- trie := p.fetches[root]
+ trie := p.fetches[id]
if trie == nil {
p.deliveryMissMeter.Mark(1)
return nil
@@ -169,7 +170,7 @@ func (p *triePrefetcher) trie(root common.Hash) Trie {
return p.db.CopyTrie(trie)
}
// Otherwise the prefetcher is active, bail if no trie was prefetched for this root
- fetcher := p.fetchers[root]
+ fetcher := p.fetchers[id]
if fetcher == nil {
p.deliveryMissMeter.Mark(1)
return nil
@@ -188,20 +189,26 @@ func (p *triePrefetcher) trie(root common.Hash) Trie {
// used marks a batch of state items used to allow creating statistics as to
// how useful or wasteful the prefetcher is.
-func (p *triePrefetcher) used(root common.Hash, used [][]byte) {
- if fetcher := p.fetchers[root]; fetcher != nil {
+func (p *triePrefetcher) used(owner common.Hash, root common.Hash, used [][]byte) {
+ if fetcher := p.fetchers[p.trieID(owner, root)]; fetcher != nil {
fetcher.used = used
}
}
+// trieID returns a unique trie identifier consisting of the trie owner and root hash.
+func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
+ return string(append(owner.Bytes(), root.Bytes()...))
+}
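
The identifier is simply the 64-byte concatenation of owner and root, so two tries that share a root under different owners no longer collide in the fetcher maps. A quick sketch of the property:

	owner := common.HexToHash("0x01")
	root := common.HexToHash("0x02")
	id := string(append(owner.Bytes(), root.Bytes()...)) // 64-byte map key
	accountID := string(append(common.Hash{}.Bytes(), root.Bytes()...))
	fmt.Println(len(id), id != accountID) // 64 true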
+
// subfetcher is a trie fetcher goroutine responsible for pulling entries for a
// single trie. It is spawned when a new root is encountered and lives until the
// main prefetcher is paused and either all requested items are processed or
// the trie being worked on is retrieved from the prefetcher.
type subfetcher struct {
- db Database // Database to load trie nodes through
- root common.Hash // Root hash of the trie to prefetch
- trie Trie // Trie being populated with nodes
+ db Database // Database to load trie nodes through
+ owner common.Hash // Owner of the trie, usually account hash
+ root common.Hash // Root hash of the trie to prefetch
+ trie Trie // Trie being populated with nodes
tasks [][]byte // Items queued up for retrieval
lock sync.Mutex // Lock protecting the task queue
@@ -218,15 +225,16 @@ type subfetcher struct {
// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
-func newSubfetcher(db Database, root common.Hash) *subfetcher {
+func newSubfetcher(db Database, owner common.Hash, root common.Hash) *subfetcher {
sf := &subfetcher{
- db: db,
- root: root,
- wake: make(chan struct{}, 1),
- stop: make(chan struct{}),
- term: make(chan struct{}),
- copy: make(chan chan Trie),
- seen: make(map[string]struct{}),
+ db: db,
+ owner: owner,
+ root: root,
+ wake: make(chan struct{}, 1),
+ stop: make(chan struct{}),
+ term: make(chan struct{}),
+ copy: make(chan chan Trie),
+ seen: make(map[string]struct{}),
}
go sf.loop()
return sf
@@ -282,13 +290,21 @@ func (sf *subfetcher) loop() {
defer close(sf.term)
// Start by opening the trie and stop processing if it fails
- trie, err := sf.db.OpenTrie(sf.root)
- if err != nil {
- log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
- return
+ if sf.owner == (common.Hash{}) {
+ trie, err := sf.db.OpenTrie(sf.root)
+ if err != nil {
+ log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
+ return
+ }
+ sf.trie = trie
+ } else {
+ trie, err := sf.db.OpenStorageTrie(sf.owner, sf.root)
+ if err != nil {
+ log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
+ return
+ }
+ sf.trie = trie
}
- sf.trie = trie
-
// Trie opened successfully, keep prefetching items
for {
select {
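
Restated outside the diff, the open step now dispatches on the owner; a simplified sketch of the logic above:

	// A zero owner selects the account trie; anything else is a storage trie
	// rooted under that account.
	var (
		tr  Trie
		err error
	)
	if sf.owner == (common.Hash{}) {
		tr, err = sf.db.OpenTrie(sf.root)
	} else {
		tr, err = sf.db.OpenStorageTrie(sf.owner, sf.root)
	}
	if err != nil {
		log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
		return
	}
	sf.trie = tr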
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
index 35dc7a2c0d..cb0b67d7ea 100644
--- a/core/state/trie_prefetcher_test.go
+++ b/core/state/trie_prefetcher_test.go
@@ -47,20 +47,20 @@ func TestCopyAndClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
- prefetcher.prefetch(db.originalRoot, [][]byte{skey.Bytes()})
- prefetcher.prefetch(db.originalRoot, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
time.Sleep(1 * time.Second)
- a := prefetcher.trie(db.originalRoot)
- prefetcher.prefetch(db.originalRoot, [][]byte{skey.Bytes()})
- b := prefetcher.trie(db.originalRoot)
+ a := prefetcher.trie(common.Hash{}, db.originalRoot)
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ b := prefetcher.trie(common.Hash{}, db.originalRoot)
cpy := prefetcher.copy()
- cpy.prefetch(db.originalRoot, [][]byte{skey.Bytes()})
- cpy.prefetch(db.originalRoot, [][]byte{skey.Bytes()})
- c := cpy.trie(db.originalRoot)
+ cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ cpy.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ c := cpy.trie(common.Hash{}, db.originalRoot)
prefetcher.close()
cpy2 := cpy.copy()
- cpy2.prefetch(db.originalRoot, [][]byte{skey.Bytes()})
- d := cpy2.trie(db.originalRoot)
+ cpy2.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ d := cpy2.trie(common.Hash{}, db.originalRoot)
cpy.close()
cpy2.close()
if a.Hash() != b.Hash() || a.Hash() != c.Hash() || a.Hash() != d.Hash() {
@@ -72,10 +72,10 @@ func TestUseAfterClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
- prefetcher.prefetch(db.originalRoot, [][]byte{skey.Bytes()})
- a := prefetcher.trie(db.originalRoot)
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
+ a := prefetcher.trie(common.Hash{}, db.originalRoot)
prefetcher.close()
- b := prefetcher.trie(db.originalRoot)
+ b := prefetcher.trie(common.Hash{}, db.originalRoot)
if a == nil {
t.Fatal("Prefetching before close should not return nil")
}
@@ -88,13 +88,13 @@ func TestCopyClose(t *testing.T) {
db := filledStateDB()
prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
skey := common.HexToHash("aaa")
- prefetcher.prefetch(db.originalRoot, [][]byte{skey.Bytes()})
+ prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
cpy := prefetcher.copy()
- a := prefetcher.trie(db.originalRoot)
- b := cpy.trie(db.originalRoot)
+ a := prefetcher.trie(common.Hash{}, db.originalRoot)
+ b := cpy.trie(common.Hash{}, db.originalRoot)
prefetcher.close()
- c := prefetcher.trie(db.originalRoot)
- d := cpy.trie(db.originalRoot)
+ c := prefetcher.trie(common.Hash{}, db.originalRoot)
+ d := cpy.trie(common.Hash{}, db.originalRoot)
if a == nil {
t.Fatal("Prefetching before close should not return nil")
}
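
As these tests show, every call now names the owner explicitly; a minimal usage sketch for the account trie (owner common.Hash{}), reusing the fixtures above:

	prefetcher := newTriePrefetcher(db.db, db.originalRoot, "")
	skey := common.HexToHash("aaa")
	prefetcher.prefetch(common.Hash{}, db.originalRoot, [][]byte{skey.Bytes()})
	tr := prefetcher.trie(common.Hash{}, db.originalRoot) // nil on a delivery miss
	prefetcher.close()
	_ = tr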
diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go
index de71ee41a4..44726c9cbb 100644
--- a/core/types/hashing_test.go
+++ b/core/types/hashing_test.go
@@ -39,8 +39,7 @@ func TestDeriveSha(t *testing.T) {
t.Fatal(err)
}
for len(txs) < 1000 {
- tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
- exp := types.DeriveSha(txs, tr)
+ exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())))
got := types.DeriveSha(txs, trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp)
@@ -87,8 +86,7 @@ func BenchmarkDeriveSha200(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
- exp = types.DeriveSha(txs, tr)
+ exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())))
}
})
@@ -109,8 +107,7 @@ func TestFuzzDeriveSha(t *testing.T) {
rndSeed := mrand.Int()
for i := 0; i < 10; i++ {
seed := rndSeed + i
- tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
- exp := types.DeriveSha(newDummy(i), tr)
+ exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())))
got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
printList(newDummy(seed))
@@ -138,8 +135,7 @@ func TestDerivableList(t *testing.T) {
},
}
for i, tc := range tcs[1:] {
- tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
- exp := types.DeriveSha(flatList(tc), tr)
+ exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())))
got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("case %d: got %x exp %x", i, got, exp)
diff --git a/eth/api.go b/eth/api.go
index 635b6fcda0..a646529e8b 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -553,11 +553,11 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc
}
triedb := api.eth.BlockChain().StateCache().TrieDB()
- oldTrie, err := trie.NewSecure(startBlock.Root(), triedb)
+ oldTrie, err := trie.NewSecure(common.Hash{}, startBlock.Root(), triedb)
if err != nil {
return nil, err
}
- newTrie, err := trie.NewSecure(endBlock.Root(), triedb)
+ newTrie, err := trie.NewSecure(common.Hash{}, endBlock.Root(), triedb)
if err != nil {
return nil, err
}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 7dce33950f..010f3a11ca 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -229,7 +229,7 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
// For now only check that the state trie is correct
if block := dl.GetBlockByHash(hash); block != nil {
- _, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
+ _, err := trie.NewSecure(common.Hash{}, block.Root(), trie.NewDatabase(dl.stateDb))
return err
}
return fmt.Errorf("non existent block: %x", hash[:4])
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index c62f9cfca5..f2bd4eae9e 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -165,7 +165,7 @@ func handleMessage(backend Backend, peer *Peer) error {
req.Bytes = softResponseLimit
}
// Retrieve the requested state and bail out if non existent
- tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
+ tr, err := trie.New(common.Hash{}, req.Root, backend.Chain().StateCache().TrieDB())
if err != nil {
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
}
@@ -315,7 +315,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if origin != (common.Hash{}) || abort {
// Request started at a non-zero hash or was capped prematurely, add
// the endpoint Merkle proofs
- accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
+ accTrie, err := trie.New(common.Hash{}, req.Root, backend.Chain().StateCache().TrieDB())
if err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
@@ -323,7 +323,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
- stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())
+ stTrie, err := trie.New(account, acc.Root, backend.Chain().StateCache().TrieDB())
if err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
@@ -430,7 +430,7 @@ func handleMessage(backend Backend, peer *Peer) error {
// Make sure we have the state associated with the request
triedb := backend.Chain().StateCache().TrieDB()
- accTrie, err := trie.NewSecure(req.Root, triedb)
+ accTrie, err := trie.NewSecure(common.Hash{}, req.Root, triedb)
if err != nil {
// We don't have the requested state available, bail out
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
@@ -472,7 +472,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if err != nil || account == nil {
break
}
- stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
+ stTrie, err := trie.NewSecure(common.BytesToHash(pathset[0]), common.BytesToHash(account.Root), triedb)
loads++ // always account database reads, even for failures
if err != nil {
break
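For orientation, the calls above settle into a single pattern: the account trie is opened with the zero hash as its owner, while each storage trie is opened with the hash of the account that owns it. Below is a minimal sketch against the patched trie package; the helper openTries and its parameters are illustrative, not part of this change.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// openTries opens the account trie (owner: zero hash) and one storage
// trie (owner: the owning account's hash) against the same database.
func openTries(db *trie.Database, root, accountHash, storageRoot common.Hash) (*trie.Trie, *trie.Trie, error) {
	accTrie, err := trie.New(common.Hash{}, root, db)
	if err != nil {
		return nil, nil, err
	}
	stTrie, err := trie.New(accountHash, storageRoot, db)
	if err != nil {
		return nil, nil, err
	}
	return accTrie, stTrie, nil
}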
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 9e82682fb9..48a2c41a7c 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -721,7 +721,7 @@ func (s *Syncer) loadSyncStatus() {
}
task.genTrie = trie.NewStackTrie(task.genBatch)
- for _, subtasks := range task.SubTasks {
+ for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
subtask.genBatch = ethdb.HookedBatch{
Batch: s.db.NewBatch(),
@@ -729,7 +729,7 @@ func (s *Syncer) loadSyncStatus() {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
- subtask.genTrie = trie.NewStackTrie(subtask.genBatch)
+ subtask.genTrie = trie.NewStackTrieWithOwner(subtask.genBatch, accountHash)
}
}
}
@@ -1969,7 +1969,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrie(batch),
+ genTrie: trie.NewStackTrieWithOwner(batch, account),
})
for r.Next() {
batch := ethdb.HookedBatch{
@@ -1983,7 +1983,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrie(batch),
+ genTrie: trie.NewStackTrieWithOwner(batch, account),
})
}
for _, task := range tasks {
@@ -2028,7 +2028,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
slots += len(res.hashes[i])
if i < len(res.hashes)-1 || res.subTask == nil {
- tr := trie.NewStackTrie(batch)
+ tr := trie.NewStackTrieWithOwner(batch, account)
for j := 0; j < len(res.hashes[i]); j++ {
tr.Update(res.hashes[i][j][:], res.slots[i][j])
}
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 47ab1f026d..9d58440b68 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -367,7 +367,8 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
return hashes, slots, proofs
}
-// the createStorageRequestResponseAlwaysProve tests a cornercase, where it always
+// the createStorageRequestResponseAlwaysProve tests a cornercase, where it always
+//
// supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
var size uint64
@@ -1346,7 +1347,7 @@ func getCodeByHash(hash common.Hash) []byte {
// makeAccountTrieNoStorage spits out a trie, along with the leaves
func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
db := trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie, _ := trie.New(common.Hash{}, db)
+ accTrie := trie.NewEmpty(db)
var entries entrySlice
for i := uint64(1); i <= uint64(n); i++ {
value, _ := rlp.EncodeToBytes(types.StateAccount{
@@ -1373,8 +1374,8 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
entries entrySlice
boundaries []common.Hash
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- trie, _ = trie.New(common.Hash{}, db)
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ trie = trie.NewEmpty(db)
)
// Initialize boundaries
var next common.Hash
@@ -1426,7 +1427,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie, _ = trie.New(common.Hash{}, db)
+ accTrie = trie.NewEmpty(db)
entries entrySlice
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
@@ -1439,7 +1440,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
codehash = getCodeHash(i)
}
// Create a storage trie
- stTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)
+ stTrie, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
stRoot := stTrie.Hash()
stTrie.Commit(nil)
value, _ := rlp.EncodeToBytes(types.StateAccount{
@@ -1465,23 +1466,11 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie, _ = trie.New(common.Hash{}, db)
+ accTrie = trie.NewEmpty(db)
entries entrySlice
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
)
- // Make a storage trie which we reuse for the whole lot
- var (
- stTrie *trie.Trie
- stEntries entrySlice
- )
- if boundary {
- stTrie, stEntries = makeBoundaryStorageTrie(slots, db)
- } else {
- stTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)
- }
- stRoot := stTrie.Hash()
-
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
key := key32(i)
@@ -1489,7 +1478,20 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
if code {
codehash = getCodeHash(i)
}
- value, _ := rlp.EncodeToBytes(types.StateAccount{
+ // Make a storage trie
+ var (
+ stTrie *trie.Trie
+ stEntries entrySlice
+ )
+ if boundary {
+ stTrie, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
+ } else {
+ stTrie, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
+ }
+ stRoot := stTrie.Hash()
+ stTrie.Commit(nil)
+
+ value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
Root: stRoot,
@@ -1503,7 +1505,6 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
storageEntries[common.BytesToHash(key)] = stEntries
}
sort.Sort(entries)
- stTrie.Commit(nil)
accTrie.Commit(nil)
return accTrie, entries, storageTries, storageEntries
}
@@ -1511,8 +1512,8 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
// that tries are unique.
-func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
- trie, _ := trie.New(common.Hash{}, db)
+func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
+ trie, _ := trie.New(owner, common.Hash{}, db)
var entries entrySlice
for i := uint64(1); i <= n; i++ {
// store 'x' at slot 'x'
@@ -1534,11 +1535,11 @@ func makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, ent
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hash.
-func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {
+func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (*trie.Trie, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
- trie, _ = trie.New(common.Hash{}, db)
+ trie, _ = trie.New(owner, common.Hash{}, db)
)
// Initialize boundaries
var next common.Hash
@@ -1585,7 +1586,7 @@ func makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice)
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
triedb := trie.NewDatabase(db)
- accTrie, err := trie.New(root, triedb)
+ accTrie, err := trie.New(common.Hash{}, root, triedb)
if err != nil {
t.Fatal(err)
}
@@ -1603,7 +1604,7 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
}
accounts++
if acc.Root != emptyRoot {
- storeTrie, err := trie.NewSecure(acc.Root, triedb)
+ storeTrie, err := trie.NewSecure(common.BytesToHash(accIt.Key), acc.Root, triedb)
if err != nil {
t.Fatal(err)
}
diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go
index 17cd3630c9..70f76956ff 100644
--- a/les/downloader/downloader_test.go
+++ b/les/downloader/downloader_test.go
@@ -229,7 +229,7 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
// For now only check that the state trie is correct
if block := dl.GetBlockByHash(hash); block != nil {
- _, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))
+ _, err := trie.NewSecure(common.Hash{}, block.Root(), trie.NewDatabase(dl.stateDb))
return err
}
return fmt.Errorf("non existent block: %x", hash[:4])
diff --git a/les/handler_test.go b/les/handler_test.go
index 1e26c8d795..0ccc71973f 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -406,7 +406,7 @@ func testGetProofs(t *testing.T, protocol int) {
accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
header := bc.GetHeaderByNumber(i)
- trie, _ := trie.New(header.Root, trie.NewDatabase(server.db))
+ trie, _ := trie.New(common.Hash{}, header.Root, trie.NewDatabase(server.db))
for _, acc := range accounts {
req := ProofReq{
@@ -457,7 +457,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
var expected []rlp.RawValue
if wantOK {
proofsV2 := light.NewNodeSet()
- t, _ := trie.New(header.Root, trie.NewDatabase(server.db))
+ t, _ := trie.New(common.Hash{}, header.Root, trie.NewDatabase(server.db))
t.Prove(account, 0, proofsV2)
expected = proofsV2.NodeList()
}
@@ -513,7 +513,7 @@ func testGetCHTProofs(t *testing.T, protocol int) {
AuxData: [][]byte{rlp},
}
root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
- trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
+ trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
trie.Prove(key, 0, &proofsV2.Proofs)
// Assemble the requests for the different protocols
requestsV2 := []HelperTrieReq{{
@@ -578,7 +578,7 @@ func testGetBloombitsProofs(t *testing.T, protocol int) {
var proofs HelperTrieResps
root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
- trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
+ trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
trie.Prove(key, 0, &proofs.Proofs)
// Send the proof request and verify the response
diff --git a/les/server_handler.go b/les/server_handler.go
index 687409efaa..9cda4368ef 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -360,7 +360,7 @@ func (h *serverHandler) AddTxsSync() bool {
// getAccount retrieves an account from the state based on root.
func getAccount(triedb *trie.Database, root, hash common.Hash) (types.StateAccount, error) {
- trie, err := trie.New(root, triedb)
+ trie, err := trie.New(common.Hash{}, root, triedb)
if err != nil {
return types.StateAccount{}, err
}
@@ -392,7 +392,7 @@ func (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie {
if root == (common.Hash{}) {
return nil
}
- trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
+ trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
return trie
}
diff --git a/light/odr_test.go b/light/odr_test.go
index 0be7e6e4ef..e89d609391 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -82,7 +82,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
req.Receipts = rawdb.ReadRawReceipts(odr.sdb, req.Hash, *number)
}
case *TrieRequest:
- t, _ := trie.New(req.Id.Root, trie.NewDatabase(odr.sdb))
+ t, _ := trie.New(common.BytesToHash(req.Id.AccKey), req.Id.Root, trie.NewDatabase(odr.sdb))
nodes := NewNodeSet()
t.Prove(req.Key, 0, nodes)
req.Proof = nodes
diff --git a/light/postprocess.go b/light/postprocess.go
index ce38d091e8..c09b00e71c 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -187,12 +187,12 @@ func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSecti
root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
}
var err error
- c.trie, err = trie.New(root, c.triedb)
+ c.trie, err = trie.New(common.Hash{}, root, c.triedb)
if err != nil && c.odr != nil {
err = c.fetchMissingNodes(ctx, section, root)
if err == nil {
- c.trie, err = trie.New(root, c.triedb)
+ c.trie, err = trie.New(common.Hash{}, root, c.triedb)
}
}
c.section = section
@@ -253,9 +253,8 @@ func (c *ChtIndexerBackend) Commit() error {
return nil
}
-// PruneSections implements core.ChainIndexerBackend which deletes all
-// chain data(except hash<->number mappings) older than the specified
-// threshold.
+// Prune implements core.ChainIndexerBackend which deletes all chain data
+// (except hash<->number mappings) older than the specified threshold.
func (c *ChtIndexerBackend) Prune(threshold uint64) error {
// Short circuit if the light pruning is disabled.
if c.disablePruning {
@@ -404,11 +403,11 @@ func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, las
root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
}
var err error
- b.trie, err = trie.New(root, b.triedb)
+ b.trie, err = trie.New(common.Hash{}, root, b.triedb)
if err != nil && b.odr != nil {
err = b.fetchMissingNodes(ctx, section, root)
if err == nil {
- b.trie, err = trie.New(root, b.triedb)
+ b.trie, err = trie.New(common.Hash{}, root, b.triedb)
}
}
b.section = section
diff --git a/light/trie.go b/light/trie.go
index 4ab6f4ace0..931ba30cb4 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -169,7 +169,11 @@ func (t *odrTrie) do(key []byte, fn func() error) error {
for {
var err error
if t.trie == nil {
- t.trie, err = trie.New(t.id.Root, trie.NewDatabase(t.db.backend.Database()))
+ var owner common.Hash
+ if len(t.id.AccKey) > 0 {
+ owner = common.BytesToHash(t.id.AccKey)
+ }
+ t.trie, err = trie.New(owner, t.id.Root, trie.NewDatabase(t.db.backend.Database()))
}
if err == nil {
err = fn()
@@ -195,7 +199,11 @@ func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator {
// Open the actual non-ODR trie if that hasn't happened yet.
if t.trie == nil {
it.do(func() error {
- t, err := trie.New(t.id.Root, trie.NewDatabase(t.db.backend.Database()))
+ var owner common.Hash
+ if len(t.id.AccKey) > 0 {
+ owner = common.BytesToHash(t.id.AccKey)
+ }
+ t, err := trie.New(owner, t.id.Root, trie.NewDatabase(t.db.backend.Database()))
if err == nil {
it.t.trie = t
}
diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go
index cb24a2746e..20dcdec083 100644
--- a/tests/fuzzers/les/les-fuzzer.go
+++ b/tests/fuzzers/les/les-fuzzer.go
@@ -90,8 +90,8 @@ func makechain() (bc *core.BlockChain, addrHashes, txHashes []common.Hash) {
}
func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [][]byte) {
- chtTrie, _ = trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
- bloomTrie, _ = trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < testChainLen; i++ {
// The element in CHT is <big-endian block number> -> <block hash>
key := make([]byte, 8)
diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
index 5d7097b137..5a65152aa8 100644
--- a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
+++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
@@ -62,8 +62,7 @@ func (f *fuzzer) readInt() uint64 {
}
func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
-
- trie, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
size := f.readInt()
// Fill it with some fluff
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 9ed8bcbc51..772c776436 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -25,7 +25,6 @@ import (
"io"
"sort"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
@@ -144,7 +143,7 @@ func (f *fuzzer) fuzz() int {
var (
spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
dbA = trie.NewDatabase(spongeA)
- trieA, _ = trie.New(common.Hash{}, dbA)
+ trieA = trie.NewEmpty(dbA)
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
trieB = trie.NewStackTrie(spongeB)
vals kvs
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index e993af47cf..2301721c93 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -144,7 +144,7 @@ func runRandTest(rt randTest) error {
triedb := trie.NewDatabase(memorydb.New())
- tr, _ := trie.New(common.Hash{}, triedb)
+ tr := trie.NewEmpty(triedb)
values := make(map[string]string) // tracks content of the trie
for i, step := range rt {
@@ -170,13 +170,13 @@ func runRandTest(rt randTest) error {
if err != nil {
return err
}
- newtr, err := trie.New(hash, triedb)
+ newtr, err := trie.New(common.Hash{}, hash, triedb)
if err != nil {
return err
}
tr = newtr
case opItercheckhash:
- checktr, _ := trie.New(common.Hash{}, triedb)
+ checktr := trie.NewEmpty(triedb)
it := trie.NewIterator(tr.NodeIterator(nil))
for it.Next() {
checktr.Update(it.Key, it.Value)
diff --git a/trie/errors.go b/trie/errors.go
index 567b80078c..afe344bed2 100644
--- a/trie/errors.go
+++ b/trie/errors.go
@@ -26,10 +26,21 @@ import (
// in the case where a trie node is not present in the local database. It contains
// information necessary for retrieving the missing node.
type MissingNodeError struct {
+ Owner common.Hash // owner of the trie if it's 2-layered trie
NodeHash common.Hash // hash of the missing node
Path []byte // hex-encoded path to the missing node
+ err error // concrete error for missing trie node
+}
+
+// Unwrap returns the concrete error for the missing trie node, which
+// allows further analysis outside of this package.
+func (err *MissingNodeError) Unwrap() error {
+ return err.err
}
func (err *MissingNodeError) Error() string {
- return fmt.Sprintf("missing trie node %x (path %x)", err.NodeHash, err.Path)
+ if err.Owner == (common.Hash{}) {
+ return fmt.Sprintf("missing trie node %x (path %x) %v", err.NodeHash, err.Path, err.err)
+ }
+ return fmt.Sprintf("missing trie node %x (owner %x) (path %x) %v", err.NodeHash, err.Owner, err.Path, err.err)
}
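With the Owner field and the wrapped concrete error in place, callers can inspect missing-node failures through the standard errors helpers. A hedged sketch assuming the patched trie package; describeMissing is an illustrative name.

package example

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/trie"
)

// describeMissing reports the owner-aware details of a missing-node error.
func describeMissing(err error) {
	var missing *trie.MissingNodeError
	if errors.As(err, &missing) {
		// Owner is the zero hash for the account trie and the owning
		// account's hash for storage tries.
		fmt.Printf("missing node %x (owner %x, path %x)\n", missing.NodeHash, missing.Owner, missing.Path)
		// Unwrap exposes the concrete cause for further analysis.
		fmt.Println("cause:", errors.Unwrap(missing))
	}
}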
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 679ae2cdcc..2dffd8ff07 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -297,7 +297,7 @@ func TestUnionIterator(t *testing.T) {
}
func TestIteratorNoDups(t *testing.T) {
- tr, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for _, val := range testdata1 {
tr.Update([]byte(val.k), []byte(val.v))
}
@@ -312,7 +312,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
diskdb := memorydb.New()
triedb := NewDatabase(diskdb)
- tr, _ := New(common.Hash{}, triedb)
+ tr := NewEmpty(triedb)
for _, val := range testdata1 {
tr.Update([]byte(val.k), []byte(val.v))
}
@@ -337,7 +337,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
}
for i := 0; i < 20; i++ {
// Create trie that will load all nodes from DB.
- tr, _ := New(tr.Hash(), triedb)
+ tr, _ := New(common.Hash{}, tr.Hash(), triedb)
// Remove a random node from the database. It can't be the root node
// because that one is already loaded.
@@ -403,7 +403,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
diskdb := memorydb.New()
triedb := NewDatabase(diskdb)
- ctr, _ := New(common.Hash{}, triedb)
+ ctr := NewEmpty(triedb)
for _, val := range testdata1 {
ctr.Update([]byte(val.k), []byte(val.v))
}
@@ -425,7 +425,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
}
// Create a new iterator that seeks to "bars". Seeking can't proceed because
// the node is missing.
- tr, _ := New(root, triedb)
+ tr, _ := New(common.Hash{}, root, triedb)
it := tr.NodeIterator([]byte("bars"))
missing, ok := it.Error().(*MissingNodeError)
if !ok {
@@ -513,7 +513,7 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
// Create an empty trie
logDb := &loggingDb{0, memorydb.New()}
triedb := NewDatabase(logDb)
- trie, _ := NewSecure(common.Hash{}, triedb)
+ trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
// Fill it with some arbitrary data
for i := 0; i < 10000; i++ {
diff --git a/trie/proof_test.go b/trie/proof_test.go
index 19ca51e259..f772a5e838 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -80,7 +80,7 @@ func TestProof(t *testing.T) {
}
func TestOneElementProof(t *testing.T) {
- trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
updateString(trie, "k", "v")
for i, prover := range makeProvers(trie) {
proof := prover([]byte("k"))
@@ -131,7 +131,7 @@ func TestBadProof(t *testing.T) {
// Tests that missing keys can also be proven. The test explicitly uses a single
// entry trie and checks for missing keys both before and after the single entry.
func TestMissingKeyProof(t *testing.T) {
- trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
updateString(trie, "k", "v")
for i, key := range []string{"a", "j", "l", "z"} {
@@ -387,7 +387,7 @@ func TestOneElementRangeProof(t *testing.T) {
}
// Test the mini trie with only a single element.
- tinyTrie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
entry := &kv{randBytes(32), randBytes(20), false}
tinyTrie.Update(entry.k, entry.v)
@@ -459,7 +459,7 @@ func TestAllElementsProof(t *testing.T) {
// TestSingleSideRangeProof tests the range starts from zero.
func TestSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
- trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -494,7 +494,7 @@ func TestSingleSideRangeProof(t *testing.T) {
// TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff.
func TestReverseSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
- trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -601,7 +601,7 @@ func TestBadRangeProof(t *testing.T) {
// TestGappedRangeProof focuses on the small trie with embedded nodes.
// If the gapped node is embedded in the trie, it should be detected too.
func TestGappedRangeProof(t *testing.T) {
- trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
var entries []*kv // Sorted entries
for i := byte(0); i < 10; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -675,7 +675,7 @@ func TestSameSideProofs(t *testing.T) {
}
func TestHasRightElement(t *testing.T) {
- trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -1028,7 +1028,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
}
func randomTrie(n int) (*Trie, map[string]*kv) {
- trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
for i := byte(0); i < 100; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -1053,7 +1053,7 @@ func randBytes(n int) []byte {
}
func nonRandomTrie(n int) (*Trie, map[string]*kv) {
- trie, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
max := uint64(0xffffffffffffffff)
for i := uint64(0); i < uint64(n); i++ {
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 7a16a09898..380856fa92 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -54,11 +54,11 @@ type SecureTrie struct {
// Loaded nodes are kept around until their 'cache generation' expires.
// A new cache generation is created by each call to Commit.
// cachelimit sets the number of past cache generations to keep.
-func NewSecure(root common.Hash, db *Database) (*SecureTrie, error) {
+func NewSecure(owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) {
if db == nil {
panic("trie.NewSecure called without a database")
}
- trie, err := New(root, db)
+ trie, err := New(owner, root, db)
if err != nil {
return nil, err
}
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index a3ece84b57..beea5845ad 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -28,7 +28,7 @@ import (
)
func newEmptySecure() *SecureTrie {
- trie, _ := NewSecure(common.Hash{}, NewDatabase(memorydb.New()))
+ trie, _ := NewSecure(common.Hash{}, common.Hash{}, NewDatabase(memorydb.New()))
return trie
}
@@ -36,7 +36,7 @@ func newEmptySecure() *SecureTrie {
func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
triedb := NewDatabase(memorydb.New())
- trie, _ := NewSecure(common.Hash{}, triedb)
+ trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
// Fill it with some arbitrary data
content := make(map[string][]byte)
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index f9ff10b62d..3d742d7fca 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -39,9 +39,10 @@ var stPool = sync.Pool{
},
}
-func stackTrieFromPool(db ethdb.KeyValueWriter) *StackTrie {
+func stackTrieFromPool(db ethdb.KeyValueWriter, owner common.Hash) *StackTrie {
st := stPool.Get().(*StackTrie)
st.db = db
+ st.owner = owner
return st
}
@@ -54,6 +55,7 @@ func returnToPool(st *StackTrie) {
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
+ owner common.Hash // the owner of the trie
nodeType uint8 // node type (as in branch, ext, leaf)
val []byte // value contained by this node if it's a leaf
key []byte // key chunk covered by this (full|ext) node
@@ -70,6 +72,16 @@ func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie {
}
}
+// NewStackTrieWithOwner allocates and initializes an empty trie, but with
+// the additional owner field.
+func NewStackTrieWithOwner(db ethdb.KeyValueWriter, owner common.Hash) *StackTrie {
+ return &StackTrie{
+ owner: owner,
+ nodeType: emptyNode,
+ db: db,
+ }
+}
+
// NewFromBinary initialises a serialized stacktrie with the given db.
func NewFromBinary(data []byte, db ethdb.KeyValueWriter) (*StackTrie, error) {
var st StackTrie
@@ -90,11 +102,13 @@ func (st *StackTrie) MarshalBinary() (data []byte, err error) {
w = bufio.NewWriter(&b)
)
if err := gob.NewEncoder(w).Encode(struct {
- Nodetype uint8
+ Owner common.Hash
+ NodeType uint8
Val []byte
Key []byte
KeyOffset uint8
}{
+ st.owner,
st.nodeType,
st.val,
st.key,
@@ -126,13 +140,15 @@ func (st *StackTrie) UnmarshalBinary(data []byte) error {
func (st *StackTrie) unmarshalBinary(r io.Reader) error {
var dec struct {
- Nodetype uint8
+ Owner common.Hash
+ NodeType uint8
Val []byte
Key []byte
KeyOffset uint8
}
gob.NewDecoder(r).Decode(&dec)
- st.nodeType = dec.Nodetype
+ st.owner = dec.Owner
+ st.nodeType = dec.NodeType
st.val = dec.Val
st.key = dec.Key
st.keyOffset = int(dec.KeyOffset)
@@ -160,8 +176,8 @@ func (st *StackTrie) setDb(db ethdb.KeyValueWriter) {
}
}
-func newLeaf(ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
- st := stackTrieFromPool(db)
+func newLeaf(owner common.Hash, ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
+ st := stackTrieFromPool(db, owner)
st.nodeType = leafNode
st.keyOffset = ko
st.key = append(st.key, key[ko:]...)
@@ -169,8 +185,8 @@ func newLeaf(ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
return st
}
-func newExt(ko int, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie {
- st := stackTrieFromPool(db)
+func newExt(owner common.Hash, ko int, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie {
+ st := stackTrieFromPool(db, owner)
st.nodeType = extNode
st.keyOffset = ko
st.key = append(st.key, key[ko:]...)
@@ -204,6 +220,7 @@ func (st *StackTrie) Update(key, value []byte) {
}
func (st *StackTrie) Reset() {
+ st.owner = common.Hash{}
st.db = nil
st.key = st.key[:0]
st.val = nil
@@ -241,7 +258,7 @@ func (st *StackTrie) insert(key, value []byte) {
}
// Add new child
if st.children[idx] == nil {
- st.children[idx] = stackTrieFromPool(st.db)
+ st.children[idx] = stackTrieFromPool(st.db, st.owner)
st.children[idx].keyOffset = st.keyOffset + 1
}
st.children[idx].insert(key, value)
@@ -266,7 +283,7 @@ func (st *StackTrie) insert(key, value []byte) {
// node directly.
var n *StackTrie
if diffidx < len(st.key)-1 {
- n = newExt(diffidx+1, st.key, st.children[0], st.db)
+ n = newExt(st.owner, diffidx+1, st.key, st.children[0], st.db)
} else {
// Break on the last byte, no need to insert
// an extension node: reuse the current node
@@ -286,13 +303,13 @@ func (st *StackTrie) insert(key, value []byte) {
// the common prefix is at least one byte
// long, insert a new intermediate branch
// node.
- st.children[0] = stackTrieFromPool(st.db)
+ st.children[0] = stackTrieFromPool(st.db, st.owner)
st.children[0].nodeType = branchNode
st.children[0].keyOffset = st.keyOffset + diffidx
p = st.children[0]
}
// Create a leaf for the inserted part
- o := newLeaf(st.keyOffset+diffidx+1, key, value, st.db)
+ o := newLeaf(st.owner, st.keyOffset+diffidx+1, key, value, st.db)
// Insert both child leaves where they belong:
origIdx := st.key[diffidx]
@@ -328,7 +345,7 @@ func (st *StackTrie) insert(key, value []byte) {
// Convert current node into an ext,
// and insert a child branch node.
st.nodeType = extNode
- st.children[0] = NewStackTrie(st.db)
+ st.children[0] = NewStackTrieWithOwner(st.db, st.owner)
st.children[0].nodeType = branchNode
st.children[0].keyOffset = st.keyOffset + diffidx
p = st.children[0]
@@ -339,11 +356,11 @@ func (st *StackTrie) insert(key, value []byte) {
// The child leave will be hashed directly in order to
// free up some memory.
origIdx := st.key[diffidx]
- p.children[origIdx] = newLeaf(diffidx+1, st.key, st.val, st.db)
+ p.children[origIdx] = newLeaf(st.owner, diffidx+1, st.key, st.val, st.db)
p.children[origIdx].hash()
newIdx := key[diffidx+st.keyOffset]
- p.children[newIdx] = newLeaf(p.keyOffset+1, key, value, st.db)
+ p.children[newIdx] = newLeaf(st.owner, p.keyOffset+1, key, value, st.db)
// Finally, cut off the key part that has been passed
// over to the children.
@@ -363,11 +380,12 @@ func (st *StackTrie) insert(key, value []byte) {
// hash() hashes the node 'st' and converts it into 'hashedNode', if possible.
// Possible outcomes:
// 1. The rlp-encoded value was >= 32 bytes:
-// - Then the 32-byte `hash` will be accessible in `st.val`.
-// - And the 'st.type' will be 'hashedNode'
+// - Then the 32-byte `hash` will be accessible in `st.val`.
+// - And the 'st.type' will be 'hashedNode'
+//
// 2. The rlp-encoded value was < 32 bytes
-// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
-// - And the 'st.type' will be 'hashedNode' AGAIN
+// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
+// - And the 'st.type' will be 'hashedNode' AGAIN
//
// This method will also:
// set 'st.type' to hashedNode
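The owner flows through the stack trie as well, matching the snap-sync changes earlier in this patch. A minimal sketch of regenerating an owned storage trie, assuming the patched API; genStorageTrie and its parameters are illustrative, and Commit is the pre-existing StackTrie flush call.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// genStorageTrie rebuilds a storage trie owned by accountHash, writing
// finished subtrees into the supplied batch as they are hashed.
func genStorageTrie(batch ethdb.Batch, accountHash common.Hash, hashes []common.Hash, slots [][]byte) common.Hash {
	st := trie.NewStackTrieWithOwner(batch, accountHash)
	for i, hash := range hashes {
		st.Update(hash[:], slots[i])
	}
	root, _ := st.Commit() // flush the remaining nodes and return the root
	return root
}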
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index fb39e42525..15e5cd3d16 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -188,7 +188,7 @@ func TestStackTrieInsertAndHash(t *testing.T) {
func TestSizeBug(t *testing.T) {
st := NewStackTrie(nil)
- nt, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(memorydb.New()))
leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -203,7 +203,7 @@ func TestSizeBug(t *testing.T) {
func TestEmptyBug(t *testing.T) {
st := NewStackTrie(nil)
- nt, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(memorydb.New()))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -229,7 +229,7 @@ func TestEmptyBug(t *testing.T) {
func TestValLength56(t *testing.T) {
st := NewStackTrie(nil)
- nt, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(memorydb.New()))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -254,7 +254,8 @@ func TestValLength56(t *testing.T) {
// which causes a lot of node-within-node. This case was found via fuzzing.
func TestUpdateSmallNodes(t *testing.T) {
st := NewStackTrie(nil)
- nt, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(memorydb.New()))
+
kvs := []struct {
K string
V string
@@ -282,7 +283,8 @@ func TestUpdateSmallNodes(t *testing.T) {
func TestUpdateVariableKeys(t *testing.T) {
t.SkipNow()
st := NewStackTrie(nil)
- nt, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(memorydb.New()))
+
kvs := []struct {
K string
V string
@@ -352,7 +354,7 @@ func TestStacktrieNotModifyValues(t *testing.T) {
func TestStacktrieSerialization(t *testing.T) {
var (
st = NewStackTrie(nil)
- nt, _ = New(common.Hash{}, NewDatabase(memorydb.New()))
+ nt = NewEmpty(NewDatabase(memorydb.New()))
keyB = big.NewInt(1)
keyDelta = big.NewInt(1)
vals [][]byte
diff --git a/trie/sync_test.go b/trie/sync_test.go
index cb3283875d..cd29c391fc 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -29,7 +29,7 @@ import (
func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
triedb := NewDatabase(memorydb.New())
- trie, _ := NewSecure(common.Hash{}, triedb)
+ trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
// Fill it with some arbitrary data
content := make(map[string][]byte)
@@ -60,7 +60,7 @@ func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
// content map.
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
// Check root availability and trie contents
- trie, err := NewSecure(common.BytesToHash(root), db)
+ trie, err := NewSecure(common.Hash{}, common.BytesToHash(root), db)
if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err)
}
@@ -77,7 +77,7 @@ func checkTrieContents(t *testing.T, db *Database, root []byte, content map[stri
// checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db *Database, root common.Hash) error {
// Create and iterate a trie rooted in a subnode
- trie, err := NewSecure(root, db)
+ trie, err := NewSecure(common.Hash{}, root, db)
if err != nil {
return nil // Consider a non existent state consistent
}
@@ -91,8 +91,8 @@ func checkTrieConsistency(db *Database, root common.Hash) error {
func TestEmptySync(t *testing.T) {
dbA := NewDatabase(memorydb.New())
dbB := NewDatabase(memorydb.New())
- emptyA, _ := New(common.Hash{}, dbA)
- emptyB, _ := New(emptyRoot, dbB)
+ emptyA := NewEmpty(dbA)
+ emptyB, _ := New(common.Hash{}, emptyRoot, dbB)
for i, trie := range []*Trie{emptyA, emptyB} {
sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()))
diff --git a/trie/trie.go b/trie/trie.go
index 79ed3176f0..f23c5ad546 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -61,9 +61,11 @@ type LeafCallback func(paths [][]byte, hexpath []byte, leaf []byte, parent commo
//
// Trie is not safe for concurrent use.
type Trie struct {
- db *Database
- root node
- // Keep track of the number leafs which have been inserted since the last
+ db *Database
+ root node
+ owner common.Hash
+
+ // Keep track of the number of leaves which have been inserted since the last
// hashing operation. This number will not directly map to the number of
// actually unhashed nodes
unhashed int
@@ -86,18 +88,31 @@ func newWithRootNode(root node) *Trie {
}
}
-// New creates a trie with an existing root node from db.
+// New creates a trie with an existing root node from db and an assigned
+// owner; for storage tries the owner is the hash of the owning account.
//
// If root is the zero hash or the sha3 hash of an empty string, the
// trie is initially empty and does not require a database. Otherwise,
// New will panic if db is nil and returns a MissingNodeError if root does
// not exist in the database. Accessing the trie loads nodes from db on demand.
-func New(root common.Hash, db *Database) (*Trie, error) {
+func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
+ return newTrie(owner, root, db)
+}
+
+// NewEmpty is a shortcut to create an empty trie. It's mostly used in tests.
+func NewEmpty(db *Database) *Trie {
+ tr, _ := newTrie(common.Hash{}, common.Hash{}, db)
+ return tr
+}
+
+// newTrie is the internal function used to construct the trie with given parameters.
+func newTrie(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
if db == nil {
panic("trie.New called without a database")
}
trie := &Trie{
- db: db,
+ db: db,
+ owner: owner,
//tracer: newTracer(),
}
if root != (common.Hash{}) && root != emptyRoot {
@@ -574,7 +589,7 @@ func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
if node := t.db.node(hash); node != nil {
return node, nil
}
- return nil, &MissingNodeError{NodeHash: hash, Path: prefix}
+ return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix}
}
// Hash returns the root hash of the trie. It does not write to the
@@ -606,7 +621,10 @@ func (t *Trie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
// Do a quick check if we really need to commit, before we spin
// up goroutines. This can happen e.g. if we load a trie for reading storage
// values, but don't write to it.
- if _, dirty := t.root.cache(); !dirty {
+ if hashedNode, dirty := t.root.cache(); !dirty {
+ // Replace the root node with the origin hash in order to
+ // ensure all resolved nodes are dropped after the commit.
+ t.root = hashedNode
return rootHash, 0, nil
}
var wg sync.WaitGroup
@@ -651,6 +669,7 @@ func (t *Trie) hashRoot() (node, node, error) {
// Reset drops the referenced root node and cleans all internal state.
func (t *Trie) Reset() {
t.root = nil
+ t.owner = common.Hash{}
t.unhashed = 0
t.tracer.reset()
}
@@ -664,3 +683,8 @@ func (t *Trie) Copy() *Trie {
tracer: t.tracer.copy(),
}
}
+
+// Owner returns the associated trie owner.
+func (t *Trie) Owner() common.Hash {
+ return t.owner
+}
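A quick sketch of the two new entry points together, assuming the patched package; emptyTrieOwner is an illustrative name.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

// emptyTrieOwner shows that NewEmpty yields an unowned trie.
func emptyTrieOwner() {
	tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
	tr.Update([]byte("k"), []byte("v"))
	fmt.Println(tr.Hash(), tr.Owner()) // Owner() is the zero hash here
}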
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 806a8cc634..095df36481 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -32,6 +32,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
@@ -48,12 +49,12 @@ func init() {
// Used for testing
func newEmpty() *Trie {
- trie, _ := New(common.Hash{}, NewDatabase(memorydb.New()))
+ trie := NewEmpty(NewDatabase(memorydb.New()))
return trie
}
func TestEmptyTrie(t *testing.T) {
- var trie Trie
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
res := trie.Hash()
exp := emptyRoot
if res != exp {
@@ -62,7 +63,7 @@ func TestEmptyTrie(t *testing.T) {
}
func TestNull(t *testing.T) {
- var trie Trie
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
key := make([]byte, 32)
value := []byte("test")
trie.Update(key, value)
@@ -72,7 +73,7 @@ func TestNull(t *testing.T) {
}
func TestMissingRoot(t *testing.T) {
- trie, err := New(common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(memorydb.New()))
+ trie, err := New(common.Hash{}, common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(memorydb.New()))
if trie != nil {
t.Error("New returned non-nil trie for invalid root")
}
@@ -88,7 +89,7 @@ func testMissingNode(t *testing.T, memonly bool) {
diskdb := memorydb.New()
triedb := NewDatabase(diskdb)
- trie, _ := New(common.Hash{}, triedb)
+ trie := NewEmpty(triedb)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
root, _, _ := trie.Commit(nil)
@@ -96,27 +97,27 @@ func testMissingNode(t *testing.T, memonly bool) {
triedb.Commit(root, true, nil)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
_, err := trie.TryGet([]byte("120000"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
_, err = trie.TryGet([]byte("120099"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
_, err = trie.TryGet([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
err = trie.TryUpdate([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
err = trie.TryDelete([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
@@ -129,27 +130,27 @@ func testMissingNode(t *testing.T, memonly bool) {
diskdb.Delete(hash[:])
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
_, err = trie.TryGet([]byte("120000"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
_, err = trie.TryGet([]byte("120099"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
_, err = trie.TryGet([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(root, triedb)
+ trie, _ = New(common.Hash{}, root, triedb)
err = trie.TryDelete([]byte("123456"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
@@ -277,7 +278,7 @@ func TestReplication(t *testing.T) {
}
// create a new trie on top of the database and check that lookups work.
- trie2, err := New(exp, trie.db)
+ trie2, err := New(common.Hash{}, exp, trie.db)
if err != nil {
t.Fatalf("can't recreate trie at %x: %v", exp, err)
}
@@ -408,10 +409,12 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
}
func runRandTest(rt randTest) bool {
- triedb := NewDatabase(memorydb.New())
-
- tr, _ := New(common.Hash{}, triedb)
- values := make(map[string]string) // tracks content of the trie
+ var (
+ triedb = NewDatabase(memorydb.New())
+ tr = NewEmpty(triedb)
+ values = make(map[string]string) // tracks content of the trie
+ )
+ tr.tracer = newTracer()
for i, step := range rt {
fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
@@ -439,14 +442,14 @@ func runRandTest(rt randTest) bool {
rt[i].err = err
return false
}
- newtr, err := New(hash, triedb)
+ newtr, err := New(common.Hash{}, hash, triedb)
if err != nil {
rt[i].err = err
return false
}
tr = newtr
case opItercheckhash:
- checktr, _ := New(common.Hash{}, triedb)
+ checktr := NewEmpty(triedb)
it := NewIterator(tr.NodeIterator(nil))
for it.Next() {
checktr.Update(it.Key, it.Value)
@@ -480,10 +483,10 @@ func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
const benchElemCount = 20000
func benchGet(b *testing.B, commit bool) {
- trie := new(Trie)
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
if commit {
_, tmpdb := tempDB()
- trie, _ = New(common.Hash{}, tmpdb)
+ trie = NewEmpty(tmpdb)
}
k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ {
@@ -602,7 +605,7 @@ func TestTinyTrie(t *testing.T) {
if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
t.Errorf("3: got %x, exp %x", root, exp)
}
- checktr, _ := New(common.Hash{}, trie.db)
+ checktr := NewEmpty(trie.db)
it := NewIterator(trie.NodeIterator(nil))
for it.Next() {
checktr.Update(it.Key, it.Value)
@@ -728,7 +731,7 @@ func TestCommitSequence(t *testing.T) {
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(s)
- trie, _ := New(common.Hash{}, db)
+ trie := NewEmpty(db)
// Another sponge is used to check the callback-sequence
callbackSponge := sha3.NewLegacyKeccak256()
// Fill the trie with elements
@@ -770,7 +773,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(s)
- trie, _ := New(common.Hash{}, db)
+ trie := NewEmpty(db)
// Another sponge is used to check the callback-sequence
callbackSponge := sha3.NewLegacyKeccak256()
// Fill the trie with elements
@@ -809,7 +812,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
db := NewDatabase(s)
- trie, _ := New(common.Hash{}, db)
+ trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
stTrie := NewStackTrie(stackTrieSponge)
@@ -865,7 +868,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
func TestCommitSequenceSmallRoot(t *testing.T) {
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
db := NewDatabase(s)
- trie, _ := New(common.Hash{}, db)
+ trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
stTrie := NewStackTrie(stackTrieSponge)
diff --git a/trie/utils.go b/trie/utils.go
index be5e491bd8..503c033fb2 100644
--- a/trie/utils.go
+++ b/trie/utils.go
@@ -29,11 +29,16 @@ package trie
// This tool can track all of them no matter whether they are embedded in their
// parent or not, but the valueNode is never tracked.
//
+// Besides, it's also used for recording the original value of the nodes
+// when they are resolved from the disk. The pre-value of the nodes will
+// be used to construct reverse-diffs in the future.
+//
// Note tracer is not thread-safe, callers should be responsible for handling
// the concurrency issues by themselves.
type tracer struct {
insert map[string]struct{}
delete map[string]struct{}
+ origin map[string][]byte
}
// newTracer initializes the trie node diff tracer.
@@ -41,7 +46,18 @@ func newTracer() *tracer {
return &tracer{
insert: make(map[string]struct{}),
delete: make(map[string]struct{}),
+ origin: make(map[string][]byte),
+ }
+}
+
+// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally.
+// Don't change the value outside of this function since it's not deep-copied.
+func (t *tracer) onRead(key []byte, val []byte) {
+ // Tracer isn't used right now, remove this check later.
+ if t == nil {
+ return
}
+ t.origin[string(key)] = val
}
// onInsert tracks the newly inserted trie node. If it's already
@@ -101,6 +117,15 @@ func (t *tracer) deleteList() [][]byte {
return ret
}
+// getPrev returns the cached original value of the specified node.
+func (t *tracer) getPrev(key []byte) []byte {
+ // Don't panic on uninitialized tracer, it's possible in testing.
+ if t == nil {
+ return nil
+ }
+ return t.origin[string(key)]
+}
+
// reset clears the content tracked by tracer.
func (t *tracer) reset() {
// Tracer isn't used right now, remove this check later.
@@ -109,6 +134,7 @@ func (t *tracer) reset() {
}
t.insert = make(map[string]struct{})
t.delete = make(map[string]struct{})
+ t.origin = make(map[string][]byte)
}
// copy returns a deep copied tracer instance.
@@ -120,6 +146,7 @@ func (t *tracer) copy() *tracer {
var (
insert = make(map[string]struct{})
delete = make(map[string]struct{})
+ origin = make(map[string][]byte)
)
for key := range t.insert {
insert[key] = struct{}{}
@@ -127,8 +154,12 @@ func (t *tracer) copy() *tracer {
for key := range t.delete {
delete[key] = struct{}{}
}
+ for key, val := range t.origin {
+ origin[key] = val
+ }
return &tracer{
insert: insert,
delete: delete,
+ origin: origin,
}
}
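Since the tracer is unexported, the new origin cache can only be exercised from inside package trie, as the tests in this series do. A hedged sketch; tracerOriginExample is illustrative, and getPrev returns nil here because no node was resolved from disk.

package trie

import "github.com/ethereum/go-ethereum/core/rawdb"

// tracerOriginExample attaches a tracer and queries the cached original
// blob of a node; onRead populates the cache when nodes load from disk.
func tracerOriginExample() []byte {
	tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
	tr.tracer = newTracer()
	tr.Update([]byte("120000"), []byte("qwerqwerqwerqwerqwerqwerqwerqwer"))
	return tr.tracer.getPrev([]byte{0x01})
}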
diff --git a/trie/utils_test.go b/trie/utils_test.go
index fadb0553b5..589eca6242 100644
--- a/trie/utils_test.go
+++ b/trie/utils_test.go
@@ -19,14 +19,12 @@ package trie
import (
"testing"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
)
// Tests if the trie diffs are tracked correctly.
func TestTrieTracer(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- trie, _ := New(common.Hash{}, db)
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
trie.tracer = newTracer()
// Insert a batch of entries, all the nodes should be marked as inserted
@@ -93,8 +91,7 @@ func TestTrieTracer(t *testing.T) {
}
func TestTrieTracerNoop(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- trie, _ := New(common.Hash{}, db)
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
trie.tracer = newTracer()
// Insert a batch of entries, all the nodes should be marked as inserted
From 06ad1b394fd030abf4e8ac371aa9f86f9f62c8ab Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Fri, 6 Sep 2024 14:31:18 +0700
Subject: [PATCH 04/41] core, trie: rework trie committer (#560)
* core, trie: rework trie committer
Changed the commit procedure: a new NodeSet struct is introduced to
collect and return all dirty nodes of a trie. Multiple node sets are
merged into a MergedNodeSet, which is then submitted to the in-memory
database from block to block.
* trie,core: fix comments
---
core/state/database.go | 8 +-
core/state/metrics.go | 12 +-
core/state/snapshot/generate.go | 5 +-
core/state/snapshot/generate_test.go | 46 +++----
core/state/state_object.go | 11 +-
core/state/statedb.go | 47 ++++---
eth/protocols/snap/sync_test.go | 103 +++++++++++----
light/postprocess.go | 27 +++-
light/trie.go | 6 +-
tests/fuzzers/stacktrie/trie_fuzzer.go | 11 +-
tests/fuzzers/trie/trie-fuzzer.go | 17 ++-
trie/committer.go | 176 +++++++++----------------
trie/database.go | 38 +++++-
trie/iterator.go | 3 +-
trie/iterator_test.go | 54 +++++---
trie/nodeset.go | 95 +++++++++++++
trie/proof.go | 14 +-
trie/secure_trie.go | 14 +-
trie/secure_trie_test.go | 4 +-
trie/sync_test.go | 9 +-
trie/trie.go | 114 ++++++----------
trie/trie_test.go | 107 ++++++++-------
trie/utils_test.go | 9 +-
23 files changed, 550 insertions(+), 380 deletions(-)
create mode 100644 trie/nodeset.go
diff --git a/core/state/database.go b/core/state/database.go
index 47748f3a0c..87461efcf1 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -88,9 +88,11 @@ type Trie interface {
// can be used even if the trie doesn't have one.
Hash() common.Hash
- // Commit writes all nodes to the trie's memory database, tracking the internal
- // and external (for account tries) references.
- Commit(onleaf trie.LeafCallback) (common.Hash, int, error)
+ // Commit collects all dirty nodes in the trie and replaces them with the
+ // corresponding node hash. All collected nodes (including dirty leaves if
+ // collectLeaf is true) will be encapsulated into a nodeset for return.
+ // The returned nodeset can be nil if the trie is clean (nothing to commit).
+ Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error)
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key.
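Under the new contract, each trie's Commit returns its dirty nodes as a NodeSet; callers merge the sets and submit everything to the trie database in a single Update, as the snapshot and state changes below do. A hedged sketch assuming the patched APIs; commitAll is an illustrative helper, and for a single set trie.NewWithNodeSet wraps it directly.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// commitAll commits a storage trie and the account trie, merges their
// dirty-node sets and submits them to the trie database at once.
func commitAll(accTrie, stTrie *trie.Trie, triedb *trie.Database) (common.Hash, error) {
	merged := trie.NewMergedNodeSet()

	_, stNodes, err := stTrie.Commit(false) // storage leaves not collected
	if err != nil {
		return common.Hash{}, err
	}
	if stNodes != nil {
		merged.Merge(stNodes)
	}
	root, accNodes, err := accTrie.Commit(true) // collect account leaves
	if err != nil {
		return common.Hash{}, err
	}
	if accNodes != nil {
		merged.Merge(accNodes)
	}
	triedb.Update(merged) // single submission of all dirty nodes
	return root, nil
}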
diff --git a/core/state/metrics.go b/core/state/metrics.go
index 7b40ff37af..35d2df92dd 100644
--- a/core/state/metrics.go
+++ b/core/state/metrics.go
@@ -19,10 +19,10 @@ package state
import "github.com/ethereum/go-ethereum/metrics"
var (
- accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
- storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
- accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
- storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
- accountCommittedMeter = metrics.NewRegisteredMeter("state/commit/account", nil)
- storageCommittedMeter = metrics.NewRegisteredMeter("state/commit/storage", nil)
+ accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
+ storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
+ accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
+ storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
+ accountTrieCommittedMeter = metrics.NewRegisteredMeter("state/commit/accountnodes", nil)
+ storageTriesCommittedMeter = metrics.NewRegisteredMeter("state/commit/storagenodes", nil)
)
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 049f0e0f80..85b667c537 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -436,7 +436,10 @@ func (dl *diskLayer) generateRange(owner common.Hash, root common.Hash, prefix [
for i, key := range result.keys {
snapTrie.Update(key, result.vals[i])
}
- root, _, _ := snapTrie.Commit(nil)
+ root, nodes, _ := snapTrie.Commit(false)
+ if nodes != nil {
+ snapTrieDb.Update(trie.NewWithNodeSet(nodes))
+ }
snapTrieDb.Commit(root, false, nil)
}
tr := result.tr
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 42cef2df7e..fc09cecbf3 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -138,6 +138,7 @@ type testHelper struct {
diskdb ethdb.Database
triedb *trie.Database
accTrie *trie.SecureTrie
+ nodes *trie.MergedNodeSet
}
func newHelper() *testHelper {
@@ -148,6 +149,7 @@ func newHelper() *testHelper {
diskdb: diskdb,
triedb: triedb,
accTrie: accTrie,
+ nodes: trie.NewMergedNodeSet(),
}
}
@@ -179,17 +181,22 @@ func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string
for i, k := range keys {
stTrie.Update([]byte(k), []byte(vals[i]))
}
- var root common.Hash
if !commit {
- root = stTrie.Hash()
- } else {
- root, _, _ = stTrie.Commit(nil)
+ return stTrie.Hash().Bytes()
+ }
+ root, nodes, _ := stTrie.Commit(false)
+ if nodes != nil {
+ t.nodes.Merge(nodes)
}
return root.Bytes()
}
func (t *testHelper) Commit() common.Hash {
- root, _, _ := t.accTrie.Commit(nil)
+ root, nodes, _ := t.accTrie.Commit(true)
+ if nodes != nil {
+ t.nodes.Merge(nodes)
+ }
+ t.triedb.Update(t.nodes)
t.triedb.Commit(root, false, nil)
return root
}
@@ -375,7 +382,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
- root, _, _ := helper.accTrie.Commit(nil) // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
+ root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
// Delete an account trie leaf and ensure the generator chokes
helper.triedb.Commit(root, false, nil)
@@ -410,18 +417,7 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
- root, _, _ := helper.accTrie.Commit(nil)
-
- // We can only corrupt the disk database, so flush the tries out
- helper.triedb.Reference(
- common.BytesToHash(stRoot),
- common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
- )
- helper.triedb.Reference(
- common.BytesToHash(stRoot),
- common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
- )
- helper.triedb.Commit(root, false, nil)
+ root := helper.Commit()
// Delete a storage trie root and ensure the generator chokes
helper.diskdb.Delete(stRoot)
@@ -455,19 +451,7 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
- root, _, _ := helper.accTrie.Commit(nil)
-
- // We can only corrupt the disk database, so flush the tries out
- helper.triedb.Reference(
- common.BytesToHash(stRoot),
- common.HexToHash("0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e"),
- )
- helper.triedb.Reference(
- common.BytesToHash(stRoot),
- common.HexToHash("0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2"),
- )
- helper.triedb.Commit(root, false, nil)
-
+ root := helper.Commit()
// Delete a storage trie leaf and ensure the generator chokes
helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 3b0a53eb01..a6dee5c02f 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
)
var emptyCodeHash = crypto.Keccak256(nil)
@@ -393,23 +394,23 @@ func (s *stateObject) updateRoot(db Database) {
// CommitTrie the storage trie of the object to db.
// This updates the trie root.
-func (s *stateObject) CommitTrie(db Database) (int, error) {
+func (s *stateObject) CommitTrie(db Database) (*trie.NodeSet, error) {
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
- return 0, nil
+ return nil, nil
}
if s.dbErr != nil {
- return 0, s.dbErr
+ return nil, s.dbErr
}
// Track the amount of time wasted on committing the storage trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
}
- root, committed, err := s.trie.Commit(nil)
+ root, nodes, err := s.trie.Commit(false)
if err == nil {
s.data.Root = root
}
- return committed, err
+ return nodes, err
}
// AddBalance adds amount to s's balance.
diff --git a/core/state/statedb.go b/core/state/statedb.go
index a267962226..6b041eaa65 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -972,7 +972,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
s.IntermediateRoot(deleteEmptyObjects)
// Commit objects to the trie, measuring the elapsed time
- var storageCommitted int
+ var (
+ accountTrieNodes int
+ storageTrieNodes int
+ nodes = trie.NewMergedNodeSet()
+ )
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
for addr := range s.stateObjectsDirty {
if obj := s.stateObjects[addr]; !obj.deleted {
@@ -982,11 +986,18 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
obj.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie
- committed, err := obj.CommitTrie(s.db)
+ nodeSet, err := obj.CommitTrie(s.db)
if err != nil {
return common.Hash{}, err
}
- storageCommitted += committed
+
+ // Merge the dirty nodes of storage trie into global set
+ if nodeSet != nil {
+ if err := nodes.Merge(nodeSet); err != nil {
+ return common.Hash{}, err
+ }
+ storageTrieNodes += nodeSet.Len()
+ }
}
}
if len(s.stateObjectsDirty) > 0 {
@@ -1002,21 +1013,18 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if metrics.EnabledExpensive {
start = time.Now()
}
- // The onleaf func is called _serially_, so we can reuse the same account
- // for unmarshalling every time.
- var account types.StateAccount
- root, accountCommitted, err := s.trie.Commit(func(_ [][]byte, _ []byte, leaf []byte, parent common.Hash) error {
- if err := rlp.DecodeBytes(leaf, &account); err != nil {
- return nil
- }
- if account.Root != emptyRoot {
- s.db.TrieDB().Reference(account.Root, parent)
- }
- return nil
- })
+ root, nodeSet, err := s.trie.Commit(true)
if err != nil {
return common.Hash{}, err
}
+
+ // Merge the dirty nodes of account trie into global set
+ if nodeSet != nil {
+ if err := nodes.Merge(nodeSet); err != nil {
+ return common.Hash{}, err
+ }
+ accountTrieNodes = nodeSet.Len()
+ }
if metrics.EnabledExpensive {
s.AccountCommits += time.Since(start)
@@ -1024,8 +1032,8 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
storageUpdatedMeter.Mark(int64(s.StorageUpdated))
accountDeletedMeter.Mark(int64(s.AccountDeleted))
storageDeletedMeter.Mark(int64(s.StorageDeleted))
- accountCommittedMeter.Mark(int64(accountCommitted))
- storageCommittedMeter.Mark(int64(storageCommitted))
+ accountTrieCommittedMeter.Mark(int64(accountTrieNodes))
+ storageTriesCommittedMeter.Mark(int64(storageTrieNodes))
s.AccountUpdated, s.AccountDeleted = 0, 0
s.StorageUpdated, s.StorageDeleted = 0, 0
}
@@ -1049,6 +1057,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
}
+
+ // Flush the collected dirty trie nodes into the trie database.
+ if err := s.db.TrieDB().Update(nodes); err != nil {
+ return common.Hash{}, err
+ }
s.originalRoot = root
return root, err
}
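Taken together, the new StateDB commit flow is: commit every dirty storage trie, merge each returned set into one MergedNodeSet, commit the account trie with leaf collection enabled, and flush everything with a single TrieDB Update. A minimal sketch of that flow under the APIs introduced by this patch (commitState and its parameters are illustrative names, not part of the patch):

package example

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/trie"
)

// commitState commits one dirty storage trie plus the account trie, merges
// both node sets and flushes them into the trie database in a single Update.
func commitState(db *trie.Database, accountTrie, storageTrie *trie.SecureTrie) (common.Hash, error) {
    nodes := trie.NewMergedNodeSet()

    // Storage tries don't need leaf collection.
    _, stSet, err := storageTrie.Commit(false)
    if err != nil {
        return common.Hash{}, err
    }
    if stSet != nil {
        if err := nodes.Merge(stSet); err != nil {
            return common.Hash{}, err
        }
    }
    // collectLeaf=true: Update rebuilds the account->storage references
    // from the account leaves gathered here.
    root, accSet, err := accountTrie.Commit(true)
    if err != nil {
        return common.Hash{}, err
    }
    if accSet != nil {
        if err := nodes.Merge(accSet); err != nil {
            return common.Hash{}, err
        }
    }
    return root, db.Update(nodes)
}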
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 9d58440b68..dc2a9a4839 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -1346,9 +1346,11 @@ func getCodeByHash(hash common.Hash) []byte {
// makeAccountTrieNoStorage spits out a trie, along with the leafs
func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
- db := trie.NewDatabase(rawdb.NewMemoryDatabase())
- accTrie := trie.NewEmpty(db)
- var entries entrySlice
+ var (
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ accTrie = trie.NewEmpty(db)
+ entries entrySlice
+ )
for i := uint64(1); i <= uint64(n); i++ {
value, _ := rlp.EncodeToBytes(types.StateAccount{
Nonce: i,
@@ -1362,7 +1364,12 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
entries = append(entries, elem)
}
sort.Sort(entries)
- accTrie.Commit(nil)
+ // Commit the state changes into db and re-create the trie
+ // for accessing later.
+ root, nodes, _ := accTrie.Commit(false)
+ db.Update(trie.NewWithNodeSet(nodes))
+
+ accTrie, _ = trie.New(common.Hash{}, root, db)
return accTrie, entries
}
@@ -1374,8 +1381,8 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
entries entrySlice
boundaries []common.Hash
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
- trie = trie.NewEmpty(db)
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ accTrie = trie.NewEmpty(db)
)
// Initialize boundaries
var next common.Hash
@@ -1402,7 +1409,7 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
CodeHash: getCodeHash(uint64(i)),
})
elem := &kv{boundaries[i].Bytes(), value}
- trie.Update(elem.k, elem.v)
+ accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
// Fill other accounts if required
@@ -1414,12 +1421,17 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
CodeHash: getCodeHash(i),
})
elem := &kv{key32(i), value}
- trie.Update(elem.k, elem.v)
+ accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
sort.Sort(entries)
- trie.Commit(nil)
- return trie, entries
+ // Commit the state changes into db and re-create the trie
+ // for accessing later.
+ root, nodes, _ := accTrie.Commit(false)
+ db.Update(trie.NewWithNodeSet(nodes))
+
+ accTrie, _ = trie.New(common.Hash{}, root, db)
+ return accTrie, entries
}
// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts
@@ -1429,8 +1441,10 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie = trie.NewEmpty(db)
entries entrySlice
+ storageRoots = make(map[common.Hash]common.Hash)
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
+ nodes = trie.NewMergedNodeSet()
)
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
@@ -1440,9 +1454,8 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
codehash = getCodeHash(i)
}
// Create a storage trie
- stTrie, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
- stRoot := stTrie.Hash()
- stTrie.Commit(nil)
+ stRoot, stNodes, stEntries := makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), i, db)
+ nodes.Merge(stNodes)
value, _ := rlp.EncodeToBytes(types.StateAccount{
Nonce: i,
Balance: big.NewInt(int64(i)),
@@ -1453,12 +1466,25 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
- storageTries[common.BytesToHash(key)] = stTrie
+ storageRoots[common.BytesToHash(key)] = stRoot
storageEntries[common.BytesToHash(key)] = stEntries
}
sort.Sort(entries)
- accTrie.Commit(nil)
+ // Commit account trie
+ root, set, _ := accTrie.Commit(true)
+ nodes.Merge(set)
+
+ // Commit gathered dirty nodes into database
+ db.Update(nodes)
+
+ // Re-create tries with new root
+ accTrie, _ = trie.New(common.Hash{}, root, db)
+ for i := uint64(1); i <= uint64(accounts); i++ {
+ key := key32(i)
+ trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
+ storageTries[common.BytesToHash(key)] = trie
+ }
return accTrie, entries, storageTries, storageEntries
}
@@ -1468,8 +1494,10 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie = trie.NewEmpty(db)
entries entrySlice
+ storageRoots = make(map[common.Hash]common.Hash)
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
+ nodes = trie.NewMergedNodeSet()
)
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
@@ -1480,16 +1508,16 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
}
// Make a storage trie
var (
- stTrie *trie.Trie
+ stRoot common.Hash
+ stNodes *trie.NodeSet
stEntries entrySlice
)
if boundary {
- stTrie, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
+ stRoot, stNodes, stEntries = makeBoundaryStorageTrie(common.BytesToHash(key), slots, db)
} else {
- stTrie, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
+ stRoot, stNodes, stEntries = makeStorageTrieWithSeed(common.BytesToHash(key), uint64(slots), 0, db)
}
- stRoot := stTrie.Hash()
- stTrie.Commit(nil)
+ nodes.Merge(stNodes)
value, _ := rlp.EncodeToBytes(&types.StateAccount{
Nonce: i,
@@ -1501,18 +1529,37 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
// we reuse the same one for all accounts
- storageTries[common.BytesToHash(key)] = stTrie
+ storageRoots[common.BytesToHash(key)] = stRoot
storageEntries[common.BytesToHash(key)] = stEntries
}
sort.Sort(entries)
- accTrie.Commit(nil)
+ // Commit account trie
+ root, set, _ := accTrie.Commit(true)
+ nodes.Merge(set)
+
+ // Commit gathered dirty nodes into database
+ db.Update(nodes)
+
+ // Re-create tries with new root
+ accTrie, err := trie.New(common.Hash{}, root, db)
+ if err != nil {
+ panic(err)
+ }
+ for i := uint64(1); i <= uint64(accounts); i++ {
+ key := key32(i)
+ trie, err := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
+ if err != nil {
+ panic(err)
+ }
+ storageTries[common.BytesToHash(key)] = trie
+ }
return accTrie, entries, storageTries, storageEntries
}
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
-// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
-// that tries are unique.
+// committed root, the dirty node set and the sorted entries. The seeds can be
+// used to ensure that tries are unique.
-func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {
+func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
trie, _ := trie.New(owner, common.Hash{}, db)
var entries entrySlice
for i := uint64(1); i <= n; i++ {
@@ -1528,14 +1575,14 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas
entries = append(entries, elem)
}
sort.Sort(entries)
- trie.Commit(nil)
- return trie, entries
+ root, nodes, _ := trie.Commit(false)
+ return root, nodes, entries
}
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hash.
-func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (*trie.Trie, entrySlice) {
+func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
@@ -1579,8 +1626,8 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (*trie
entries = append(entries, elem)
}
sort.Sort(entries)
- trie.Commit(nil)
- return trie, entries
+ root, nodes, _ := trie.Commit(false)
+ return root, nodes, entries
}
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
diff --git a/light/postprocess.go b/light/postprocess.go
index c09b00e71c..7d45839391 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -217,7 +217,18 @@ func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) e
// Commit implements core.ChainIndexerBackend
func (c *ChtIndexerBackend) Commit() error {
- root, _, err := c.trie.Commit(nil)
+ root, nodes, err := c.trie.Commit(false)
+ if err != nil {
+ return err
+ }
+ // Commit trie changes into the trie database in case the node set is not nil.
+ if nodes != nil {
+ if err := c.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ return err
+ }
+ }
+ // Re-create trie with newly generated root and updated database.
+ c.trie, err = trie.New(common.Hash{}, root, c.triedb)
if err != nil {
return err
}
@@ -453,7 +464,19 @@ func (b *BloomTrieIndexerBackend) Commit() error {
b.trie.Delete(encKey[:])
}
}
- root, _, err := b.trie.Commit(nil)
+ root, nodes, err := b.trie.Commit(false)
+ if err != nil {
+ return err
+ }
+
+ if nodes != nil {
+ if err := b.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ return err
+ }
+ }
+
+ // Re-create trie with nelwy generated root and updated database.
+ b.trie, err = trie.New(common.Hash{}, root, b.triedb)
if err != nil {
return err
}
diff --git a/light/trie.go b/light/trie.go
index 931ba30cb4..a2ef8ebff3 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -137,11 +137,11 @@ func (t *odrTrie) TryDelete(key []byte) error {
})
}
-func (t *odrTrie) Commit(onleaf trie.LeafCallback) (common.Hash, int, error) {
+func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error) {
if t.trie == nil {
- return t.id.Root, 0, nil
+ return t.id.Root, nil, nil
}
- return t.trie.Commit(onleaf)
+ return t.trie.Commit(collectLeaf)
}
func (t *odrTrie) Hash() common.Hash {
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 772c776436..48dbd04610 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -115,8 +115,10 @@ func (k kvs) Swap(i, j int) {
// The function must return
// 1 if the fuzzer should increase priority of the
-// given input during subsequent fuzzing (for example, the input is lexically
-// correct and was parsed successfully);
+//
+// given input during subsequent fuzzing (for example, the input is lexically
+// correct and was parsed successfully);
+//
// -1 if the input must not be added to corpus even if gives new coverage; and
// 0 otherwise
// other values are reserved for future use.
@@ -174,10 +176,13 @@ func (f *fuzzer) fuzz() int {
return 0
}
// Flush trie -> database
- rootA, _, err := trieA.Commit(nil)
+ rootA, nodes, err := trieA.Commit(false)
if err != nil {
panic(err)
}
+ if nodes != nil {
+ dbA.Update(trie.NewWithNodeSet(nodes))
+ }
// Flush memdb -> disk (sponge)
dbA.Commit(rootA, false, nil)
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 2301721c93..b2f260fb58 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -124,8 +124,10 @@ func Generate(input []byte) randTest {
// The function must return
// 1 if the fuzzer should increase priority of the
-// given input during subsequent fuzzing (for example, the input is lexically
-// correct and was parsed successfully);
+//
+// given input during subsequent fuzzing (for example, the input is lexically
+// correct and was parsed successfully);
+//
// -1 if the input must not be added to corpus even if gives new coverage; and
// 0 otherwise
// other values are reserved for future use.
@@ -161,15 +163,18 @@ func runRandTest(rt randTest) error {
if string(v) != want {
rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
}
- case opCommit:
- _, _, rt[i].err = tr.Commit(nil)
case opHash:
tr.Hash()
- case opReset:
- hash, _, err := tr.Commit(nil)
+ case opCommit:
+ hash, nodes, err := tr.Commit(false)
if err != nil {
return err
}
+ if nodes != nil {
+ if err := triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ return err
+ }
+ }
newtr, err := trie.New(common.Hash{}, hash, triedb)
if err != nil {
return err
diff --git a/trie/committer.go b/trie/committer.go
index b74572ee27..3b0d6c5fe2 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -17,7 +17,6 @@
package trie
import (
- "errors"
"fmt"
"sync"
@@ -32,23 +31,20 @@ const leafChanSize = 200
// leaf represents a trie leaf value
type leaf struct {
- size int // size of the rlp data (estimate)
- hash common.Hash // hash of rlp data
- node node // the node to commit
+ blob []byte // raw blob of leaf
+ parent common.Hash // the hash of parent node
}
-// committer is a type used for the trie Commit operation. A committer has some
-// internal preallocated temp space, and also a callback that is invoked when
-// leaves are committed. The leafs are passed through the `leafCh`, to allow
-// some level of parallelism.
-// By 'some level' of parallelism, it's still the case that all leaves will be
-// processed sequentially - onleaf will never be called in parallel or out of order.
+// committer is a type used for the trie Commit operation. The committer will
+// capture all dirty nodes during the commit process and keep them cached in
+// insertion order.
type committer struct {
tmp sliceBuffer
sha crypto.KeccakState
- onleaf LeafCallback
- leafCh chan *leaf
+ owner common.Hash
+ nodes *NodeSet
+ collectLeaf bool
}
// committers live in a global sync.Pool
@@ -62,34 +58,28 @@ var committerPool = sync.Pool{
}
// newCommitter creates a new committer or picks one from the pool.
-func newCommitter() *committer {
- return committerPool.Get().(*committer)
-}
-
-func returnCommitterToPool(h *committer) {
- h.onleaf = nil
- h.leafCh = nil
- committerPool.Put(h)
+func newCommitter(owner common.Hash, collectLeaf bool) *committer {
+ return &committer{
+ nodes: NewNodeSet(owner),
+ collectLeaf: collectLeaf,
+ }
}
// Commit collapses a node down into a hash node and inserts it into the database
-func (c *committer) Commit(n node, db *Database) (hashNode, int, error) {
- if db == nil {
- return nil, 0, errors.New("no db provided")
- }
- h, committed, err := c.commit(n, db)
+func (c *committer) Commit(n node) (hashNode, *NodeSet, error) {
+ h, err := c.commit(nil, n)
if err != nil {
- return nil, 0, err
+ return nil, nil, err
}
- return h.(hashNode), committed, nil
+ return h.(hashNode), c.nodes, nil
}
// commit collapses a node down into a hash node and inserts it into the database
-func (c *committer) commit(n node, db *Database) (node, int, error) {
+func (c *committer) commit(path []byte, n node) (node, error) {
// if this path is clean, use available cached data
hash, dirty := n.cache()
if hash != nil && !dirty {
- return hash, 0, nil
+ return hash, nil
}
// Commit children, then parent, and remove the dirty flag.
switch cn := n.(type) {
@@ -99,36 +89,35 @@ func (c *committer) commit(n node, db *Database) (node, int, error) {
// If the child is fullNode, recursively commit,
// otherwise it can only be hashNode or valueNode.
- var childCommitted int
if _, ok := cn.Val.(*fullNode); ok {
- childV, committed, err := c.commit(cn.Val, db)
+ childV, err := c.commit(append(path, cn.Key...), cn.Val)
if err != nil {
- return nil, 0, err
+ return nil, err
}
- collapsed.Val, childCommitted = childV, committed
+ collapsed.Val = childV
}
// The key needs to be copied, since we're delivering it to database
collapsed.Key = hexToCompact(cn.Key)
- hashedNode := c.store(collapsed, db)
+ hashedNode := c.store(path, collapsed)
if hn, ok := hashedNode.(hashNode); ok {
- return hn, childCommitted + 1, nil
+ return hn, nil
}
- return collapsed, childCommitted, nil
+ return collapsed, nil
case *fullNode:
- hashedKids, childCommitted, err := c.commitChildren(cn, db)
+ hashedKids, err := c.commitChildren(path, cn)
if err != nil {
- return nil, 0, err
+ return nil, err
}
collapsed := cn.copy()
collapsed.Children = hashedKids
- hashedNode := c.store(collapsed, db)
+ hashedNode := c.store(path, collapsed)
if hn, ok := hashedNode.(hashNode); ok {
- return hn, childCommitted + 1, nil
+ return hn, nil
}
- return collapsed, childCommitted, nil
+ return collapsed, nil
case hashNode:
- return cn, 0, nil
+ return cn, nil
default:
// nil, valuenode shouldn't be committed
panic(fmt.Sprintf("%T: invalid node: %v", n, n))
@@ -136,11 +125,8 @@ func (c *committer) commit(n node, db *Database) (node, int, error) {
}
// commitChildren commits the children of the given fullnode
-func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, int, error) {
- var (
- committed int
- children [17]node
- )
+func (c *committer) commitChildren(path []byte, n *fullNode) ([17]node, error) {
+ var children [17]node
for i := 0; i < 16; i++ {
child := n.Children[i]
if child == nil {
@@ -156,94 +142,58 @@ func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, int, er
// Commit the child recursively and store the "hashed" value.
// Note the returned node can be some embedded nodes, so it's
// possible the type is not hashNode.
- hashed, childCommitted, err := c.commit(child, db)
+ hashed, err := c.commit(append(path, byte(i)), child)
if err != nil {
- return children, 0, err
+ return children, err
}
children[i] = hashed
- committed += childCommitted
}
// For the 17th child, it's possible the type is valuenode.
if n.Children[16] != nil {
children[16] = n.Children[16]
}
- return children, committed, nil
+ return children, nil
}
// store hashes the node n and if we have a storage layer specified, it writes
// the key/value pair to it and tracks any node->child references as well as any
// node->external trie references.
-func (c *committer) store(n node, db *Database) node {
+func (c *committer) store(path []byte, n node) node {
// Larger nodes are replaced by their hash and stored in the database.
- var (
- hash, _ = n.cache()
- size int
- )
+ hash, _ := n.cache()
+
+ // This was not generated - must be a small node stored in the parent.
+ // In theory, we should check if the node is a leaf here (embedded nodes
+ // usually are leaf nodes). But small values (less than 32 bytes) are not
+ // our target (we only collect leaves from the account trie).
if hash == nil {
- // This was not generated - must be a small node stored in the parent.
- // In theory, we should apply the leafCall here if it's not nil(embedded
- // node usually contains value). But small value(less than 32bytes) is
- // not our target.
return n
- } else {
- // We have the hash already, estimate the RLP encoding-size of the node.
- // The size is used for mem tracking, does not need to be exact
- size = estimateSize(n)
}
- // If we're using channel-based leaf-reporting, send to channel.
- // The leaf channel will be active only when there an active leaf-callback
- if c.leafCh != nil {
- c.leafCh <- &leaf{
- size: size,
- hash: common.BytesToHash(hash),
- node: n,
+ // We have the hash already, estimate the RLP encoding-size of the node.
+ // The size is used for mem tracking, does not need to be exact
+ var (
+ size = estimateSize(n)
+ nhash = common.BytesToHash(hash)
+ mnode = &memoryNode{
+ hash: nhash,
+ node: simplifyNode(n),
+ size: uint16(size),
}
- } else if db != nil {
- // No leaf-callback used, but there's still a database. Do serial
- // insertion
- db.lock.Lock()
- db.insert(common.BytesToHash(hash), size, n)
- db.lock.Unlock()
- }
- return hash
-}
-
-// commitLoop does the actual insert + leaf callback for nodes.
-func (c *committer) commitLoop(db *Database) {
- for item := range c.leafCh {
- var (
- hash = item.hash
- size = item.size
- n = item.node
- )
- // We are pooling the trie nodes into an intermediate memory cache
- db.lock.Lock()
- db.insert(hash, size, n)
- db.lock.Unlock()
+ )
- if c.onleaf != nil {
- switch n := n.(type) {
- case *shortNode:
- if child, ok := n.Val.(valueNode); ok {
- c.onleaf(nil, nil, child, hash)
- }
- case *fullNode:
- // For children in range [0, 15], it's impossible
- // to contain valueNode. Only check the 17th child.
- if n.Children[16] != nil {
- c.onleaf(nil, nil, n.Children[16].(valueNode), hash)
- }
+ // Collect the dirty node into the nodeset for return.
+ c.nodes.add(string(path), mnode)
+ // Collect the corresponding leaf node if it's required. We don't check
+ // the full node since it's impossible to store a value in a fullNode. The
+ // key length of leaves should be exactly the same.
+ if c.collectLeaf {
+ if sn, ok := n.(*shortNode); ok {
+ if val, ok := sn.Val.(valueNode); ok {
+ c.nodes.addLeaf(&leaf{blob: val, parent: nhash})
}
}
}
-}
-
-func (c *committer) makeHashNode(data []byte) hashNode {
- n := make(hashNode, c.sha.Size())
- c.sha.Reset()
- c.sha.Write(data)
- c.sha.Read(n)
- return n
+ return hash
}
// estimateSize estimates the size of an rlp-encoded node, without actually
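The committer now keys every dirty node by its path from the root: a shortNode extends the path with its key nibbles, a fullNode with the child's slot index, and a node is only recorded after all of its children — exactly the ordering Database.Update later relies on. A conceptual sketch of that traversal, assuming it sits inside package trie next to committer.go (walkPaths and visit are illustrative names, not part of the patch):

package trie

// walkPaths mirrors how the committer derives node paths (the real logic
// lives in committer.commit). Children are always visited, and therefore
// recorded, before their parent.
func walkPaths(path []byte, n node, visit func(path []byte)) {
    switch cn := n.(type) {
    case *shortNode:
        // Extension/leaf: the child path is extended by the key nibbles.
        walkPaths(append(path, cn.Key...), cn.Val, visit)
        visit(path)
    case *fullNode:
        // Branch: each child path is extended by the child's slot index.
        for i := 0; i < 16; i++ {
            if cn.Children[i] != nil {
                walkPaths(append(path, byte(i)), cn.Children[i], visit)
            }
        }
        visit(path)
    }
    // hashNode and valueNode are clean or embedded and are not recorded.
}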
diff --git a/trie/database.go b/trie/database.go
index 744c2b0f81..6453f2bf0b 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -28,6 +28,7 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -316,7 +317,7 @@ func (db *Database) DiskDB() ethdb.KeyValueStore {
return db.diskdb
}
-// insert inserts a collapsed trie node into the memory database.
+// insert inserts a simplified trie node into the memory database.
// The blob size must be specified to allow proper size tracking.
// All nodes inserted by this function will be reference tracked
// and in theory should only used for **trie nodes** insertion.
@@ -329,7 +330,7 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
// Create the cached entry for this node
entry := &cachedNode{
- node: simplifyNode(node),
+ node: node,
size: uint16(size),
flushPrev: db.newest,
}
@@ -773,6 +774,39 @@ func (c *cleaner) Delete(key []byte) error {
panic("not implemented")
}
+// Update inserts the dirty nodes of the provided nodeset into the database
+// and links the account trie with multiple storage tries if necessary.
+func (db *Database) Update(nodes *MergedNodeSet) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+ // Insert dirty nodes into the database. Within the same tree, it must be
+ // ensured that children are inserted first and the parent afterwards, so
+ // that children can be linked with their parent correctly. No ordering is
+ // required between different tries (account trie, storage tries).
+ for owner, subset := range nodes.sets {
+ for _, path := range subset.paths {
+ n, ok := subset.nodes[path]
+ if !ok {
+ return fmt.Errorf("missing node %x %v", owner, path)
+ }
+ db.insert(n.hash, int(n.size), n.node)
+ }
+ }
+ if set, present := nodes.sets[common.Hash{}]; present {
+ for _, leaf := range set.leaves {
+ // Iterate over the account leaves and reference each storage root to the leaf's parent node
+ var account types.StateAccount
+ if err := rlp.DecodeBytes(leaf.blob, &account); err != nil {
+ return err
+ }
+ if account.Root != emptyRoot {
+ db.reference(account.Root, leaf.parent)
+ }
+ }
+ }
+ return nil
+}
+
// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
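Because Update only populates the in-memory layer (and rebuilds the account-to-storage references from the collected leaves), it has to run before Database.Commit, which walks the reference graph from the root when persisting to disk. A minimal sketch of that ordering, assuming the reworked trie APIs (flushToDisk is an illustrative name):

package example

import (
    "github.com/ethereum/go-ethereum/trie"
)

// flushToDisk shows the required ordering: Update first, Commit second.
func flushToDisk(triedb *trie.Database, accTrie *trie.Trie) error {
    // Commit with collectLeaf=true so the set carries the account leaves
    // that Update needs to rebuild the account->storage references.
    root, set, err := accTrie.Commit(true)
    if err != nil {
        return err
    }
    if set != nil {
        if err := triedb.Update(trie.NewWithNodeSet(set)); err != nil {
            return err
        }
    }
    // Persist last: Commit walks the reference graph from root, and those
    // references only exist after Update has inserted the nodes.
    return triedb.Commit(root, false, nil)
}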
diff --git a/trie/iterator.go b/trie/iterator.go
index 81316c4551..d37cccd603 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -361,8 +361,7 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
}
}
}
- resolved, err := it.trie.resolveHash(hash, path)
- return resolved, err
+ return it.trie.resolveHash(hash, path)
}
func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 2dffd8ff07..77a0fd3d67 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -31,7 +31,7 @@ import (
)
func TestEmptyIterator(t *testing.T) {
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
iter := trie.NodeIterator(nil)
seen := make(map[string]struct{})
@@ -44,7 +44,8 @@ func TestEmptyIterator(t *testing.T) {
}
func TestIterator(t *testing.T) {
- trie := newEmpty()
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -59,8 +60,12 @@ func TestIterator(t *testing.T) {
all[val.k] = val.v
trie.Update([]byte(val.k), []byte(val.v))
}
- trie.Commit(nil)
-
+ root, nodes, err := trie.Commit(false)
+ if err != nil {
+ t.Fatalf("Failed to commit trie %v", err)
+ }
+ db.Update(NewWithNodeSet(nodes))
+ trie, _ = New(common.Hash{}, root, db)
found := make(map[string]string)
it := NewIterator(trie.NodeIterator(nil))
for it.Next() {
@@ -80,7 +85,7 @@ type kv struct {
}
func TestIteratorLargeData(t *testing.T) {
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := make(map[string]*kv)
for i := byte(0); i < 255; i++ {
@@ -173,7 +178,7 @@ var testdata2 = []kvs{
}
func TestIteratorSeek(t *testing.T) {
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for _, val := range testdata1 {
trie.Update([]byte(val.k), []byte(val.v))
}
@@ -214,17 +219,23 @@ func checkIteratorOrder(want []kvs, it *Iterator) error {
}
func TestDifferenceIterator(t *testing.T) {
- triea := newEmpty()
+ dba := NewDatabase(rawdb.NewMemoryDatabase())
+ triea := NewEmpty(dba)
for _, val := range testdata1 {
triea.Update([]byte(val.k), []byte(val.v))
}
- triea.Commit(nil)
+ rootA, nodesA, _ := triea.Commit(false)
+ dba.Update(NewWithNodeSet(nodesA))
+ triea, _ = New(common.Hash{}, rootA, dba)
- trieb := newEmpty()
+ dbb := NewDatabase(rawdb.NewMemoryDatabase())
+ trieb := NewEmpty(dbb)
for _, val := range testdata2 {
trieb.Update([]byte(val.k), []byte(val.v))
}
- trieb.Commit(nil)
+ rootB, nodesB, _ := trieb.Commit(false)
+ dbb.Update(NewWithNodeSet(nodesB))
+ trieb, _ = New(common.Hash{}, rootB, dbb)
found := make(map[string]string)
di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
@@ -250,17 +261,23 @@ func TestDifferenceIterator(t *testing.T) {
}
func TestUnionIterator(t *testing.T) {
- triea := newEmpty()
+ dba := NewDatabase(rawdb.NewMemoryDatabase())
+ triea := NewEmpty(dba)
for _, val := range testdata1 {
triea.Update([]byte(val.k), []byte(val.v))
}
- triea.Commit(nil)
+ rootA, nodesA, _ := triea.Commit(false)
+ dba.Update(NewWithNodeSet(nodesA))
+ triea, _ = New(common.Hash{}, rootA, dba)
- trieb := newEmpty()
+ dbb := NewDatabase(rawdb.NewMemoryDatabase())
+ trieb := NewEmpty(dbb)
for _, val := range testdata2 {
trieb.Update([]byte(val.k), []byte(val.v))
}
- trieb.Commit(nil)
+ rootB, nodesB, _ := trieb.Commit(false)
+ dbb.Update(NewWithNodeSet(nodesB))
+ trieb, _ = New(common.Hash{}, rootB, dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
it := NewIterator(di)
@@ -316,7 +333,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
for _, val := range testdata1 {
tr.Update([]byte(val.k), []byte(val.v))
}
- tr.Commit(nil)
+ _, nodes, _ := tr.Commit(false)
+ triedb.Update(NewWithNodeSet(nodes))
if !memonly {
triedb.Commit(tr.Hash(), true, nil)
}
@@ -407,7 +425,8 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
for _, val := range testdata1 {
ctr.Update([]byte(val.k), []byte(val.v))
}
- root, _, _ := ctr.Commit(nil)
+ root, nodes, _ := ctr.Commit(false)
+ triedb.Update(NewWithNodeSet(nodes))
if !memonly {
triedb.Commit(root, true, nil)
}
@@ -525,7 +544,8 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
val = crypto.Keccak256(val)
trie.Update(key, val)
}
- trie.Commit(nil)
+ _, nodes, _ := trie.Commit(false)
+ triedb.Update(NewWithNodeSet(nodes))
// Return the generated trie
return triedb, trie, logDb
}
diff --git a/trie/nodeset.go b/trie/nodeset.go
new file mode 100644
index 0000000000..4825ecaebf
--- /dev/null
+++ b/trie/nodeset.go
@@ -0,0 +1,95 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// memoryNode is all the information we know about a single cached trie node
+// in the memory.
+type memoryNode struct {
+ hash common.Hash // Node hash, computed by hashing rlp value
+ size uint16 // Byte size of the useful cached data
+ node node // Cached collapsed trie node, or raw rlp data
+}
+
+// NodeSet contains all dirty nodes collected during the commit operation.
+// Each node is keyed by path. It's not thread-safe to use.
+type NodeSet struct {
+ owner common.Hash // the identifier of the trie
+ paths []string // the paths of dirty nodes, sorted by insertion order
+ nodes map[string]*memoryNode // the map of dirty nodes, keyed by node path
+ leaves []*leaf // the list of dirty leaves
+}
+
+// NewNodeSet initializes an empty node set to be used for tracking dirty nodes
+// from a specific account or storage trie. The owner is zero for the account
+// trie and the owning account address hash for storage tries.
+func NewNodeSet(owner common.Hash) *NodeSet {
+ return &NodeSet{
+ owner: owner,
+ nodes: make(map[string]*memoryNode),
+ }
+}
+
+// add caches node with provided path and node object.
+func (set *NodeSet) add(path string, node *memoryNode) {
+ set.paths = append(set.paths, path)
+ set.nodes[path] = node
+}
+
+// addLeaf caches the provided leaf node.
+func (set *NodeSet) addLeaf(leaf *leaf) {
+ set.leaves = append(set.leaves, leaf)
+}
+
+// Len returns the number of dirty nodes contained in the set.
+func (set *NodeSet) Len() int {
+ return len(set.nodes)
+}
+
+// MergedNodeSet represents a merged dirty node set for a group of tries.
+type MergedNodeSet struct {
+ sets map[common.Hash]*NodeSet
+}
+
+// NewMergedNodeSet initializes an empty merged set.
+func NewMergedNodeSet() *MergedNodeSet {
+ return &MergedNodeSet{sets: make(map[common.Hash]*NodeSet)}
+}
+
+// NewWithNodeSet constructs a merged nodeset with the provided single set.
+func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
+ merged := NewMergedNodeSet()
+ merged.Merge(set)
+ return merged
+}
+
+// Merge merges the provided dirty nodes of a trie into the set. The assumption
+// is that no set belonging to the same trie will be merged twice.
+func (set *MergedNodeSet) Merge(other *NodeSet) error {
+ _, present := set.sets[other.owner]
+ if present {
+ return fmt.Errorf("duplicate trie for owner %#x", other.owner)
+ }
+ set.sets[other.owner] = other
+ return nil
+}
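The merged set enforces one NodeSet per trie owner, so accidentally committing and merging the same trie twice surfaces as an error instead of silently overwriting nodes. A small sketch of that contract, assuming the exported API above (the owner values are illustrative):

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/trie"
)

func main() {
    merged := trie.NewMergedNodeSet()

    account := trie.NewNodeSet(common.Hash{})            // account trie: zero owner
    storage := trie.NewNodeSet(common.HexToHash("0xaa")) // storage trie: account hash owner

    fmt.Println(merged.Merge(account)) // <nil>
    fmt.Println(merged.Merge(storage)) // <nil>
    fmt.Println(merged.Merge(account)) // error: duplicate trie for owner 0x00...
}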
diff --git a/trie/proof.go b/trie/proof.go
index 2c2da9cb82..db113eecbd 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -22,6 +22,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -36,9 +37,12 @@ import (
// with the node that proves the absence of the key.
func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
// Collect all nodes on the path to key.
+ var (
+ prefix []byte
+ nodes []node
+ tn = t.root
+ )
key = keybytesToHex(key)
- var nodes []node
- tn := t.root
for len(key) > 0 && tn != nil {
switch n := tn.(type) {
case *shortNode:
@@ -47,16 +51,18 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
tn = nil
} else {
tn = n.Val
+ prefix = append(prefix, n.Key...)
key = key[len(n.Key):]
}
nodes = append(nodes, n)
case *fullNode:
tn = n.Children[key[0]]
+ prefix = append(prefix, key[0])
key = key[1:]
nodes = append(nodes, n)
case hashNode:
var err error
- tn, err = t.resolveHash(n, nil)
+ tn, err = t.resolveHash(n, prefix)
if err != nil {
log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
return err
@@ -552,7 +558,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
}
// Rebuild the trie with the leaf stream, the shape of trie
// should be same with the original one.
- tr := newWithRootNode(root)
+ tr := &Trie{root: root, db: NewDatabase(rawdb.NewMemoryDatabase())}
if empty {
tr.root = nil
}
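With path-based resolution, Prove resolves hash nodes using the accumulated key prefix, but its external behavior is unchanged. A minimal proof round-trip under the reworked package, for reference (names and values are illustrative):

package example

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb/memorydb"
    "github.com/ethereum/go-ethereum/trie"
)

// proveAndVerify builds a tiny trie, proves one key and verifies the proof
// against the root hash alone.
func proveAndVerify() ([]byte, error) {
    tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
    tr.Update([]byte("key"), []byte("value"))

    proofDb := memorydb.New()
    if err := tr.Prove([]byte("key"), 0, proofDb); err != nil {
        return nil, err
    }
    return trie.VerifyProof(tr.Hash(), []byte("key"), proofDb)
}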
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 380856fa92..2cf1d325f8 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -161,12 +161,12 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte {
return t.preimages.preimage(common.BytesToHash(shaKey))
}
-// Commit writes all nodes and the secure hash pre-images to the trie's database.
-// Nodes are stored with their sha3 hash as the key.
-//
-// Committing flushes nodes from memory. Subsequent Get calls will load nodes
-// from the database.
-func (t *SecureTrie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
+// Commit collects all dirty nodes in the trie and replaces them with the
+// corresponding node hash. All collected nodes (including dirty leaves if
+// collectLeaf is true) will be encapsulated into a nodeset for return.
+// The returned nodeset can be nil if the trie is clean (nothing to commit).
+// All cached preimages will also be flushed if preimage recording is enabled.
+func (t *SecureTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
// Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 {
if t.preimages != nil { // Ugly direct check but avoids the below write lock
@@ -180,7 +180,7 @@ func (t *SecureTrie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
t.secKeyCache = make(map[string][]byte)
}
// Commit the trie to its intermediate node database
- return t.trie.Commit(onleaf)
+ return t.trie.Commit(collectLeaf)
}
// Hash returns the root hash of SecureTrie. It does not write to the
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index beea5845ad..c18d399543 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -57,7 +57,7 @@ func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
trie.Update(key, val)
}
}
- trie.Commit(nil)
+ trie.Commit(false)
// Return the generated trie
return triedb, trie, content
@@ -135,7 +135,7 @@ func TestSecureTrieConcurrency(t *testing.T) {
tries[index].Update(key, val)
}
}
- tries[index].Commit(nil)
+ tries[index].Commit(false)
}(i)
}
// Wait for all threads to finish
diff --git a/trie/sync_test.go b/trie/sync_test.go
index cd29c391fc..388aedd1f4 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -18,6 +18,7 @@ package trie
import (
"bytes"
+ "fmt"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -50,7 +51,13 @@ func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
trie.Update(key, val)
}
}
- trie.Commit(nil)
+ _, nodes, err := trie.Commit(false)
+ if err != nil {
+ panic(fmt.Errorf("failed to commit trie: %v", err))
+ }
+ if err := triedb.Update(NewWithNodeSet(nodes)); err != nil {
+ panic(fmt.Errorf("failed to commit db %v", err))
+ }
// Return the generated trie
return triedb, trie, content
diff --git a/trie/trie.go b/trie/trie.go
index f23c5ad546..dabe738ace 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -21,10 +21,8 @@ import (
"bytes"
"errors"
"fmt"
- "sync"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -55,21 +53,24 @@ var (
// for extracting the raw states(leaf nodes) with corresponding paths.
type LeafCallback func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error
-// Trie is a Merkle Patricia Trie.
-// The zero value is an empty trie with no database.
-// Use New to create a trie that sits on top of a database.
-//
-// Trie is not safe for concurrent use.
+// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
+// top of a Database. Whenever a trie performs a commit operation, the
+// generated nodes will be gathered and returned in a set. Once a trie is
+// committed, it's not usable anymore. Callers have to re-create the trie
+// with the new root based on the updated trie database.
+//
+// Trie is not safe for concurrent use.
type Trie struct {
- db *Database
- root node
owner common.Hash
-
- // Keep track of the number leaves which have been inserted since the last
+ root node
+ // Keep track of the number of leaves which have been inserted since the last
// hashing operation. This number will not directly map to the number of
// actually unhashed nodes
unhashed int
+ // db is the handler the trie can retrieve nodes from. It's
+ // only used for reading and not available for writing.
+ db *Database
+ // tracer is the tool to track the trie changes.
+ // It will be reset after each commit operation.
tracer *tracer
}
@@ -78,16 +79,6 @@ func (t *Trie) newFlag() nodeFlag {
return nodeFlag{dirty: true}
}
-// newWithRootNode initializes the trie with the given root node.
-// It's only used by range prover.
-func newWithRootNode(root node) *Trie {
- return &Trie{
- root: root,
- //tracer: newTracer(),
- db: NewDatabase(rawdb.NewMemoryDatabase()),
- }
-}
-
// New creates a trie with an existing root node from db and an assigned
// owner for storage proximity.
//
@@ -96,23 +87,9 @@ func newWithRootNode(root node) *Trie {
// New will panic if db is nil and returns a MissingNodeError if root does
// not exist in the database. Accessing the trie loads nodes from db on demand.
func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
- return newTrie(owner, root, db)
-}
-
-// NewEmpty is a shortcut to create empty tree. It's mostly used in tests.
-func NewEmpty(db *Database) *Trie {
- tr, _ := newTrie(common.Hash{}, common.Hash{}, db)
- return tr
-}
-
-// newTrie is the internal function used to construct the trie with given parameters.
-func newTrie(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
- if db == nil {
- panic("trie.New called without a database")
- }
trie := &Trie{
- db: db,
owner: owner,
+ db: db,
//tracer: newTracer(),
}
if root != (common.Hash{}) && root != emptyRoot {
@@ -125,6 +102,12 @@ func newTrie(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
return trie, nil
}
+// NewEmpty is a shortcut to create empty tree. It's mostly used in tests.
+func NewEmpty(db *Database) *Trie {
+ tr, _ := New(common.Hash{}, common.Hash{}, db)
+ return tr
+}
+
// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
// the key after the given start key.
func (t *Trie) NodeIterator(start []byte) NodeIterator {
@@ -224,6 +207,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
return nil, origNode, 0, errors.New("non-consensus node")
}
blob, err := t.db.Node(common.BytesToHash(hash))
+
return blob, origNode, 1, err
}
// Path still needs to be traversed, descend into children
@@ -524,7 +508,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// shortNode{..., shortNode{...}}. Since the entry
// might not be loaded yet, resolve it just for this
// check.
- cnode, err := t.resolve(n.Children[pos], prefix)
+ cnode, err := t.resolve(n.Children[pos], append(prefix, byte(pos))) // Prefix mostly for tracking path.
if err != nil {
return false, nil, err
}
@@ -584,6 +568,8 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
return n, nil
}
+// resolveHash loads node from the underlying database with the provided
+// node hash and path prefix.
func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
hash := common.BytesToHash(n)
if node := t.db.node(hash); node != nil {
@@ -600,23 +586,21 @@ func (t *Trie) Hash() common.Hash {
return common.BytesToHash(hash.(hashNode))
}
-// Commit writes all nodes to the trie's memory database, tracking the internal
-// and external (for account tries) references.
-func (t *Trie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
- if t.db == nil {
- panic("commit called on trie with nil database")
- }
-
+// Commit collects all dirty nodes in the trie and replaces them with the
+// corresponding node hash. All collected nodes (including dirty leaves if
+// collectLeaf is true) will be encapsulated into a nodeset for return.
+// The returned nodeset can be nil if the trie is clean (nothing to commit).
+// Once the trie is committed, it's not usable anymore. A new trie must
+// be created with the new root and updated trie database for following usage.
+func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
defer t.tracer.reset()
if t.root == nil {
- return emptyRoot, 0, nil
+ return emptyRoot, nil, nil
}
// Derive the hash for all dirty nodes first. We hold the assumption
// in the following procedure that all nodes are hashed.
rootHash := t.Hash()
- h := newCommitter()
- defer returnCommitterToPool(h)
// Do a quick check if we really need to commit, before we spin
// up goroutines. This can happen e.g. if we load a trie for reading storage
@@ -625,32 +609,16 @@ func (t *Trie) Commit(onleaf LeafCallback) (common.Hash, int, error) {
// Replace the root node with the origin hash in order to
// ensure all resolved nodes are dropped after the commit.
t.root = hashedNode
- return rootHash, 0, nil
- }
- var wg sync.WaitGroup
- if onleaf != nil {
- h.onleaf = onleaf
- h.leafCh = make(chan *leaf, leafChanSize)
- wg.Add(1)
- go func() {
- defer wg.Done()
- h.commitLoop(t.db)
- }()
- }
- newRoot, committed, err := h.Commit(t.root, t.db)
- if onleaf != nil {
- // The leafch is created in newCommitter if there was an onleaf callback
- // provided. The commitLoop only _reads_ from it, and the commit
- // operation was the sole writer. Therefore, it's safe to close this
- // channel here.
- close(h.leafCh)
- wg.Wait()
+ return rootHash, nil, nil
}
+ h := newCommitter(t.owner, collectLeaf)
+ newRoot, nodes, err := h.Commit(t.root)
if err != nil {
- return common.Hash{}, 0, err
+ return common.Hash{}, nil, err
}
+
t.root = newRoot
- return rootHash, committed, nil
+ return rootHash, nodes, nil
}
// hashRoot calculates the root hash of the given trie
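Since a committed trie is no longer usable, every call site in this patch follows the same commit-and-reopen cycle: commit, flush the returned set via Update, then open a fresh trie at the new root. A minimal sketch of the cycle, assuming the reworked constructors (reopenAfterCommit is an illustrative name):

package example

import (
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/trie"
)

// reopenAfterCommit demonstrates the commit-and-reopen cycle.
func reopenAfterCommit() (*trie.Trie, error) {
    db := trie.NewDatabase(rawdb.NewMemoryDatabase())
    tr := trie.NewEmpty(db)
    tr.Update([]byte("key"), []byte("value"))

    root, nodes, err := tr.Commit(false)
    if err != nil {
        return nil, err
    }
    // A clean trie returns a nil set; only update the database otherwise.
    if nodes != nil {
        if err := db.Update(trie.NewWithNodeSet(nodes)); err != nil {
            return nil, err
        }
    }
    // The committed trie must not be reused; open a fresh one at the root.
    return trie.New(common.Hash{}, root, db)
}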
@@ -677,14 +645,10 @@ func (t *Trie) Reset() {
// Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie {
return &Trie{
- db: t.db,
root: t.root,
+ owner: t.owner,
unhashed: t.unhashed,
+ db: t.db,
tracer: t.tracer.copy(),
}
}
-
-// Owner returns the associated trie owner.
-func (t *Trie) Owner() common.Hash {
- return t.owner
-}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 095df36481..89851c3b81 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -25,7 +25,6 @@ import (
"io/ioutil"
"math/big"
"math/rand"
- "os"
"reflect"
"testing"
"testing/quick"
@@ -92,7 +91,8 @@ func testMissingNode(t *testing.T, memonly bool) {
trie := NewEmpty(triedb)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
- root, _, _ := trie.Commit(nil)
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(NewWithNodeSet(nodes))
if !memonly {
triedb.Commit(root, true, nil)
}
@@ -158,7 +158,7 @@ func testMissingNode(t *testing.T, memonly bool) {
}
func TestInsert(t *testing.T) {
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
@@ -174,7 +174,7 @@ func TestInsert(t *testing.T) {
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
- root, _, err := trie.Commit(nil)
+ root, _, err := trie.Commit(false)
if err != nil {
t.Fatalf("commit error: %v", err)
}
@@ -184,7 +184,8 @@ func TestInsert(t *testing.T) {
}
func TestGet(t *testing.T) {
- trie := newEmpty()
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(db)
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
updateString(trie, "dogglesworth", "cat")
@@ -203,12 +204,14 @@ func TestGet(t *testing.T) {
if i == 1 {
return
}
- trie.Commit(nil)
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+ trie, _ = New(common.Hash{}, root, db)
}
}
func TestDelete(t *testing.T) {
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -235,7 +238,7 @@ func TestDelete(t *testing.T) {
}
func TestEmptyValues(t *testing.T) {
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -259,7 +262,8 @@ func TestEmptyValues(t *testing.T) {
}
func TestReplication(t *testing.T) {
- trie := newEmpty()
+ triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(triedb)
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -272,13 +276,14 @@ func TestReplication(t *testing.T) {
for _, val := range vals {
updateString(trie, val.k, val.v)
}
- exp, _, err := trie.Commit(nil)
+ exp, nodes, err := trie.Commit(false)
if err != nil {
t.Fatalf("commit error: %v", err)
}
+ triedb.Update(NewWithNodeSet(nodes))
// create a new trie on top of the database and check that lookups work.
- trie2, err := New(common.Hash{}, exp, trie.db)
+ trie2, err := New(common.Hash{}, exp, triedb)
if err != nil {
t.Fatalf("can't recreate trie at %x: %v", exp, err)
}
@@ -287,13 +292,21 @@ func TestReplication(t *testing.T) {
t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
}
}
- hash, _, err := trie2.Commit(nil)
+ hash, nodes, err := trie2.Commit(false)
if err != nil {
t.Fatalf("commit error: %v", err)
}
if hash != exp {
t.Errorf("root failure. expected %x got %x", exp, hash)
}
+ // recreate the trie after commit
+ if nodes != nil {
+ triedb.Update(NewWithNodeSet(nodes))
+ }
+ trie2, err = New(common.Hash{}, hash, triedb)
+ if err != nil {
+ t.Fatalf("can't recreate trie at %x: %v", exp, err)
+ }
// perform some insertions on the new trie.
vals2 := []struct{ k, v string }{
@@ -316,7 +329,7 @@ func TestReplication(t *testing.T) {
}
func TestLargeValue(t *testing.T) {
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
trie.Update([]byte("key1"), []byte{99, 99, 99, 99})
trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32))
trie.Hash()
@@ -373,7 +386,6 @@ const (
opGet
opCommit
opHash
- opReset
opItercheckhash
opMax // boundary value, not an actual op
)
@@ -432,16 +444,17 @@ func runRandTest(rt randTest) bool {
if string(v) != want {
rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
}
- case opCommit:
- _, _, rt[i].err = tr.Commit(nil)
case opHash:
tr.Hash()
- case opReset:
- hash, _, err := tr.Commit(nil)
+ case opCommit:
+ hash, nodes, err := tr.Commit(false)
if err != nil {
rt[i].err = err
return false
}
+ if nodes != nil {
+ triedb.Update(NewWithNodeSet(nodes))
+ }
newtr, err := New(common.Hash{}, hash, triedb)
if err != nil {
rt[i].err = err
@@ -483,7 +496,8 @@ func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
const benchElemCount = 20000
func benchGet(b *testing.B, commit bool) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(triedb)
if commit {
_, tmpdb := tempDB()
trie = NewEmpty(tmpdb)
@@ -494,21 +508,12 @@ func benchGet(b *testing.B, commit bool) {
trie.Update(k, k)
}
binary.LittleEndian.PutUint64(k, benchElemCount/2)
- if commit {
- trie.Commit(nil)
- }
b.ResetTimer()
for i := 0; i < b.N; i++ {
trie.Get(k)
}
b.StopTimer()
-
- if commit {
- ldb := trie.db.diskdb.(*leveldb.Database)
- ldb.Close()
- os.RemoveAll(ldb.Path())
- }
}
func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
@@ -563,22 +568,18 @@ func BenchmarkHash(b *testing.B) {
// insert into the trie before measuring the hashing.
func BenchmarkCommitAfterHash(b *testing.B) {
b.Run("no-onleaf", func(b *testing.B) {
- benchmarkCommitAfterHash(b, nil)
+ benchmarkCommitAfterHash(b, false)
})
- var a types.StateAccount
- onleaf := func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
- rlp.DecodeBytes(leaf, &a)
- return nil
- }
+
b.Run("with-onleaf", func(b *testing.B) {
- benchmarkCommitAfterHash(b, onleaf)
+ benchmarkCommitAfterHash(b, true)
})
}
-func benchmarkCommitAfterHash(b *testing.B, onleaf LeafCallback) {
+func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
// Make the random benchmark deterministic
addresses, accounts := makeAccounts(b.N)
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -586,7 +587,7 @@ func benchmarkCommitAfterHash(b *testing.B, onleaf LeafCallback) {
trie.Hash()
b.ResetTimer()
b.ReportAllocs()
- trie.Commit(onleaf)
+ trie.Commit(collectLeaf)
}
func TestTinyTrie(t *testing.T) {
@@ -618,19 +619,19 @@ func TestTinyTrie(t *testing.T) {
func TestCommitAfterHash(t *testing.T) {
// Create a realistic account trie to hash
addresses, accounts := makeAccounts(1000)
- trie := newEmpty()
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Insert the accounts into the trie and hash it
trie.Hash()
- trie.Commit(nil)
+ trie.Commit(false)
root := trie.Hash()
exp := common.HexToHash("72f9d3f3fe1e1dd7b8936442e7642aef76371472d94319900790053c493f3fe6")
if exp != root {
t.Errorf("got %x, exp %x", root, exp)
}
- root, _, _ = trie.Commit(nil)
+ root, _, _ = trie.Commit(false)
if exp != root {
t.Errorf("got %x, exp %x", root, exp)
}
@@ -739,7 +740,8 @@ func TestCommitSequence(t *testing.T) {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Flush trie -> database
- root, _, _ := trie.Commit(nil)
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
db.Commit(root, false, func(c common.Hash) {
// And spongify the callback-order
@@ -791,7 +793,8 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
trie.Update(key, val)
}
// Flush trie -> database
- root, _, _ := trie.Commit(nil)
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
db.Commit(root, false, func(c common.Hash) {
// And spongify the callback-order
@@ -816,8 +819,8 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
stTrie := NewStackTrie(stackTrieSponge)
- // Fill the trie with elements
- for i := 1; i < count; i++ {
+ // Fill the trie with elements; start from 0, otherwise the committed node set will be nil the first time.
+ for i := 0; i < count; i++ {
// For the stack trie, we need to do inserts in proper order
key := make([]byte, 32)
binary.BigEndian.PutUint64(key, uint64(i))
@@ -833,7 +836,8 @@ func TestCommitSequenceStackTrie(t *testing.T) {
stTrie.TryUpdate(key, val)
}
// Flush trie -> database
- root, _, _ := trie.Commit(nil)
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
db.Commit(root, false, nil)
// And flush stacktrie -> disk
@@ -878,7 +882,8 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
trie.TryUpdate(key, []byte{0x1})
stTrie.TryUpdate(key, []byte{0x1})
// Flush trie -> database
- root, _, _ := trie.Commit(nil)
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
db.Commit(root, false, nil)
// And flush stacktrie -> disk
@@ -998,7 +1003,7 @@ func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accou
// Insert the accounts into the trie and hash it
trie.Hash()
b.StartTimer()
- trie.Commit(nil)
+ trie.Commit(false)
b.StopTimer()
}
@@ -1043,12 +1048,14 @@ func BenchmarkDerefRootFixedSize(b *testing.B) {
func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
- trie := newEmpty()
+ triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(triedb)
for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
h := trie.Hash()
- trie.Commit(nil)
+ _, nodes, _ := trie.Commit(false)
+ triedb.Update(NewWithNodeSet(nodes))
b.StartTimer()
trie.db.Dereference(h)
b.StopTimer()
diff --git a/trie/utils_test.go b/trie/utils_test.go
index 589eca6242..ffae9ffad7 100644
--- a/trie/utils_test.go
+++ b/trie/utils_test.go
@@ -19,12 +19,14 @@ package trie
import (
"testing"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
)
// Tests if the trie diffs are tracked correctly.
func TestTrieTracer(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(db)
trie.tracer = newTracer()
// Insert a batch of entries, all the nodes should be marked as inserted
@@ -66,7 +68,10 @@ func TestTrieTracer(t *testing.T) {
}
// Commit the changes
- trie.Commit(nil)
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+ trie, _ = New(common.Hash{}, root, db)
+ trie.tracer = newTracer()
// Delete all the elements, check deletion set
for _, val := range vals {
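
The migrations above all follow the same pattern: Commit no longer takes a
LeafCallback and no longer writes nodes into the trie database implicitly;
it returns the dirty node set, which the caller must flush explicitly before
the trie can be reopened. A minimal sketch of the new flow, using only names
that appear in the hunks above (package-internal test code, error handling
mostly elided):

    triedb := NewDatabase(rawdb.NewMemoryDatabase())
    tr := NewEmpty(triedb)
    tr.Update([]byte("key"), []byte("value"))

    // Commit hashes the trie and returns the dirty nodes; collectLeaf=false
    // skips collecting leaf values (the "with-onleaf" benchmark passes true).
    root, nodes, err := tr.Commit(false)
    if err != nil {
        panic(err)
    }
    if nodes != nil {
        // The caller is now responsible for flushing the node set.
        triedb.Update(NewWithNodeSet(nodes))
    }
    // Reopen the trie at the committed root (zero owner hash = account trie).
    tr, err = New(common.Hash{}, root, triedb)

This explicit Update step is what replaces the removed opReset case in the
random test: opCommit now both commits the node set and reopens the trie.
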
From df5fc51955095abd99619490586887d9b450e803 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Tue, 10 Sep 2024 14:14:47 +0700
Subject: [PATCH 05/41] all: rework genesis api (#567)
* core: store genesis allocation and recommit them if necessary (#24460)
* core: store genesis allocation and recommit them if necessary
* core: recover predefined genesis allocation if possible
* all: cleanup the APIs for initializing genesis (#25473)
* all: polish tests
* core: apply feedback from Guillaume
* core: fix comment
---------
Co-authored-by: rjl493456442
---
cmd/devp2p/internal/ethtest/chain.go | 2 +-
cmd/evm/runner.go | 2 +-
cmd/faucet/faucet.go | 2 +-
consensus/clique/snapshot_test.go | 4 +-
core/blockchain.go | 13 +++
core/chain_makers.go | 2 +-
core/genesis.go | 133 ++++++++++++++++++------
core/genesis_test.go | 38 +++++--
core/rawdb/accessors_metadata.go | 14 +++
core/rawdb/database.go | 2 +
core/rawdb/schema.go | 10 +-
eth/catalyst/api_test.go | 5 +-
eth/downloader/queue_test.go | 14 +--
eth/downloader/testchain_test.go | 7 +-
eth/fetcher/block_fetcher_test.go | 12 ++-
eth/filters/filter_test.go | 18 +++-
eth/gasprice/gasprice_test.go | 8 +-
ethclient/ethclient_test.go | 2 +-
ethclient/gethclient/gethclient_test.go | 2 +-
les/downloader/queue_test.go | 14 +--
les/downloader/testchain_test.go | 7 +-
les/fetcher/block_fetcher_test.go | 13 ++-
les/peer_test.go | 3 +-
tests/state_test.go | 3 +-
tests/state_test_util.go | 2 +-
25 files changed, 242 insertions(+), 90 deletions(-)
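
Taken together, the new API separates deriving the genesis block from
persisting its state. A short sketch of the resulting call pattern (the
identifiers are the exported ones touched below; the in-memory database is
just for illustration):

    db := rawdb.NewMemoryDatabase()
    genesis := core.DefaultGenesisBlock()

    // ToBlock no longer takes a database: the state root is derived in an
    // ephemeral in-memory state and nothing is written to disk.
    block := genesis.ToBlock()

    // Commit/MustCommit persist both the derived state and the allocation
    // itself (the "genesis state spec") under the genesis hash.
    genesis.MustCommit(db)

    // If the genesis state is later pruned, it can be recommitted from the
    // stored allocation; well-known networks fall back to their built-in
    // allocations, private networks without a stored spec get "not found".
    if err := core.CommitGenesisState(db, block.Hash()); err != nil {
        panic(err)
    }

This recovery path is what lets setHeadBeyondRoot rewind to block zero even
after the genesis state has been garbage collected (see the blockchain.go
hunk below).
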
diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go
index 7dcb412b53..d75d8d0a45 100644
--- a/cmd/devp2p/internal/ethtest/chain.go
+++ b/cmd/devp2p/internal/ethtest/chain.go
@@ -133,7 +133,7 @@ func loadChain(chainfile string, genesis string) (*Chain, error) {
if err != nil {
return nil, err
}
- gblock := gen.ToBlock(nil)
+ gblock := gen.ToBlock()
blocks, err := blocksFromFile(chainfile, gblock)
if err != nil {
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 9ee248f0f7..be9d37b031 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -138,7 +138,7 @@ func runCmd(ctx *cli.Context) error {
gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
- genesis := gen.ToBlock(db)
+ genesis := gen.MustCommit(db)
statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
chainConfig = gen.Config
} else {
diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go
index 5ec4960080..4ae98ed605 100644
--- a/cmd/faucet/faucet.go
+++ b/cmd/faucet/faucet.go
@@ -251,7 +251,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network ui
cfg.SyncMode = downloader.LightSync
cfg.NetworkId = network
cfg.Genesis = genesis
- utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock(nil).Hash())
+ utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock().Hash())
lesBackend, err := les.New(stack, &cfg)
if err != nil {
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index 72e719a3f6..3a944d67cc 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -403,7 +403,7 @@ func TestClique(t *testing.T) {
}
// Create a pristine blockchain with the genesis injected
db := rawdb.NewMemoryDatabase()
- genesis.Commit(db)
+ genesisBlock := genesis.MustCommit(db)
// Assemble a chain of headers from the cast votes
config := *params.TestChainConfig
@@ -414,7 +414,7 @@ func TestClique(t *testing.T) {
engine := New(config.Clique, db)
engine.fakeDiff = true
- blocks, _ := core.GenerateChain(&config, genesis.ToBlock(db), engine, db, len(tt.votes), func(j int, gen *core.BlockGen) {
+ blocks, _ := core.GenerateChain(&config, genesisBlock, engine, db, len(tt.votes), func(j int, gen *core.BlockGen) {
// Cast the vote contained in this block
gen.SetCoinbase(accounts.address(tt.votes[j].voted))
if tt.votes[j].auth {
diff --git a/core/blockchain.go b/core/blockchain.go
index 0f02aece1e..86fb5764bc 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -690,6 +690,19 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
}
}
if beyondRoot || newHeadBlock.NumberU64() == 0 {
+ if newHeadBlock.NumberU64() == 0 {
+ // Recommit the genesis state to disk in case the rewinding destination
+ // is the genesis block and the relevant state is gone. In the future this
+ // rewinding destination can be the earliest block stored in the chain
+ // if historical chain pruning is enabled; in that case the logic here
+ // needs to be improved.
+ if !bc.HasState(bc.genesisBlock.Root()) {
+ if err := CommitGenesisState(bc.db, bc.genesisBlock.Hash()); err != nil {
+ log.Crit("Failed to commit genesis state", "err", err)
+ }
+ log.Debug("Recommitted genesis state to disk")
+ }
+ }
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
break
}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 1bb2079f04..9e0f63fb14 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -366,7 +366,7 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int,
if err != nil {
panic(err)
}
- blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(db), engine, db, n, gen, true)
+ blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen, true)
return db, blocks, receipts
}
diff --git a/core/genesis.go b/core/genesis.go
index 11cc039d38..461ba2e211 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -80,6 +80,96 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil
}
+// deriveHash computes the state root according to the genesis specification.
+func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
+ // Create an ephemeral in-memory database for computing the hash;
+ // all the derived states will be discarded and never pollute disk.
+ db := state.NewDatabase(rawdb.NewMemoryDatabase())
+ statedb, err := state.New(common.Hash{}, db, nil)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ for addr, account := range *ga {
+ statedb.AddBalance(addr, account.Balance)
+ statedb.SetCode(addr, account.Code)
+ statedb.SetNonce(addr, account.Nonce)
+ for key, value := range account.Storage {
+ statedb.SetState(addr, key, value)
+ }
+ }
+ return statedb.Commit(false)
+}
+
+// flush is very similar to deriveHash, with the main difference that all
+// the generated states will be persisted into the given database. The
+// genesis state specification is flushed as well.
+func (ga *GenesisAlloc) flush(db ethdb.Database) error {
+ statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+ if err != nil {
+ return err
+ }
+ for addr, account := range *ga {
+ statedb.AddBalance(addr, account.Balance)
+ statedb.SetCode(addr, account.Code)
+ statedb.SetNonce(addr, account.Nonce)
+ for key, value := range account.Storage {
+ statedb.SetState(addr, key, value)
+ }
+ }
+ root, err := statedb.Commit(false)
+ if err != nil {
+ return err
+ }
+ err = statedb.Database().TrieDB().Commit(root, true, nil)
+ if err != nil {
+ return err
+ }
+ // Marshal the genesis state specification and persist.
+ blob, err := json.Marshal(ga)
+ if err != nil {
+ return err
+ }
+ rawdb.WriteGenesisStateSpec(db, root, blob)
+ return nil
+}
+
+// CommitGenesisState loads the stored genesis state with the given block
+// hash and commits it into the given database handler.
+func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
+ var alloc GenesisAlloc
+ blob := rawdb.ReadGenesisStateSpec(db, hash)
+ if len(blob) != 0 {
+ if err := alloc.UnmarshalJSON(blob); err != nil {
+ return err
+ }
+ } else {
+ // Genesis allocation is missing and there are several possibilities:
+ // the node is a legacy one that never persisted the genesis allocation,
+ // or the persisted allocation has simply been lost.
+ // - supported networks (mainnet, testnets): recover with the built-in allocations
+ // - private networks: can't recover
+ var genesis *Genesis
+ switch hash {
+ case params.MainnetGenesisHash:
+ genesis = DefaultGenesisBlock()
+ case params.RopstenGenesisHash:
+ genesis = DefaultRopstenGenesisBlock()
+ case params.RinkebyGenesisHash:
+ genesis = DefaultRinkebyGenesisBlock()
+ case params.GoerliGenesisHash:
+ genesis = DefaultGoerliGenesisBlock()
+ case params.SepoliaGenesisHash:
+ genesis = DefaultSepoliaGenesisBlock()
+ }
+ if genesis != nil {
+ alloc = genesis.Alloc
+ } else {
+ return errors.New("not found")
+ }
+ }
+ return alloc.flush(db)
+}
+
// GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct {
Code []byte `json:"code,omitempty"`
@@ -185,7 +275,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
genesis = DefaultGenesisBlock()
}
// Ensure the stored genesis matches with the given one.
- hash := genesis.ToBlock(nil).Hash()
+ hash := genesis.ToBlock().Hash()
if hash != stored {
return genesis.Config, hash, &GenesisMismatchError{stored, hash}
}
@@ -197,7 +287,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
}
// Check whether the genesis block is already written.
if genesis != nil {
- hash := genesis.ToBlock(nil).Hash()
+ hash := genesis.ToBlock().Hash()
if hash != stored {
return genesis.Config, hash, &GenesisMismatchError{stored, hash}
}
@@ -262,25 +352,12 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
}
}
-// ToBlock creates the genesis block and writes state of a genesis specification
-// to the given database (or discards it if nil).
-func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
- if db == nil {
- db = rawdb.NewMemoryDatabase()
- }
- statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+// ToBlock returns the genesis block according to the genesis specification.
+func (g *Genesis) ToBlock() *types.Block {
+ root, err := g.Alloc.deriveHash()
if err != nil {
panic(err)
}
- for addr, account := range g.Alloc {
- statedb.AddBalance(addr, account.Balance)
- statedb.SetCode(addr, account.Code)
- statedb.SetNonce(addr, account.Nonce)
- for key, value := range account.Storage {
- statedb.SetState(addr, key, value)
- }
- }
- root := statedb.IntermediateRoot(false)
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
@@ -308,16 +385,13 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee)
}
}
- statedb.Commit(false)
- statedb.Database().TrieDB().Commit(root, true, nil)
-
return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
}
// Commit writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block.
func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
- block := g.ToBlock(db)
+ block := g.ToBlock()
if block.Number().Sign() != 0 {
return nil, errors.New("can't commit genesis block with number > 0")
}
@@ -331,6 +405,12 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if config.Clique != nil && len(block.Extra()) == 0 {
return nil, errors.New("can't start clique chain without signers")
}
+ // All the checks have passed; flush the states derived from the genesis
+ // specification as well as the specification itself into the provided
+ // database.
+ if err := g.Alloc.flush(db); err != nil {
+ return nil, err
+ }
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@@ -352,15 +432,6 @@ func (g *Genesis) MustCommit(db ethdb.Database) *types.Block {
return block
}
-// GenesisBlockForTesting creates and writes a block in which addr has the given wei balance.
-func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int) *types.Block {
- g := Genesis{
- Alloc: GenesisAlloc{addr: {Balance: balance}},
- BaseFee: big.NewInt(params.InitialBaseFee),
- }
- return g.MustCommit(db)
-}
-
// DefaultGenesisBlock returns the Ethereum main net genesis block.
func DefaultGenesisBlock() *Genesis {
return &Genesis{
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 078f22ca30..8399c377b2 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -178,7 +178,7 @@ func TestGenesisHashes(t *testing.T) {
t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
}
// Test via ToBlock
- if have := c.genesis.ToBlock(nil).Hash(); have != c.want {
+ if have := c.genesis.ToBlock().Hash(); have != c.want {
t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
}
}
@@ -192,11 +192,7 @@ func TestGenesis_Commit(t *testing.T) {
}
db := rawdb.NewMemoryDatabase()
- genesisBlock, err := genesis.Commit(db)
- if err != nil {
- t.Fatal(err)
- }
-
+ genesisBlock := genesis.MustCommit(db)
if genesis.Difficulty != nil {
t.Fatalf("assumption wrong")
}
@@ -213,3 +209,33 @@ func TestGenesis_Commit(t *testing.T) {
t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
}
}
+
+func TestReadWriteGenesisAlloc(t *testing.T) {
+ var (
+ db = rawdb.NewMemoryDatabase()
+ alloc = &GenesisAlloc{
+ {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
+ {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
+ }
+ hash, _ = alloc.deriveHash()
+ )
+ alloc.flush(db)
+
+ var reload GenesisAlloc
+ err := reload.UnmarshalJSON(rawdb.ReadGenesisStateSpec(db, hash))
+ if err != nil {
+ t.Fatalf("Failed to load genesis state %v", err)
+ }
+ if len(reload) != len(*alloc) {
+ t.Fatal("Unexpected genesis allocation")
+ }
+ for addr, account := range reload {
+ want, ok := (*alloc)[addr]
+ if !ok {
+ t.Fatal("Account is not found")
+ }
+ if !reflect.DeepEqual(want, account) {
+ t.Fatal("Unexpected account")
+ }
+ }
+}
diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go
index cd85a0a8a9..b2dadea145 100644
--- a/core/rawdb/accessors_metadata.go
+++ b/core/rawdb/accessors_metadata.go
@@ -81,6 +81,20 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
}
}
+// ReadGenesisStateSpec retrieves the genesis state specification based on the
+// given genesis hash.
+func ReadGenesisStateSpec(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, _ := db.Get(genesisStateSpecKey(hash))
+ return data
+}
+
+// WriteGenesisStateSpec writes the genesis state specification to disk.
+func WriteGenesisStateSpec(db ethdb.KeyValueWriter, hash common.Hash, data []byte) {
+ if err := db.Put(genesisStateSpecKey(hash), data); err != nil {
+ log.Crit("Failed to store genesis state", "err", err)
+ }
+}
+
// crashList is a list of unclean-shutdown-markers, for rlp-encoding to the
// database
type crashList struct {
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index ab2b3d99ee..3c31ef6d39 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -460,6 +460,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
preimages.Add(size)
case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
metadata.Add(size)
+ case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
+ metadata.Add(size)
case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
bloomBits.Add(size)
case bytes.HasPrefix(key, BloomBitsIndexPrefix):
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 16478b6923..6c9544ad95 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -103,8 +103,9 @@ var (
internalTxsPrefix = []byte("itxs") // internalTxsPrefix + block hash -> internal transactions
dirtyAccountsKey = []byte("dacc") // dirtyAccountsPrefix + block hash -> dirty accounts
- PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
- configPrefix = []byte("ethereum-config-") // config prefix for the db
+ PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
+ configPrefix = []byte("ethereum-config-") // config prefix for the db
+ genesisPrefix = []byte("ethereum-genesis-") // genesis state prefix for the db
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
@@ -254,6 +255,11 @@ func configKey(hash common.Hash) []byte {
return append(configPrefix, hash.Bytes()...)
}
+// genesisStateSpecKey = genesisPrefix + hash
+func genesisStateSpecKey(hash common.Hash) []byte {
+ return append(genesisPrefix, hash.Bytes()...)
+}
+
func snapshotConsortiumKey(hash common.Hash) []byte {
return append(snapshotConsortiumPrefix, hash.Bytes()...)
}
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index b09b3af1db..bc66329217 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -55,8 +55,9 @@ func generateTestChain() (*core.Genesis, []*types.Block) {
g.OffsetTime(5)
g.SetExtra([]byte("test"))
}
- gblock := genesis.ToBlock(db)
+ gblock := genesis.ToBlock()
engine := ethash.NewFaker()
+ genesis.MustCommit(db)
blocks, _ := core.GenerateChain(config, gblock, engine, db, 10, generate, true)
blocks = append([]*types.Block{gblock}, blocks...)
return genesis, blocks
@@ -100,7 +101,7 @@ func generateTestChainWithFork(n int, fork int) (*core.Genesis, []*types.Block,
g.OffsetTime(5)
g.SetExtra([]byte("testF"))
}
- gblock := genesis.ToBlock(db)
+ gblock := genesis.MustCommit(db)
engine := ethash.NewFaker()
blocks, _ := core.GenerateChain(config, gblock, engine, db, n, generate)
blocks = append([]*types.Block{gblock}, blocks...)
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index 481bfe0431..7d3e9bb788 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -27,23 +27,17 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
-var (
- testdb = rawdb.NewMemoryDatabase()
- genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000000000))
-)
-
// makeChain creates a chain of n blocks starting at and including parent.
// the returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {
- blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {
+ blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})
// Add one tx to every second block
if !empty && i%2 == 0 {
@@ -69,10 +63,10 @@ var emptyChain *chainData
func init() {
// Create a chain of blocks to import
targetBlocks := 128
- blocks, _ := makeChain(targetBlocks, 0, genesis, false)
+ blocks, _ := makeChain(targetBlocks, 0, testGenesis, false)
chain = &chainData{blocks, 0}
- blocks, _ = makeChain(targetBlocks, 0, genesis, true)
+ blocks, _ = makeChain(targetBlocks, 0, testGenesis, true)
emptyChain = &chainData{blocks, 0}
}
@@ -261,7 +255,7 @@ func TestEmptyBlocks(t *testing.T) {
// some more advanced scenarios
func XTestDelivery(t *testing.T) {
// the outside network, holding blocks
- blo, rec := makeChain(128, 0, genesis, false)
+ blo, rec := makeChain(128, 0, testGenesis, false)
world := newNetwork()
world.receipts = rec
world.chain = blo
diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go
index 485bbdc54a..01ebca97a4 100644
--- a/eth/downloader/testchain_test.go
+++ b/eth/downloader/testchain_test.go
@@ -35,7 +35,12 @@ var (
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
testDB = rawdb.NewMemoryDatabase()
- testGenesis = core.GenesisBlockForTesting(testDB, testAddress, big.NewInt(1000000000000000))
+
+ testGspec = core.Genesis{
+ Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ testGenesis = testGspec.MustCommit(testDB)
)
// The common prefix of all test chains:
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go
index 29b3b03788..ebb025d9a5 100644
--- a/eth/fetcher/block_fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -35,10 +35,14 @@ import (
)
var (
- testdb = rawdb.NewMemoryDatabase()
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
- genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000000000))
+ testdb = rawdb.NewMemoryDatabase()
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
+ gspec = core.Genesis{
+ Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ genesis = gspec.MustCommit(testdb)
unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil))
)
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index c451604a4e..e79b3dc9a0 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -56,10 +56,17 @@ func BenchmarkFilters(b *testing.B) {
addr2 = common.BytesToAddress([]byte("jeff"))
addr3 = common.BytesToAddress([]byte("ethereum"))
addr4 = common.BytesToAddress([]byte("random addresses please"))
+
+ gspec = core.Genesis{
+ Alloc: core.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ genesis = gspec.ToBlock()
)
defer db.Close()
- genesis := core.GenesisBlockForTesting(db, addr1, big.NewInt(1000000))
+ gspec.MustCommit(db)
+
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 100010, func(i int, gen *core.BlockGen) {
switch i {
case 2403:
@@ -116,10 +123,17 @@ func TestFilters(t *testing.T) {
hash2 = common.BytesToHash([]byte("topic2"))
hash3 = common.BytesToHash([]byte("topic3"))
hash4 = common.BytesToHash([]byte("topic4"))
+
+ gspec = core.Genesis{
+ Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ genesis = gspec.ToBlock()
)
defer db.Close()
- genesis := core.GenesisBlockForTesting(db, addr, big.NewInt(1000000))
+ gspec.MustCommit(db)
+
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) {
switch i {
case 1:
diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go
index 7438321e2a..b23877f653 100644
--- a/eth/gasprice/gasprice_test.go
+++ b/eth/gasprice/gasprice_test.go
@@ -110,10 +110,8 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
config.ArrowGlacierBlock = londonBlock
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis, err := gspec.Commit(db)
- if err != nil {
- t.Fatal(err)
- }
+ genesis := gspec.MustCommit(db)
+
// Generate testing blocks
blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, testHead+1, func(i int, b *core.BlockGen) {
b.SetCoinbase(common.Address{1})
@@ -143,7 +141,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
}, true)
// Construct testing chain
diskdb := rawdb.NewMemoryDatabase()
- gspec.Commit(diskdb)
+ gspec.MustCommit(diskdb)
chain, err := core.NewBlockChain(diskdb, &core.CacheConfig{TrieCleanNoPrefetch: true}, &config, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create local chain, %v", err)
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 4a24c47e7a..0a33a593e3 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -287,7 +287,7 @@ func generateTestChain() ([]*types.Block, [][]*types.BlobTxSidecar, [][]common.H
blobTxHashes = append(blobTxHashes, []common.Hash{})
}
}
- gblock := genesis.ToBlock(db)
+ gblock := genesis.MustCommit(db)
engine := ethash.NewFaker()
blocks, _ := core.GenerateChain(genesis.Config, gblock, engine, db, 2, generate, true)
// add genesis blob/sidecars/txhash to the beginning of the list
diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go
index 01837c1020..a1f0cf385b 100644
--- a/ethclient/gethclient/gethclient_test.go
+++ b/ethclient/gethclient/gethclient_test.go
@@ -82,7 +82,7 @@ func generateTestChain() (*core.Genesis, []*types.Block) {
g.OffsetTime(5)
g.SetExtra([]byte("test"))
}
- gblock := genesis.ToBlock(db)
+ gblock := genesis.MustCommit(db)
engine := ethash.NewFaker()
blocks, _ := core.GenerateChain(config, gblock, engine, db, 1, generate, true)
blocks = append([]*types.Block{gblock}, blocks...)
diff --git a/les/downloader/queue_test.go b/les/downloader/queue_test.go
index f78a914b83..75a3c80116 100644
--- a/les/downloader/queue_test.go
+++ b/les/downloader/queue_test.go
@@ -27,23 +27,17 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
)
-var (
- testdb = rawdb.NewMemoryDatabase()
- genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000000000))
-)
-
// makeChain creates a chain of n blocks starting at and including parent.
// the returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {
- blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {
+ blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})
// Add one tx to every second block
if !empty && i%2 == 0 {
@@ -69,10 +63,10 @@ var emptyChain *chainData
func init() {
// Create a chain of blocks to import
targetBlocks := 128
- blocks, _ := makeChain(targetBlocks, 0, genesis, false)
+ blocks, _ := makeChain(targetBlocks, 0, testGenesis, false)
chain = &chainData{blocks, 0}
- blocks, _ = makeChain(targetBlocks, 0, genesis, true)
+ blocks, _ = makeChain(targetBlocks, 0, testGenesis, true)
emptyChain = &chainData{blocks, 0}
}
@@ -261,7 +255,7 @@ func TestEmptyBlocks(t *testing.T) {
// some more advanced scenarios
func XTestDelivery(t *testing.T) {
// the outside network, holding blocks
- blo, rec := makeChain(128, 0, genesis, false)
+ blo, rec := makeChain(128, 0, testGenesis, false)
world := newNetwork()
world.receipts = rec
world.chain = blo
diff --git a/les/downloader/testchain_test.go b/les/downloader/testchain_test.go
index 485bbdc54a..359dc4bad4 100644
--- a/les/downloader/testchain_test.go
+++ b/les/downloader/testchain_test.go
@@ -35,7 +35,12 @@ var (
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
testDB = rawdb.NewMemoryDatabase()
- testGenesis = core.GenesisBlockForTesting(testDB, testAddress, big.NewInt(1000000000000000))
+
+ gspec = core.Genesis{
+ Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ testGenesis = gspec.MustCommit(testDB)
)
// The common prefix of all test chains:
diff --git a/les/fetcher/block_fetcher_test.go b/les/fetcher/block_fetcher_test.go
index bb3bfe42f8..a3d320c126 100644
--- a/les/fetcher/block_fetcher_test.go
+++ b/les/fetcher/block_fetcher_test.go
@@ -35,10 +35,15 @@ import (
)
var (
- testdb = rawdb.NewMemoryDatabase()
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
- genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000000000))
+ testdb = rawdb.NewMemoryDatabase()
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
+
+ gspec = core.Genesis{
+ Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ }
+ genesis = gspec.MustCommit(testdb)
unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil))
)
diff --git a/les/peer_test.go b/les/peer_test.go
index d6551ce6b6..b8a1482a04 100644
--- a/les/peer_test.go
+++ b/les/peer_test.go
@@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -100,7 +99,7 @@ type fakeChain struct{}
func (f *fakeChain) Config() *params.ChainConfig { return params.MainnetChainConfig }
func (f *fakeChain) Genesis() *types.Block {
- return core.DefaultGenesisBlock().ToBlock(rawdb.NewMemoryDatabase())
+ return core.DefaultGenesisBlock().ToBlock()
}
func (f *fakeChain) CurrentHeader() *types.Header { return &types.Header{Number: big.NewInt(10000000)} }
diff --git a/tests/state_test.go b/tests/state_test.go
index 831542e720..16975cef31 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -20,10 +20,11 @@ import (
"bufio"
"bytes"
"fmt"
- "github.com/ethereum/go-ethereum/eth/tracers/logger"
"reflect"
"testing"
+ "github.com/ethereum/go-ethereum/eth/tracers/logger"
+
"github.com/ethereum/go-ethereum/core/vm"
)
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 484d688d38..ec858d882d 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -182,7 +182,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
- block := t.genesis(config).ToBlock(nil)
+ block := t.genesis(config).ToBlock()
snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
var baseFee *big.Int
From 26b2889b7e42a720c22ed5e791da6e93329f197f Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Wed, 11 Sep 2024 15:21:32 +0700
Subject: [PATCH 06/41] core, eth: port snap sync changes (#564)
core, eth, les, trie: rework snap sync
Co-authored-by: rjl493456442
---
core/state/sync.go | 16 +-
core/state/sync_test.go | 482 ++++++++++++++++++++++----------
eth/downloader/statesync.go | 69 +++--
eth/protocols/snap/sort_test.go | 26 +-
eth/protocols/snap/sync.go | 87 +++---
les/downloader/statesync.go | 69 +++--
trie/committer.go | 1 +
trie/sync.go | 306 ++++++++++++--------
trie/sync_test.go | 344 +++++++++++++++--------
trie/trie.go | 8 +-
trie/trie_test.go | 1 -
11 files changed, 893 insertions(+), 516 deletions(-)
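
The scheduler's unit of work changes from node hashes to node paths: Missing
now returns (paths, hashes, codes), results are delivered as
trie.NodeSyncResult keyed by path or trie.CodeSyncResult keyed by hash, and
Process is split into ProcessNode/ProcessCode. A condensed sketch of the
retrieval loop the tests below exercise (fetchNode and fetchCode are
hypothetical stand-ins for the actual data sources, e.g. srcDb.TrieDB().Node
and srcDb.ContractCode):

    sched := state.NewStateSync(root, dstDb, trie.NewSyncBloom(1, dstDb), nil)
    for {
        // Missing reports trie node paths with their hashes, plus bytecode hashes.
        paths, hashes, codes := sched.Missing(count)
        if len(paths)+len(codes) == 0 {
            break
        }
        for i, path := range paths {
            data := fetchNode(hashes[i]) // hypothetical retrieval helper
            if err := sched.ProcessNode(trie.NodeSyncResult{Path: path, Data: data}); err != nil {
                panic(err)
            }
        }
        for _, hash := range codes {
            data := fetchCode(hash) // hypothetical retrieval helper
            if err := sched.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: data}); err != nil {
                panic(err)
            }
        }
        batch := dstDb.NewBatch()
        if err := sched.Commit(batch); err != nil {
            panic(err)
        }
        batch.Write()
    }

Keying work by path rather than hash (trieTasks becomes map[string]*trieTask
in the downloader hunks below) allows the same node hash to be scheduled at
different positions in the trie.
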
diff --git a/core/state/sync.go b/core/state/sync.go
index 734961d9c5..a69a10dd92 100644
--- a/core/state/sync.go
+++ b/core/state/sync.go
@@ -27,20 +27,20 @@ import (
)
// NewStateSync creates a new state trie download scheduler.
-func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync {
+func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(keys [][]byte, leaf []byte) error) *trie.Sync {
// Register the storage slot callback if the external callback is specified.
- var onSlot func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error
+ var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
if onLeaf != nil {
- onSlot = func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
- return onLeaf(paths, leaf)
+ onSlot = func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
+ return onLeaf(keys, leaf)
}
}
// Register the account callback to connect the state trie and the storage
// trie belonging to the contract.
var syncer *trie.Sync
- onAccount := func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
+ onAccount := func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error {
if onLeaf != nil {
- if err := onLeaf(paths, leaf); err != nil {
+ if err := onLeaf(keys, leaf); err != nil {
return err
}
}
@@ -48,8 +48,8 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.S
if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil {
return err
}
- syncer.AddSubTrie(obj.Root, hexpath, parent, onSlot)
- syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), hexpath, parent)
+ syncer.AddSubTrie(obj.Root, path, parent, parentPath, onSlot)
+ syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent, parentPath)
return nil
}
syncer = trie.NewSync(root, database, onAccount, bloom)
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index fafe21dccb..f03e0ac840 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -161,6 +161,14 @@ func TestIterativeStateSyncBatchedByPath(t *testing.T) {
testIterativeStateSync(t, 100, false, true)
}
+// stateElement represents an element in the state trie (a trie node or contract bytecode).
+type stateElement struct {
+ path string
+ hash common.Hash
+ code common.Hash
+ syncPath trie.SyncPath
+}
+
func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
// Create a random state to copy
srcDb, srcRoot, srcAccounts := makeTestState()
@@ -173,54 +181,73 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
dstDb := rawdb.NewMemoryDatabase()
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
- nodes, paths, codes := sched.Missing(count)
var (
- hashQueue []common.Hash
- pathQueue []trie.SyncPath
+ nodeElements []stateElement
+ codeElements []stateElement
)
- if !bypath {
- hashQueue = append(append(hashQueue[:0], nodes...), codes...)
- } else {
- hashQueue = append(hashQueue[:0], codes...)
- pathQueue = append(pathQueue[:0], paths...)
+ paths, nodes, codes := sched.Missing(count)
+ for i := 0; i < len(paths); i++ {
+ nodeElements = append(nodeElements, stateElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(paths[i])),
+ })
}
- for len(hashQueue)+len(pathQueue) > 0 {
- results := make([]trie.SyncResult, len(hashQueue)+len(pathQueue))
- for i, hash := range hashQueue {
- data, err := srcDb.TrieDB().Node(hash)
- if err != nil {
- data, err = srcDb.ContractCode(common.Hash{}, hash)
- }
+ for i := 0; i < len(codes); i++ {
+ codeElements = append(codeElements, stateElement{
+ code: codes[i],
+ })
+ }
+ for len(nodeElements)+len(codeElements) > 0 {
+ var (
+ nodeResults = make([]trie.NodeSyncResult, len(nodeElements))
+ codeResults = make([]trie.CodeSyncResult, len(codeElements))
+ )
+ for i, element := range codeElements {
+ data, err := srcDb.ContractCode(common.Hash{}, element.code)
if err != nil {
- t.Fatalf("failed to retrieve node data for hash %x", hash)
+ t.Fatalf("failed to retrieve contract bytecode for hash %x", element.code)
}
- results[i] = trie.SyncResult{Hash: hash, Data: data}
+ codeResults[i] = trie.CodeSyncResult{Hash: element.code, Data: data}
}
- for i, path := range pathQueue {
- if len(path) == 1 {
- data, _, err := srcTrie.TryGetNode(path[0])
- if err != nil {
- t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
+ for i, node := range nodeElements {
+ if bypath {
+ if len(node.syncPath) == 1 {
+ data, _, err := srcTrie.TryGetNode(node.syncPath[0])
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for path %x: %v", node.syncPath[0], err)
+ }
+ nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
+ } else {
+ var acc types.StateAccount
+ if err := rlp.DecodeBytes(srcTrie.Get(node.syncPath[0]), &acc); err != nil {
+ t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err)
+ }
+ stTrie, err := trie.New(common.BytesToHash(node.syncPath[0]), acc.Root, srcDb.TrieDB())
+ if err != nil {
+ t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
+ }
+ data, _, err := stTrie.TryGetNode(node.syncPath[1])
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for path %x: %v", node.syncPath[1], err)
+ }
+ nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
}
- results[len(hashQueue)+i] = trie.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
} else {
- var acc types.StateAccount
- if err := rlp.DecodeBytes(srcTrie.Get(path[0]), &acc); err != nil {
- t.Fatalf("failed to decode account on path %x: %v", path, err)
- }
- stTrie, err := trie.New(common.BytesToHash(path[0]), acc.Root, srcDb.TrieDB())
+ data, err := srcDb.TrieDB().Node(node.hash)
if err != nil {
- t.Fatalf("failed to retriev storage trie for path %x: %v", path, err)
+ t.Fatalf("failed to retrieve node data for key %v", []byte(node.path))
}
- data, _, err := stTrie.TryGetNode(path[1])
- if err != nil {
- t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
- }
- results[len(hashQueue)+i] = trie.SyncResult{Hash: crypto.Keccak256Hash(data), Data: data}
+ nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
}
}
- for _, result := range results {
- if err := sched.Process(result); err != nil {
+ for _, result := range codeResults {
+ if err := sched.ProcessCode(result); err != nil {
+ t.Errorf("failed to process result %v", err)
+ }
+ }
+ for _, result := range nodeResults {
+ if err := sched.ProcessNode(result); err != nil {
t.Errorf("failed to process result %v", err)
}
}
@@ -230,12 +257,20 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
}
batch.Write()
- nodes, paths, codes = sched.Missing(count)
- if !bypath {
- hashQueue = append(append(hashQueue[:0], nodes...), codes...)
- } else {
- hashQueue = append(hashQueue[:0], codes...)
- pathQueue = append(pathQueue[:0], paths...)
+ paths, nodes, codes = sched.Missing(count)
+ nodeElements = nodeElements[:0]
+ for i := 0; i < len(paths); i++ {
+ nodeElements = append(nodeElements, stateElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(paths[i])),
+ })
+ }
+ codeElements = codeElements[:0]
+ for i := 0; i < len(codes); i++ {
+ codeElements = append(codeElements, stateElement{
+ code: codes[i],
+ })
}
}
// Cross check that the two states are in sync
@@ -252,26 +287,58 @@ func TestIterativeDelayedStateSync(t *testing.T) {
dstDb := rawdb.NewMemoryDatabase()
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
- nodes, _, codes := sched.Missing(0)
- queue := append(append([]common.Hash{}, nodes...), codes...)
-
- for len(queue) > 0 {
+ var (
+ nodeElements []stateElement
+ codeElements []stateElement
+ )
+ paths, nodes, codes := sched.Missing(0)
+ for i := 0; i < len(paths); i++ {
+ nodeElements = append(nodeElements, stateElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(paths[i])),
+ })
+ }
+ for i := 0; i < len(codes); i++ {
+ codeElements = append(codeElements, stateElement{
+ code: codes[i],
+ })
+ }
+ for len(nodeElements)+len(codeElements) > 0 {
// Sync only half of the scheduled nodes
- results := make([]trie.SyncResult, len(queue)/2+1)
- for i, hash := range queue[:len(results)] {
- data, err := srcDb.TrieDB().Node(hash)
- if err != nil {
- data, err = srcDb.ContractCode(common.Hash{}, hash)
+ var nodeProcessed int
+ var codeProcessed int
+ if len(codeElements) > 0 {
+ codeResults := make([]trie.CodeSyncResult, len(codeElements)/2+1)
+ for i, element := range codeElements[:len(codeResults)] {
+ data, err := srcDb.ContractCode(common.Hash{}, element.code)
+ if err != nil {
+ t.Fatalf("failed to retrieve contract bytecode for %x", element.code)
+ }
+ codeResults[i] = trie.CodeSyncResult{Hash: element.code, Data: data}
}
- if err != nil {
- t.Fatalf("failed to retrieve node data for %x", hash)
+ for _, result := range codeResults {
+ if err := sched.ProcessCode(result); err != nil {
+ t.Fatalf("failed to process result %v", err)
+ }
}
- results[i] = trie.SyncResult{Hash: hash, Data: data}
+ codeProcessed = len(codeResults)
}
- for _, result := range results {
- if err := sched.Process(result); err != nil {
- t.Fatalf("failed to process result %v", err)
+ if len(nodeElements) > 0 {
+ nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1)
+ for i, element := range nodeElements[:len(nodeResults)] {
+ data, err := srcDb.TrieDB().Node(element.hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve contract bytecode for %x", element.code)
+ }
+ nodeResults[i] = trie.NodeSyncResult{Path: element.path, Data: data}
+ }
+ for _, result := range nodeResults {
+ if err := sched.ProcessNode(result); err != nil {
+ t.Fatalf("failed to process result %v", err)
+ }
}
+ nodeProcessed = len(nodeResults)
}
batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil {
@@ -279,8 +346,21 @@ func TestIterativeDelayedStateSync(t *testing.T) {
}
batch.Write()
- nodes, _, codes = sched.Missing(0)
- queue = append(append(queue[len(results):], nodes...), codes...)
+ paths, nodes, codes = sched.Missing(0)
+ nodeElements = nodeElements[nodeProcessed:]
+ for i := 0; i < len(paths); i++ {
+ nodeElements = append(nodeElements, stateElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(paths[i])),
+ })
+ }
+ codeElements = codeElements[codeProcessed:]
+ for i := 0; i < len(codes); i++ {
+ codeElements = append(codeElements, stateElement{
+ code: codes[i],
+ })
+ }
}
// Cross check that the two states are in sync
checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
@@ -300,40 +380,70 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
dstDb := rawdb.NewMemoryDatabase()
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
- queue := make(map[common.Hash]struct{})
- nodes, _, codes := sched.Missing(count)
- for _, hash := range append(nodes, codes...) {
- queue[hash] = struct{}{}
+ nodeQueue := make(map[string]stateElement)
+ codeQueue := make(map[common.Hash]struct{})
+ paths, nodes, codes := sched.Missing(count)
+ for i, path := range paths {
+ nodeQueue[path] = stateElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(path)),
+ }
}
- for len(queue) > 0 {
+ for _, hash := range codes {
+ codeQueue[hash] = struct{}{}
+ }
+ for len(nodeQueue)+len(codeQueue) > 0 {
// Fetch all the queued nodes in a random order
- results := make([]trie.SyncResult, 0, len(queue))
- for hash := range queue {
- data, err := srcDb.TrieDB().Node(hash)
- if err != nil {
- data, err = srcDb.ContractCode(common.Hash{}, hash)
+ if len(codeQueue) > 0 {
+ results := make([]trie.CodeSyncResult, 0, len(codeQueue))
+ for hash := range codeQueue {
+ data, err := srcDb.ContractCode(common.Hash{}, hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for %x", hash)
+ }
+ results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
}
- if err != nil {
- t.Fatalf("failed to retrieve node data for %x", hash)
+ for _, result := range results {
+ if err := sched.ProcessCode(result); err != nil {
+ t.Fatalf("failed to process result %v", err)
+ }
}
- results = append(results, trie.SyncResult{Hash: hash, Data: data})
}
- // Feed the retrieved results back and queue new tasks
- for _, result := range results {
- if err := sched.Process(result); err != nil {
- t.Fatalf("failed to process result %v", err)
+ if len(nodeQueue) > 0 {
+ results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
+ for path, element := range nodeQueue {
+ data, err := srcDb.TrieDB().Node(element.hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for %x %v %v", element.hash, []byte(element.path), element.path)
+ }
+ results = append(results, trie.NodeSyncResult{Path: path, Data: data})
+ }
+ for _, result := range results {
+ if err := sched.ProcessNode(result); err != nil {
+ t.Fatalf("failed to process result %v", err)
+ }
}
}
+ // Feed the retrieved results back and queue new tasks
batch := dstDb.NewBatch()
if err := sched.Commit(batch); err != nil {
t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
- queue = make(map[common.Hash]struct{})
- nodes, _, codes = sched.Missing(count)
- for _, hash := range append(nodes, codes...) {
- queue[hash] = struct{}{}
+ nodeQueue = make(map[string]stateElement)
+ codeQueue = make(map[common.Hash]struct{})
+ paths, nodes, codes := sched.Missing(count)
+ for i, path := range paths {
+ nodeQueue[path] = stateElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(path)),
+ }
+ }
+ for _, hash := range codes {
+ codeQueue[hash] = struct{}{}
}
}
// Cross check that the two states are in sync
@@ -350,34 +460,62 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
dstDb := rawdb.NewMemoryDatabase()
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
- queue := make(map[common.Hash]struct{})
- nodes, _, codes := sched.Missing(0)
- for _, hash := range append(nodes, codes...) {
- queue[hash] = struct{}{}
+ nodeQueue := make(map[string]stateElement)
+ codeQueue := make(map[common.Hash]struct{})
+ paths, nodes, codes := sched.Missing(0)
+ for i, path := range paths {
+ nodeQueue[path] = stateElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(path)),
+ }
+ }
+ for _, hash := range codes {
+ codeQueue[hash] = struct{}{}
}
- for len(queue) > 0 {
+ for len(nodeQueue)+len(codeQueue) > 0 {
// Sync only half of the scheduled nodes, even those in random order
- results := make([]trie.SyncResult, 0, len(queue)/2+1)
- for hash := range queue {
- delete(queue, hash)
+ if len(codeQueue) > 0 {
+ results := make([]trie.CodeSyncResult, 0, len(codeQueue)/2+1)
+ for hash := range codeQueue {
+ delete(codeQueue, hash)
- data, err := srcDb.TrieDB().Node(hash)
- if err != nil {
- data, err = srcDb.ContractCode(common.Hash{}, hash)
+ data, err := srcDb.ContractCode(common.Hash{}, hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for %x", hash)
+ }
+ results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
+
+ if len(results) >= cap(results) {
+ break
+ }
}
- if err != nil {
- t.Fatalf("failed to retrieve node data for %x", hash)
+ for _, result := range results {
+ if err := sched.ProcessCode(result); err != nil {
+ t.Fatalf("failed to process result %v", err)
+ }
}
- results = append(results, trie.SyncResult{Hash: hash, Data: data})
+ }
+ if len(nodeQueue) > 0 {
+ results := make([]trie.NodeSyncResult, 0, len(nodeQueue)/2+1)
+ for path, element := range nodeQueue {
+ delete(nodeQueue, path)
- if len(results) >= cap(results) {
- break
+ data, err := srcDb.TrieDB().Node(element.hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for %x", element.hash)
+ }
+ results = append(results, trie.NodeSyncResult{Path: path, Data: data})
+
+ if len(results) >= cap(results) {
+ break
+ }
}
- }
- // Feed the retrieved results back and queue new tasks
- for _, result := range results {
- if err := sched.Process(result); err != nil {
- t.Fatalf("failed to process result %v", err)
+ // Feed the retrieved results back and queue new tasks
+ for _, result := range results {
+ if err := sched.ProcessNode(result); err != nil {
+ t.Fatalf("failed to process result %v", err)
+ }
}
}
batch := dstDb.NewBatch()
@@ -385,12 +523,17 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
- for _, result := range results {
- delete(queue, result.Hash)
+
+ paths, nodes, codes := sched.Missing(0)
+ for i, path := range paths {
+ nodeQueue[path] = stateElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(path)),
+ }
}
- nodes, _, codes = sched.Missing(0)
- for _, hash := range append(nodes, codes...) {
- queue[hash] = struct{}{}
+ for _, hash := range codes {
+ codeQueue[hash] = struct{}{}
}
}
// Cross check that the two states are in sync
@@ -417,28 +560,62 @@ func TestIncompleteStateSync(t *testing.T) {
dstDb := rawdb.NewMemoryDatabase()
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
- var added []common.Hash
-
- nodes, _, codes := sched.Missing(1)
- queue := append(append([]common.Hash{}, nodes...), codes...)
-
- for len(queue) > 0 {
+ var (
+ addedCodes []common.Hash
+ addedNodes []common.Hash
+ )
+ nodeQueue := make(map[string]stateElement)
+ codeQueue := make(map[common.Hash]struct{})
+ paths, nodes, codes := sched.Missing(1)
+ for i, path := range paths {
+ nodeQueue[path] = stateElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(path)),
+ }
+ }
+ for _, hash := range codes {
+ codeQueue[hash] = struct{}{}
+ }
+ for len(nodeQueue)+len(codeQueue) > 0 {
// Fetch a batch of state nodes
- results := make([]trie.SyncResult, len(queue))
- for i, hash := range queue {
- data, err := srcDb.TrieDB().Node(hash)
- if err != nil {
- data, err = srcDb.ContractCode(common.Hash{}, hash)
+ if len(codeQueue) > 0 {
+ results := make([]trie.CodeSyncResult, 0, len(codeQueue))
+ for hash := range codeQueue {
+ data, err := srcDb.ContractCode(common.Hash{}, hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for %x", hash)
+ }
+ results = append(results, trie.CodeSyncResult{Hash: hash, Data: data})
+ addedCodes = append(addedCodes, hash)
}
- if err != nil {
- t.Fatalf("failed to retrieve node data for %x", hash)
+ // Process each of the contract bytecodes
+ for _, result := range results {
+ if err := sched.ProcessCode(result); err != nil {
+ t.Fatalf("failed to process result %v", err)
+ }
}
- results[i] = trie.SyncResult{Hash: hash, Data: data}
}
- // Process each of the state nodes
- for _, result := range results {
- if err := sched.Process(result); err != nil {
- t.Fatalf("failed to process result %v", err)
+ var nodehashes []common.Hash
+ if len(nodeQueue) > 0 {
+ results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
+ for key, element := range nodeQueue {
+ data, err := srcDb.TrieDB().Node(element.hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for %x", element.hash)
+ }
+ results = append(results, trie.NodeSyncResult{Path: key, Data: data})
+
+ if element.hash != srcRoot {
+ addedNodes = append(addedNodes, element.hash)
+ }
+ nodehashes = append(nodehashes, element.hash)
+ }
+ // Process each of the state nodes
+ for _, result := range results {
+ if err := sched.ProcessNode(result); err != nil {
+ t.Fatalf("failed to process result %v", err)
+ }
}
}
batch := dstDb.NewBatch()
@@ -446,43 +623,44 @@ func TestIncompleteStateSync(t *testing.T) {
t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
- for _, result := range results {
- added = append(added, result.Hash)
- // Check that all known sub-tries added so far are complete or missing entirely.
- if _, ok := isCode[result.Hash]; ok {
- continue
- }
+
+ for _, root := range nodehashes {
// Can't use checkStateConsistency here because subtrie keys may have odd
// length and crash in LeafKey.
- if err := checkTrieConsistency(dstDb, result.Hash); err != nil {
+ if err := checkTrieConsistency(dstDb, root); err != nil {
t.Fatalf("state inconsistent: %v", err)
}
}
// Fetch the next batch to retrieve
- nodes, _, codes = sched.Missing(1)
- queue = append(append(queue[:0], nodes...), codes...)
+ nodeQueue = make(map[string]stateElement)
+ codeQueue = make(map[common.Hash]struct{})
+ paths, nodes, codes := sched.Missing(1)
+ for i, path := range paths {
+ nodeQueue[path] = stateElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: trie.NewSyncPath([]byte(path)),
+ }
+ }
+ for _, hash := range codes {
+ codeQueue[hash] = struct{}{}
+ }
}
// Sanity check that removing any node from the database is detected
- for _, node := range added[1:] {
- var (
- key = node.Bytes()
- _, code = isCode[node]
- val []byte
- )
- if code {
- val = rawdb.ReadCode(dstDb, node)
- rawdb.DeleteCode(dstDb, node)
- } else {
- val = rawdb.ReadTrieNode(dstDb, node)
- rawdb.DeleteTrieNode(dstDb, node)
+ for _, node := range addedCodes {
+ val := rawdb.ReadCode(dstDb, node)
+ rawdb.DeleteCode(dstDb, node)
+ if err := checkStateConsistency(dstDb, srcRoot); err == nil {
+ t.Errorf("trie inconsistency not caught, missing: %x", node)
}
- if err := checkStateConsistency(dstDb, added[0]); err == nil {
- t.Fatalf("trie inconsistency not caught, missing: %x", key)
- }
- if code {
- rawdb.WriteCode(dstDb, node, val)
- } else {
- rawdb.WriteTrieNode(dstDb, node, val)
+ rawdb.WriteCode(dstDb, node, val)
+ }
+ for _, node := range addedNodes {
+ val := rawdb.ReadTrieNode(dstDb, node)
+ rawdb.DeleteTrieNode(dstDb, node)
+ if err := checkStateConsistency(dstDb, srcRoot); err == nil {
+ t.Errorf("trie inconsistency not caught, missing: %v", node.Hex())
}
+ rawdb.WriteTrieNode(dstDb, node, val)
}
}
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index 6c53e5577a..696089eaba 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -35,7 +35,7 @@ import (
// a single data retrieval network packet.
type stateReq struct {
nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient)
- trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
+ trieTasks map[string]*trieTask // Trie node download tasks to track previous attempts
codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
timeout time.Duration // Maximum round trip time for this to complete
timer *time.Timer // Timer to fire when the RTT timeout expires
@@ -264,7 +264,7 @@ type stateSync struct {
sched *trie.Sync // State trie sync scheduler defining the tasks
keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with
- trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval
+ trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval
codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval
numUncommitted int
@@ -282,6 +282,7 @@ type stateSync struct {
// trieTask represents a single trie node download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type trieTask struct {
+ hash common.Hash
path [][]byte
attempts map[string]struct{}
}
@@ -300,7 +301,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
root: root,
sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState),
- trieTasks: make(map[common.Hash]*trieTask),
+ trieTasks: make(map[string]*trieTask),
codeTasks: make(map[common.Hash]*codeTask),
deliver: make(chan *stateReq),
cancel: make(chan struct{}),
@@ -456,10 +457,11 @@ func (s *stateSync) assignTasks() {
func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {
// Refill available tasks from the scheduler.
if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
- nodes, paths, codes := s.sched.Missing(fill)
- for i, hash := range nodes {
- s.trieTasks[hash] = &trieTask{
- path: paths[i],
+ paths, hashes, codes := s.sched.Missing(fill)
+ for i, path := range paths {
+ s.trieTasks[path] = &trieTask{
+ hash: hashes[i],
+ path: trie.NewSyncPath([]byte(path)),
attempts: make(map[string]struct{}),
}
}
@@ -475,7 +477,7 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths
paths = make([]trie.SyncPath, 0, n)
codes = make([]common.Hash, 0, n)
- req.trieTasks = make(map[common.Hash]*trieTask, n)
+ req.trieTasks = make(map[string]*trieTask, n)
req.codeTasks = make(map[common.Hash]*codeTask, n)
for hash, t := range s.codeTasks {
@@ -493,7 +495,7 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths
req.codeTasks[hash] = t
delete(s.codeTasks, hash)
}
- for hash, t := range s.trieTasks {
+ for path, t := range s.trieTasks {
// Stop when we've gathered enough requests
if len(nodes)+len(codes) == n {
break
@@ -505,11 +507,11 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths
// Assign the request to this peer
t.attempts[req.peer.id] = struct{}{}
- nodes = append(nodes, hash)
+ nodes = append(nodes, t.hash)
paths = append(paths, t.path)
- req.trieTasks[hash] = t
- delete(s.trieTasks, hash)
+ req.trieTasks[path] = t
+ delete(s.trieTasks, path)
}
req.nItems = uint16(len(nodes) + len(codes))
return nodes, paths, codes
@@ -531,7 +533,7 @@ func (s *stateSync) process(req *stateReq) (int, error) {
// Iterate over all the delivered data and inject one-by-one into the trie
for _, blob := range req.response {
- hash, err := s.processNodeData(blob)
+ hash, err := s.processNodeData(req.trieTasks, req.codeTasks, blob)
switch err {
case nil:
s.numUncommitted++
@@ -544,13 +546,10 @@ func (s *stateSync) process(req *stateReq) (int, error) {
default:
return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
}
- // Delete from both queues (one delivery is enough for the syncer)
- delete(req.trieTasks, hash)
- delete(req.codeTasks, hash)
}
// Put unfulfilled tasks back into the retry queue
npeers := s.d.peers.Len()
- for hash, task := range req.trieTasks {
+ for path, task := range req.trieTasks {
// If the node did deliver something, missing items may be due to a protocol
// limit or a previous timeout + delayed delivery. Both cases should permit
// the node to retry the missing items (to avoid single-peer stalls).
@@ -560,10 +559,10 @@ func (s *stateSync) process(req *stateReq) (int, error) {
// If we've requested the node too many times already, it may be a malicious
// sync where nobody has the right data. Abort.
if len(task.attempts) >= npeers {
- return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
+ return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", task.hash.TerminalString(), len(task.attempts), npeers)
}
// Missing item, place into the retry queue.
- s.trieTasks[hash] = task
+ s.trieTasks[path] = task
}
for hash, task := range req.codeTasks {
// If the node did deliver something, missing items may be due to a protocol
@@ -586,13 +585,35 @@ func (s *stateSync) process(req *stateReq) (int, error) {
// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning whether anything useful was written or any
// error occurred.
-func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
- res := trie.SyncResult{Data: blob}
+//
+// If multiple requests correspond to the same hash, this method will inject the
+// blob as a result for the first one only, leaving the remaining duplicates to
+// be fetched again.
+func (s *stateSync) processNodeData(nodeTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, blob []byte) (common.Hash, error) {
+ var hash common.Hash
s.keccak.Reset()
s.keccak.Write(blob)
- s.keccak.Read(res.Hash[:])
- err := s.sched.Process(res)
- return res.Hash, err
+ s.keccak.Read(hash[:])
+
+ if _, present := codeTasks[hash]; present {
+ err := s.sched.ProcessCode(trie.CodeSyncResult{
+ Hash: hash,
+ Data: blob,
+ })
+ delete(codeTasks, hash)
+ return hash, err
+ }
+ for path, task := range nodeTasks {
+ if task.hash == hash {
+ err := s.sched.ProcessNode(trie.NodeSyncResult{
+ Path: path,
+ Data: blob,
+ })
+ delete(nodeTasks, path)
+ return hash, err
+ }
+ }
+ return common.Hash{}, trie.ErrNotRequested
}
// updateStats bumps the various state sync progress counters and displays a log
diff --git a/eth/protocols/snap/sort_test.go b/eth/protocols/snap/sort_test.go
index c625be09ea..be0a8c5706 100644
--- a/eth/protocols/snap/sort_test.go
+++ b/eth/protocols/snap/sort_test.go
@@ -22,7 +22,6 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/trie"
)
func hexToNibbles(s string) []byte {
@@ -38,22 +37,17 @@ func hexToNibbles(s string) []byte {
}
func TestRequestSorting(t *testing.T) {
-
// - Path 0x9 -> {0x19}
// - Path 0x99 -> {0x0099}
// - Path 0x01234567890123456789012345678901012345678901234567890123456789019 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x19}
// - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099}
- var f = func(path string) (trie.SyncPath, TrieNodePathSet, common.Hash) {
+ var f = func(path string) string {
data := hexToNibbles(path)
- sp := trie.NewSyncPath(data)
- tnps := TrieNodePathSet([][]byte(sp))
- hash := common.Hash{}
- return sp, tnps, hash
+ return string(data)
}
var (
- hashes []common.Hash
- paths []trie.SyncPath
- pathsets []TrieNodePathSet
+ hashes []common.Hash
+ paths []string
)
for _, x := range []string{
"0x9",
@@ -67,16 +61,14 @@ func TestRequestSorting(t *testing.T) {
"0x01234567890123456789012345678901012345678901234567890123456789010",
"0x01234567890123456789012345678901012345678901234567890123456789011",
} {
- sp, tnps, hash := f(x)
- hashes = append(hashes, hash)
- paths = append(paths, sp)
- pathsets = append(pathsets, tnps)
+ paths = append(paths, f(x))
+ hashes = append(hashes, common.Hash{})
}
- _, paths, pathsets = sortByAccountPath(hashes, paths)
+ _, _, syncPaths, pathsets := sortByAccountPath(paths, hashes)
{
var b = new(bytes.Buffer)
- for i := 0; i < len(paths); i++ {
- fmt.Fprintf(b, "\n%d. paths %x", i, paths[i])
+ for i := 0; i < len(syncPaths); i++ {
+ fmt.Fprintf(b, "\n%d. paths %x", i, syncPaths[i])
}
want := `
0. paths [0099]
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 48a2c41a7c..a78ed079ce 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -255,8 +255,8 @@ type trienodeHealRequest struct {
timeout *time.Timer // Timer to track delivery timeout
stale chan struct{} // Channel to signal the request was dropped
- hashes []common.Hash // Trie node hashes to validate responses
- paths []trie.SyncPath // Trie node paths requested for rescheduling
+ paths []string // Trie node paths for identifying trie node
+ hashes []common.Hash // Trie node hashes to validate responses
task *healTask // Task which this request is filling (only access fields through the runloop!!)
}
@@ -265,9 +265,9 @@ type trienodeHealRequest struct {
type trienodeHealResponse struct {
task *healTask // Task which this request is filling
- hashes []common.Hash // Hashes of the trie nodes to avoid double hashing
- paths []trie.SyncPath // Trie node paths requested for rescheduling missing ones
- nodes [][]byte // Actual trie nodes to store into the database (nil = missing)
+ paths []string // Paths of the trie nodes
+ hashes []common.Hash // Hashes of the trie nodes to avoid double hashing
+ nodes [][]byte // Actual trie nodes to store into the database (nil = missing)
}
// bytecodeHealRequest tracks a pending bytecode request to ensure responses are to
@@ -346,8 +346,8 @@ type storageTask struct {
type healTask struct {
scheduler *trie.Sync // State trie sync scheduler defining the tasks
- trieTasks map[common.Hash]trie.SyncPath // Set of trie node tasks currently queued for retrieval
- codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval
+ trieTasks map[string]common.Hash // Set of trie node tasks currently queued for retrieval, indexed by node path
+ codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by code hash
}
// syncProgress is a database entry to allow suspending and resuming a snapshot state
@@ -564,7 +564,7 @@ func (s *Syncer) Unregister(id string) error {
return nil
}
-// Sync starts (or resumes a previous) sync cycle to iterate over an state trie
+// Sync starts (or resumes a previous) sync cycle to iterate over a state trie
// with the given root and reconstruct the nodes based on the snapshot leaves.
// Previously downloaded segments will not be redownloaded of fixed, rather any
// errors will be healed after the leaves are fully accumulated.
@@ -575,7 +575,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.root = root
s.healer = &healTask{
scheduler: state.NewStateSync(root, s.db, nil, s.onHealState),
- trieTasks: make(map[common.Hash]trie.SyncPath),
+ trieTasks: make(map[string]common.Hash),
codeTasks: make(map[common.Hash]struct{}),
}
s.statelessPeers = make(map[string]struct{})
@@ -749,7 +749,7 @@ func (s *Syncer) loadSyncStatus() {
return
}
}
- // Either we've failed to decode the previus state, or there was none.
+ // Either we've failed to decode the previous state, or there was none.
// Start a fresh sync by chunking up the account range and scheduling
// them for retrieval.
s.tasks = nil
@@ -1274,9 +1274,9 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai
want = maxTrieRequestCount + maxCodeRequestCount
)
if have < want {
- nodes, paths, codes := s.healer.scheduler.Missing(want - have)
- for i, hash := range nodes {
- s.healer.trieTasks[hash] = paths[i]
+ paths, hashes, codes := s.healer.scheduler.Missing(want - have)
+ for i, path := range paths {
+ s.healer.trieTasks[path] = hashes[i]
}
for _, hash := range codes {
s.healer.codeTasks[hash] = struct{}{}
@@ -1321,21 +1321,20 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai
}
var (
hashes = make([]common.Hash, 0, cap)
- paths = make([]trie.SyncPath, 0, cap)
+ paths = make([]string, 0, cap)
pathsets = make([]TrieNodePathSet, 0, cap)
)
- for hash, pathset := range s.healer.trieTasks {
- delete(s.healer.trieTasks, hash)
+ for path, hash := range s.healer.trieTasks {
+ delete(s.healer.trieTasks, path)
+ paths = append(paths, path)
hashes = append(hashes, hash)
- paths = append(paths, pathset)
-
- if len(hashes) >= cap {
+ if len(paths) >= cap {
break
}
}
// Group requests by account hash
- hashes, paths, pathsets = sortByAccountPath(hashes, paths)
+ paths, hashes, _, pathsets = sortByAccountPath(paths, hashes)
req := &trienodeHealRequest{
peer: idle,
id: reqid,
@@ -1344,8 +1343,8 @@ func (s *Syncer) assignTrienodeHealTasks(success chan *trienodeHealResponse, fai
revert: fail,
cancel: cancel,
stale: make(chan struct{}),
- hashes: hashes,
paths: paths,
+ hashes: hashes,
task: s.healer,
}
req.timeout = time.AfterFunc(s.rates.TargetTimeout(), func() {
@@ -1403,9 +1402,9 @@ func (s *Syncer) assignBytecodeHealTasks(success chan *bytecodeHealResponse, fai
want = maxTrieRequestCount + maxCodeRequestCount
)
if have < want {
- nodes, paths, codes := s.healer.scheduler.Missing(want - have)
- for i, hash := range nodes {
- s.healer.trieTasks[hash] = paths[i]
+ paths, hashes, codes := s.healer.scheduler.Missing(want - have)
+ for i, path := range paths {
+ s.healer.trieTasks[path] = hashes[i]
}
for _, hash := range codes {
s.healer.codeTasks[hash] = struct{}{}
@@ -1701,10 +1700,10 @@ func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {
s.lock.Unlock()
// If there's a timeout timer still running, abort it and mark the trie node
- // retrievals as not-pending, ready for resheduling
+ // retrievals as not-pending, ready for rescheduling
req.timeout.Stop()
- for i, hash := range req.hashes {
- req.task.trieTasks[hash] = req.paths[i]
+ for i, path := range req.paths {
+ req.task.trieTasks[path] = req.hashes[i]
}
}
@@ -2098,7 +2097,7 @@ func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
// If the trie node was not delivered, reschedule it
if node == nil {
- res.task.trieTasks[hash] = res.paths[i]
+ res.task.trieTasks[res.paths[i]] = res.hashes[i]
continue
}
fills++
@@ -2107,7 +2106,7 @@ func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
s.trienodeHealSynced++
s.trienodeHealBytes += common.StorageSize(len(node))
- err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
+ err := s.healer.scheduler.ProcessNode(trie.NodeSyncResult{Path: res.paths[i], Data: node})
switch err {
case nil:
case trie.ErrAlreadyProcessed:
@@ -2187,7 +2186,7 @@ func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
s.bytecodeHealSynced++
s.bytecodeHealBytes += common.StorageSize(len(node))
- err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
+ err := s.healer.scheduler.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: node})
switch err {
case nil:
case trie.ErrAlreadyProcessed:
@@ -2738,9 +2737,9 @@ func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error
atomic.AddUint64(&s.trienodeHealPend, ^(fills - 1))
}()
response := &trienodeHealResponse{
+ paths: req.paths,
task: req.task,
hashes: req.hashes,
- paths: req.paths,
nodes: nodes,
}
select {
@@ -2986,8 +2985,9 @@ func (s *capacitySort) Swap(i, j int) {
// healRequestSort implements the Sort interface, allowing sorting trienode
// heal requests, which is a prerequisite for merging storage-requests.
type healRequestSort struct {
- hashes []common.Hash
- paths []trie.SyncPath
+ paths []string
+ hashes []common.Hash
+ syncPaths []trie.SyncPath
}
func (t *healRequestSort) Len() int {
@@ -2995,8 +2995,8 @@ func (t *healRequestSort) Len() int {
}
func (t *healRequestSort) Less(i, j int) bool {
- a := t.paths[i]
- b := t.paths[j]
+ a := t.syncPaths[i]
+ b := t.syncPaths[j]
switch bytes.Compare(a[0], b[0]) {
case -1:
return true
@@ -3017,8 +3017,9 @@ func (t *healRequestSort) Less(i, j int) bool {
}
func (t *healRequestSort) Swap(i, j int) {
- t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
t.paths[i], t.paths[j] = t.paths[j], t.paths[i]
+ t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
+ t.syncPaths[i], t.syncPaths[j] = t.syncPaths[j], t.syncPaths[i]
}
// Merge merges the pathsets, so that several storage requests concerning the
@@ -3026,7 +3027,7 @@ func (t *healRequestSort) Swap(i, j int) {
// OBS: This operation is moot if t has not first been sorted.
func (t *healRequestSort) Merge() []TrieNodePathSet {
var result []TrieNodePathSet
- for _, path := range t.paths {
+ for _, path := range t.syncPaths {
pathset := TrieNodePathSet([][]byte(path))
if len(path) == 1 {
// It's an account reference.
@@ -3035,7 +3036,7 @@ func (t *healRequestSort) Merge() []TrieNodePathSet {
// It's a storage reference.
end := len(result) - 1
if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) {
- // The account doesn't doesn't match last, create a new entry.
+ // The account doesn't match last, create a new entry.
result = append(result, pathset)
} else {
// It's the same account as the previous one, add to the storage
@@ -3049,9 +3050,13 @@ func (t *healRequestSort) Merge() []TrieNodePathSet {
// sortByAccountPath takes hashes and paths, and sorts them. After that, it generates
// the TrieNodePaths and merges paths which belongs to the same account path.
-func sortByAccountPath(hashes []common.Hash, paths []trie.SyncPath) ([]common.Hash, []trie.SyncPath, []TrieNodePathSet) {
- n := &healRequestSort{hashes, paths}
+func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []TrieNodePathSet) {
+ var syncPaths []trie.SyncPath
+ for _, path := range paths {
+ syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path)))
+ }
+ n := &healRequestSort{paths, hashes, syncPaths}
sort.Sort(n)
pathsets := n.Merge()
- return n.hashes, n.paths, pathsets
+ return n.paths, n.hashes, n.syncPaths, pathsets
}
diff --git a/les/downloader/statesync.go b/les/downloader/statesync.go
index 6c53e5577a..696089eaba 100644
--- a/les/downloader/statesync.go
+++ b/les/downloader/statesync.go
@@ -35,7 +35,7 @@ import (
// a single data retrieval network packet.
type stateReq struct {
nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient)
- trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
+ trieTasks map[string]*trieTask // Trie node download tasks to track previous attempts
codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
timeout time.Duration // Maximum round trip time for this to complete
timer *time.Timer // Timer to fire when the RTT timeout expires
@@ -264,7 +264,7 @@ type stateSync struct {
sched *trie.Sync // State trie sync scheduler defining the tasks
keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with
- trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval
+ trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval
codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval
numUncommitted int
@@ -282,6 +282,7 @@ type stateSync struct {
// trieTask represents a single trie node download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type trieTask struct {
+ hash common.Hash
path [][]byte
attempts map[string]struct{}
}
@@ -300,7 +301,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
root: root,
sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState),
- trieTasks: make(map[common.Hash]*trieTask),
+ trieTasks: make(map[string]*trieTask),
codeTasks: make(map[common.Hash]*codeTask),
deliver: make(chan *stateReq),
cancel: make(chan struct{}),
@@ -456,10 +457,11 @@ func (s *stateSync) assignTasks() {
func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {
// Refill available tasks from the scheduler.
if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
- nodes, paths, codes := s.sched.Missing(fill)
- for i, hash := range nodes {
- s.trieTasks[hash] = &trieTask{
- path: paths[i],
+ paths, hashes, codes := s.sched.Missing(fill)
+ for i, path := range paths {
+ s.trieTasks[path] = &trieTask{
+ hash: hashes[i],
+ path: trie.NewSyncPath([]byte(path)),
attempts: make(map[string]struct{}),
}
}
@@ -475,7 +477,7 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths
paths = make([]trie.SyncPath, 0, n)
codes = make([]common.Hash, 0, n)
- req.trieTasks = make(map[common.Hash]*trieTask, n)
+ req.trieTasks = make(map[string]*trieTask, n)
req.codeTasks = make(map[common.Hash]*codeTask, n)
for hash, t := range s.codeTasks {
@@ -493,7 +495,7 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths
req.codeTasks[hash] = t
delete(s.codeTasks, hash)
}
- for hash, t := range s.trieTasks {
+ for path, t := range s.trieTasks {
// Stop when we've gathered enough requests
if len(nodes)+len(codes) == n {
break
@@ -505,11 +507,11 @@ func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths
// Assign the request to this peer
t.attempts[req.peer.id] = struct{}{}
- nodes = append(nodes, hash)
+ nodes = append(nodes, t.hash)
paths = append(paths, t.path)
- req.trieTasks[hash] = t
- delete(s.trieTasks, hash)
+ req.trieTasks[path] = t
+ delete(s.trieTasks, path)
}
req.nItems = uint16(len(nodes) + len(codes))
return nodes, paths, codes
@@ -531,7 +533,7 @@ func (s *stateSync) process(req *stateReq) (int, error) {
// Iterate over all the delivered data and inject one-by-one into the trie
for _, blob := range req.response {
- hash, err := s.processNodeData(blob)
+ hash, err := s.processNodeData(req.trieTasks, req.codeTasks, blob)
switch err {
case nil:
s.numUncommitted++
@@ -544,13 +546,10 @@ func (s *stateSync) process(req *stateReq) (int, error) {
default:
return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
}
- // Delete from both queues (one delivery is enough for the syncer)
- delete(req.trieTasks, hash)
- delete(req.codeTasks, hash)
}
// Put unfulfilled tasks back into the retry queue
npeers := s.d.peers.Len()
- for hash, task := range req.trieTasks {
+ for path, task := range req.trieTasks {
// If the node did deliver something, missing items may be due to a protocol
// limit or a previous timeout + delayed delivery. Both cases should permit
// the node to retry the missing items (to avoid single-peer stalls).
@@ -560,10 +559,10 @@ func (s *stateSync) process(req *stateReq) (int, error) {
// If we've requested the node too many times already, it may be a malicious
// sync where nobody has the right data. Abort.
if len(task.attempts) >= npeers {
- return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
+ return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", task.hash.TerminalString(), len(task.attempts), npeers)
}
// Missing item, place into the retry queue.
- s.trieTasks[hash] = task
+ s.trieTasks[path] = task
}
for hash, task := range req.codeTasks {
// If the node did deliver something, missing items may be due to a protocol
@@ -586,13 +585,35 @@ func (s *stateSync) process(req *stateReq) (int, error) {
// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning whether anything useful was written or any
// error occurred.
-func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
- res := trie.SyncResult{Data: blob}
+//
+// If multiple requests correspond to the same hash, this method will inject the
+// blob as a result for the first one only, leaving the remaining duplicates to
+// be fetched again.
+func (s *stateSync) processNodeData(nodeTasks map[string]*trieTask, codeTasks map[common.Hash]*codeTask, blob []byte) (common.Hash, error) {
+ var hash common.Hash
s.keccak.Reset()
s.keccak.Write(blob)
- s.keccak.Read(res.Hash[:])
- err := s.sched.Process(res)
- return res.Hash, err
+ s.keccak.Read(hash[:])
+
+ if _, present := codeTasks[hash]; present {
+ err := s.sched.ProcessCode(trie.CodeSyncResult{
+ Hash: hash,
+ Data: blob,
+ })
+ delete(codeTasks, hash)
+ return hash, err
+ }
+ for path, task := range nodeTasks {
+ if task.hash == hash {
+ err := s.sched.ProcessNode(trie.NodeSyncResult{
+ Path: path,
+ Data: blob,
+ })
+ delete(nodeTasks, path)
+ return hash, err
+ }
+ }
+ return common.Hash{}, trie.ErrNotRequested
}
// updateStats bumps the various state sync progress counters and displays a log
diff --git a/trie/committer.go b/trie/committer.go
index 3b0d6c5fe2..495da8a1fc 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -33,6 +33,7 @@ const leafChanSize = 200
type leaf struct {
blob []byte // raw blob of leaf
parent common.Hash // the hash of parent node
+ path []byte // the path from the root node
}
// committer is a type used for the trie Commit operation. The committer will
diff --git a/trie/sync.go b/trie/sync.go
index 820ef29e7d..579da76130 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
)
// ErrNotRequested is returned by the trie sync when it's requested to process a
@@ -39,19 +40,6 @@ var ErrAlreadyProcessed = errors.New("already processed")
// memory if the node was configured with a significant number of peers.
const maxFetchesPerDepth = 16384
-// request represents a scheduled or already in-flight state retrieval request.
-type request struct {
- path []byte // Merkle path leading to this node for prioritization
- hash common.Hash // Hash of the node data content to retrieve
- data []byte // Data content of the node, cached until all subtrees complete
- code bool // Whether this is a code entry
-
- parents []*request // Parent state nodes referencing this entry (notify all upon completion)
- deps int // Number of dependencies before allowed to commit this node
-
- callback LeafCallback // Callback to invoke if a leaf node it reached on this branch
-}
-
// SyncPath is a path tuple identifying a particular trie node either in a single
// trie (account) or a layered trie (account -> storage).
//
@@ -85,30 +73,57 @@ func NewSyncPath(path []byte) SyncPath {
return SyncPath{hexToKeybytes(path[:64]), hexToCompact(path[64:])}
}
-// SyncResult is a response with requested data along with it's hash.
-type SyncResult struct {
- Hash common.Hash // Hash of the originally unknown trie node
- Data []byte // Data content of the retrieved node
+// nodeRequest represents a scheduled or already in-flight trie node retrieval request.
+type nodeRequest struct {
+ hash common.Hash // Hash of the trie node to retrieve
+ path []byte // Merkle path leading to this node for prioritization
+ data []byte // Data content of the node, cached until all subtrees complete
+
+ parent *nodeRequest // Parent state node referencing this entry
+ deps int // Number of dependencies before allowed to commit this node
+ callback LeafCallback // Callback to invoke if a leaf node it reached on this branch
+}
+
+// codeRequest represents a scheduled or already in-flight bytecode retrieval request.
+type codeRequest struct {
+ hash common.Hash // Hash of the contract bytecode to retrieve
+ path []byte // Merkle path leading to this node for prioritization
+ data []byte // Data content of the node, cached until all subtrees complete
+ parents []*nodeRequest // Parent state nodes referencing this entry (notify all upon completion)
+}
+
+// NodeSyncResult is a response with requested trie node along with its node path.
+type NodeSyncResult struct {
+ Path string // Path of the originally unknown trie node
+ Data []byte // Data content of the retrieved trie node
+}
+
+// CodeSyncResult is a response with requested bytecode along with its hash.
+type CodeSyncResult struct {
+ Hash common.Hash // Hash the originally unknown bytecode
+ Data []byte // Data content of the retrieved bytecode
}
// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
- nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes
- codes map[common.Hash][]byte // In-memory membatch of recently completed codes
+ nodes map[string][]byte // In-memory membatch of recently completed nodes
+ hashes map[string]common.Hash // Hashes of recently completed nodes
+ codes map[common.Hash][]byte // In-memory membatch of recently completed codes
}
// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
return &syncMemBatch{
- nodes: make(map[common.Hash][]byte),
- codes: make(map[common.Hash][]byte),
+ nodes: make(map[string][]byte),
+ hashes: make(map[string]common.Hash),
+ codes: make(map[common.Hash][]byte),
}
}
-// hasNode reports the trie node with specific hash is already cached.
-func (batch *syncMemBatch) hasNode(hash common.Hash) bool {
- _, ok := batch.nodes[hash]
+// hasNode reports the trie node with specific path is already cached.
+func (batch *syncMemBatch) hasNode(path []byte) bool {
+ _, ok := batch.nodes[string(path)]
return ok
}
@@ -122,13 +137,13 @@ func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
- database ethdb.KeyValueReader // Persistent database to check for existing entries
- membatch *syncMemBatch // Memory buffer to avoid frequent database writes
- nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash
- codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash
- queue *prque.Prque // Priority queue with the pending requests
- fetches map[int]int // Number of active fetches per trie node depth
- bloom *SyncBloom // Bloom filter for fast state existence checks
+ database ethdb.KeyValueReader // Persistent database to check for existing entries
+ membatch *syncMemBatch // Memory buffer to avoid frequent database writes
+ nodeReqs map[string]*nodeRequest // Pending requests pertaining to a trie node path
+ codeReqs map[common.Hash]*codeRequest // Pending requests pertaining to a code hash
+ queue *prque.Prque // Priority queue with the pending requests
+ fetches map[int]int // Number of active fetches per trie node depth
+ bloom *SyncBloom // Bloom filter for fast state existence checks
}
// NewSync creates a new trie data download scheduler.
@@ -136,23 +151,25 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb
ts := &Sync{
database: database,
membatch: newSyncMemBatch(),
- nodeReqs: make(map[common.Hash]*request),
- codeReqs: make(map[common.Hash]*request),
+ nodeReqs: make(map[string]*nodeRequest),
+ codeReqs: make(map[common.Hash]*codeRequest),
queue: prque.New(nil),
fetches: make(map[int]int),
bloom: bloom,
}
- ts.AddSubTrie(root, nil, common.Hash{}, callback)
+ ts.AddSubTrie(root, nil, common.Hash{}, nil, callback)
return ts
}
-// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
-func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, callback LeafCallback) {
+// AddSubTrie registers a new trie to the sync code, rooted at the designated
+// parent for completion tracking. The given path is a unique node path in
+// hex format and contain all the parent path if it's layered trie node.
+func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, parentPath []byte, callback LeafCallback) {
// Short circuit if the trie is empty or already known
if root == emptyRoot {
return
}
- if s.membatch.hasNode(root) {
+ if s.membatch.hasNode(path) {
return
}
if s.bloom == nil || s.bloom.Contains(root[:]) {
@@ -167,27 +184,27 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, cal
bloomFaultMeter.Mark(1)
}
// Assemble the new sub-trie sync request
- req := &request{
- path: path,
+ req := &nodeRequest{
hash: root,
+ path: path,
callback: callback,
}
// If this sub-trie has a designated parent, link them together
if parent != (common.Hash{}) {
- ancestor := s.nodeReqs[parent]
+ ancestor := s.nodeReqs[string(parentPath)]
if ancestor == nil {
panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
}
ancestor.deps++
- req.parents = append(req.parents, ancestor)
+ req.parent = ancestor
}
- s.schedule(req)
+ s.scheduleNodeRequest(req)
}
// AddCodeEntry schedules the direct retrieval of a contract code that should not
// be interpreted as a trie node, but rather accepted and stored into the database
// as is.
-func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
+func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash, parentPath []byte) {
// Short circuit if the entry is empty or already known
if hash == emptyState {
return
@@ -209,30 +226,29 @@ func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
bloomFaultMeter.Mark(1)
}
// Assemble the new sub-trie sync request
- req := &request{
+ req := &codeRequest{
path: path,
hash: hash,
- code: true,
}
// If this sub-trie has a designated parent, link them together
if parent != (common.Hash{}) {
- ancestor := s.nodeReqs[parent] // the parent of codereq can ONLY be nodereq
+ ancestor := s.nodeReqs[string(parentPath)] // the parent of codereq can ONLY be nodereq
if ancestor == nil {
panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
}
ancestor.deps++
req.parents = append(req.parents, ancestor)
}
- s.schedule(req)
+ s.scheduleCodeRequest(req)
}
// Missing retrieves the known missing nodes from the trie for retrieval. To aid
// both eth/6x style fast sync and snap/1x style state sync, the paths of trie
// nodes are returned too, as well as separate hash list for codes.
-func (s *Sync) Missing(max int) (nodes []common.Hash, paths []SyncPath, codes []common.Hash) {
+func (s *Sync) Missing(max int) ([]string, []common.Hash, []common.Hash) {
var (
+ nodePaths []string
nodeHashes []common.Hash
- nodePaths []SyncPath
codeHashes []common.Hash
)
for !s.queue.Empty() && (max == 0 || len(nodeHashes)+len(codeHashes) < max) {
@@ -248,62 +264,77 @@ func (s *Sync) Missing(max int) (nodes []common.Hash, paths []SyncPath, codes []
s.queue.Pop()
s.fetches[depth]++
- hash := item.(common.Hash)
- if req, ok := s.nodeReqs[hash]; ok {
- nodeHashes = append(nodeHashes, hash)
- nodePaths = append(nodePaths, NewSyncPath(req.path))
- } else {
- codeHashes = append(codeHashes, hash)
+ switch item.(type) {
+ case common.Hash:
+ codeHashes = append(codeHashes, item.(common.Hash))
+ case string:
+ path := item.(string)
+ req, ok := s.nodeReqs[path]
+ if !ok {
+ log.Error("Missing node request", "path", path)
+ continue // System very wrong, shouldn't happen
+ }
+ nodePaths = append(nodePaths, path)
+ nodeHashes = append(nodeHashes, req.hash)
}
}
- return nodeHashes, nodePaths, codeHashes
+ return nodePaths, nodeHashes, codeHashes
}
-// Process injects the received data for requested item. Note it can
+// ProcessCode injects the received data for requested item. Note it can
// happpen that the single response commits two pending requests(e.g.
// there are two requests one for code and one for node but the hash
// is same). In this case the second response for the same hash will
// be treated as "non-requested" item or "already-processed" item but
// there is no downside.
-func (s *Sync) Process(result SyncResult) error {
- // If the item was not requested either for code or node, bail out
- if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil {
+func (s *Sync) ProcessCode(result CodeSyncResult) error {
+ // If the code was not requested or it's already processed, bail out
+ req := s.codeReqs[result.Hash]
+ if req == nil {
return ErrNotRequested
}
- // There is an pending code request for this data, commit directly
- var filled bool
- if req := s.codeReqs[result.Hash]; req != nil && req.data == nil {
- filled = true
- req.data = result.Data
- s.commit(req)
+ if req.data != nil {
+ return ErrAlreadyProcessed
}
- // There is an pending node request for this data, fill it.
- if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil {
- filled = true
- // Decode the node data content and update the request
- node, err := decodeNode(result.Hash[:], result.Data)
- if err != nil {
- return err
- }
- req.data = result.Data
+ req.data = result.Data
+ return s.commitCodeRequest(req)
+}
- // Create and schedule a request for all the children nodes
- requests, err := s.children(req, node)
- if err != nil {
- return err
- }
- if len(requests) == 0 && req.deps == 0 {
- s.commit(req)
- } else {
- req.deps += len(requests)
- for _, child := range requests {
- s.schedule(child)
- }
- }
+// ProcessNode injects the received data for requested item. Note it can
+// happen that the single response commits two pending requests(e.g.
+// there are two requests one for code and one for node but the hash
+// is same). In this case the second response for the same hash will
+// be treated as "non-requested" item or "already-processed" item but
+// there is no downside.
+func (s *Sync) ProcessNode(result NodeSyncResult) error {
+ // If the trie node was not requested or it's already processed, bail out
+ req := s.nodeReqs[result.Path]
+ if req == nil {
+ return ErrNotRequested
}
- if !filled {
+ if req.data != nil {
return ErrAlreadyProcessed
}
+ // Decode the node data content and update the request
+ node, err := decodeNode(req.hash.Bytes(), result.Data)
+ if err != nil {
+ return err
+ }
+ req.data = result.Data
+
+ // Create and schedule a request for all the children nodes
+ requests, err := s.children(req, node)
+ if err != nil {
+ return err
+ }
+ if len(requests) == 0 && req.deps == 0 {
+ s.commitNodeRequest(req)
+ } else {
+ req.deps += len(requests)
+ for _, child := range requests {
+ s.scheduleNodeRequest(child)
+ }
+ }
return nil
}
@@ -311,16 +342,17 @@ func (s *Sync) Process(result SyncResult) error {
// storage, returning any occurred error.
func (s *Sync) Commit(dbw ethdb.Batch) error {
// Dump the membatch into a database dbw
- for key, value := range s.membatch.nodes {
- rawdb.WriteTrieNode(dbw, key, value)
+ for path, value := range s.membatch.nodes {
+ hash := s.membatch.hashes[path]
+ rawdb.WriteTrieNode(dbw, hash, value)
if s.bloom != nil {
- s.bloom.Add(key[:])
+ s.bloom.Add(hash[:])
}
}
- for key, value := range s.membatch.codes {
- rawdb.WriteCode(dbw, key, value)
+ for hash, value := range s.membatch.codes {
+ rawdb.WriteCode(dbw, hash, value)
if s.bloom != nil {
- s.bloom.Add(key[:])
+ s.bloom.Add(hash[:])
}
}
// Drop the membatch data and return
@@ -336,23 +368,31 @@ func (s *Sync) Pending() int {
// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
-func (s *Sync) schedule(req *request) {
- var reqset = s.nodeReqs
- if req.code {
- reqset = s.codeReqs
+func (s *Sync) scheduleNodeRequest(req *nodeRequest) {
+ s.nodeReqs[string(req.path)] = req
+
+ // Schedule the request for future retrieval. This queue is shared
+ // by both node requests and code requests.
+ prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents
+ for i := 0; i < 14 && i < len(req.path); i++ {
+ prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order
}
+ s.queue.Push(string(req.path), prio)
+}
+
+// schedule inserts a new state retrieval request into the fetch queue. If there
+// is already a pending request for this node, the new request will be discarded
+// and only a parent reference added to the old one.
+func (s *Sync) scheduleCodeRequest(req *codeRequest) {
// If we're already requesting this node, add a new reference and stop
- if old, ok := reqset[req.hash]; ok {
+ if old, ok := s.codeReqs[req.hash]; ok {
old.parents = append(old.parents, req.parents...)
return
}
- reqset[req.hash] = req
+ s.codeReqs[req.hash] = req
// Schedule the request for future retrieval. This queue is shared
- // by both node requests and code requests. It can happen that there
- // is a trie node and code has same hash. In this case two elements
- // with same hash and same or different depth will be pushed. But it's
- // ok the worst case is the second response will be treated as duplicated.
+ // by both node requests and code requests.
prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents
for i := 0; i < 14 && i < len(req.path); i++ {
prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order
@@ -362,7 +402,7 @@ func (s *Sync) schedule(req *request) {
// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
-func (s *Sync) children(req *request, object node) ([]*request, error) {
+func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
// Gather all the children of the node, irrelevant whether known or not
type child struct {
path []byte
@@ -393,7 +433,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
panic(fmt.Sprintf("unknown node: %+v", node))
}
// Iterate over the children, and request all unknown ones
- requests := make([]*request, 0, len(children))
+ requests := make([]*nodeRequest, 0, len(children))
for _, child := range children {
// Notify any external watcher of a new key/value node
if req.callback != nil {
@@ -405,7 +445,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
paths = append(paths, hexToKeybytes(child.path[:2*common.HashLength]))
paths = append(paths, hexToKeybytes(child.path[2*common.HashLength:]))
}
- if err := req.callback(paths, child.path, node, req.hash); err != nil {
+ if err := req.callback(paths, child.path, node, req.hash, req.path); err != nil {
return nil, err
}
}
@@ -413,25 +453,25 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
// If the child references another node, resolve or schedule
if node, ok := (child.node).(hashNode); ok {
// Try to resolve the node from the local database
- hash := common.BytesToHash(node)
- if s.membatch.hasNode(hash) {
+ if s.membatch.hasNode(child.path) {
continue
}
+ chash := common.BytesToHash(node)
if s.bloom == nil || s.bloom.Contains(node) {
// Bloom filter says this might be a duplicate, double check.
// If database says yes, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
- if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
+ if blob := rawdb.ReadTrieNode(s.database, chash); len(blob) > 0 {
continue
}
// False positive, bump fault meter
bloomFaultMeter.Mark(1)
}
// Locally unknown node, schedule for retrieval
- requests = append(requests, &request{
+ requests = append(requests, &nodeRequest{
path: child.path,
- hash: hash,
- parents: []*request{req},
+ hash: chash,
+ parent: req,
callback: req.callback,
})
}
@@ -442,22 +482,40 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
-func (s *Sync) commit(req *request) (err error) {
+func (s *Sync) commitNodeRequest(req *nodeRequest) error {
// Write the node content to the membatch
- if req.code {
- s.membatch.codes[req.hash] = req.data
- delete(s.codeReqs, req.hash)
- s.fetches[len(req.path)]--
- } else {
- s.membatch.nodes[req.hash] = req.data
- delete(s.nodeReqs, req.hash)
- s.fetches[len(req.path)]--
+ s.membatch.nodes[string(req.path)] = req.data
+ s.membatch.hashes[string(req.path)] = req.hash
+
+ delete(s.nodeReqs, string(req.path))
+ s.fetches[len(req.path)]--
+
+ // Check parent for completion
+ if req.parent != nil {
+ req.parent.deps--
+ if req.parent.deps == 0 {
+ if err := s.commitNodeRequest(req.parent); err != nil {
+ return err
+ }
+ }
}
+ return nil
+}
+
+// commit finalizes a retrieval request and stores it into the membatch. If any
+// of the referencing parent requests complete due to this commit, they are also
+// committed themselves.
+func (s *Sync) commitCodeRequest(req *codeRequest) error {
+ // Write the node content to the membatch
+ s.membatch.codes[req.hash] = req.data
+ delete(s.codeReqs, req.hash)
+ s.fetches[len(req.path)]--
+
// Check all parents for completion
for _, parent := range req.parents {
parent.deps--
if parent.deps == 0 {
- if err := s.commit(parent); err != nil {
+ if err := s.commitNodeRequest(parent); err != nil {
return err
}
}
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 388aedd1f4..027f36c6de 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -94,6 +94,13 @@ func checkTrieConsistency(db *Database, root common.Hash) error {
return it.Error()
}
+// trieElement represents the element in the state trie(bytecode or trie node).
+type trieElement struct {
+ path string
+ hash common.Hash
+ syncPath SyncPath
+}
+
// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
dbA := NewDatabase(memorydb.New())
@@ -125,35 +132,38 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
- nodes, paths, codes := sched.Missing(count)
- var (
- hashQueue []common.Hash
- pathQueue []SyncPath
- )
- if !bypath {
- hashQueue = append(append(hashQueue[:0], nodes...), codes...)
- } else {
- hashQueue = append(hashQueue[:0], codes...)
- pathQueue = append(pathQueue[:0], paths...)
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ paths, nodes, _ := sched.Missing(count)
+ var elements []trieElement
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
}
- for len(hashQueue)+len(pathQueue) > 0 {
- results := make([]SyncResult, len(hashQueue)+len(pathQueue))
- for i, hash := range hashQueue {
- data, err := srcDb.Node(hash)
- if err != nil {
- t.Fatalf("failed to retrieve node data for hash %x: %v", hash, err)
+ for len(elements) > 0 {
+ results := make([]NodeSyncResult, len(elements))
+ if !bypath {
+ for i, element := range elements {
+ data, err := srcDb.Node(element.hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
+ }
+ results[i] = NodeSyncResult{element.path, data}
}
- results[i] = SyncResult{hash, data}
- }
- for i, path := range pathQueue {
- data, _, err := srcTrie.TryGetNode(path[0])
- if err != nil {
- t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
+ } else {
+ for i, element := range elements {
+ data, _, err := srcTrie.TryGetNode(element.syncPath[len(element.syncPath)-1])
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for path %x: %v", element.path, err)
+ }
+ results[i] = NodeSyncResult{element.path, data}
}
- results[len(hashQueue)+i] = SyncResult{crypto.Keccak256Hash(data), data}
}
for _, result := range results {
- if err := sched.Process(result); err != nil {
+ if err := sched.ProcessNode(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
@@ -163,12 +173,14 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
}
batch.Write()
- nodes, paths, codes = sched.Missing(count)
- if !bypath {
- hashQueue = append(append(hashQueue[:0], nodes...), codes...)
- } else {
- hashQueue = append(hashQueue[:0], codes...)
- pathQueue = append(pathQueue[:0], paths...)
+ paths, nodes, _ = sched.Missing(count)
+ elements = elements[:0]
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
}
}
// Cross check that the two tries are in sync
@@ -186,21 +198,29 @@ func TestIterativeDelayedSync(t *testing.T) {
triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
- nodes, _, codes := sched.Missing(10000)
- queue := append(append([]common.Hash{}, nodes...), codes...)
-
- for len(queue) > 0 {
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ paths, nodes, _ := sched.Missing(10000)
+ var elements []trieElement
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
+ for len(elements) > 0 {
// Sync only half of the scheduled nodes
- results := make([]SyncResult, len(queue)/2+1)
- for i, hash := range queue[:len(results)] {
- data, err := srcDb.Node(hash)
+ results := make([]NodeSyncResult, len(elements)/2+1)
+ for i, element := range elements[:len(results)] {
+ data, err := srcDb.Node(element.hash)
if err != nil {
- t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+ t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
- results[i] = SyncResult{hash, data}
+ results[i] = NodeSyncResult{element.path, data}
}
for _, result := range results {
- if err := sched.Process(result); err != nil {
+ if err := sched.ProcessNode(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
@@ -210,8 +230,15 @@ func TestIterativeDelayedSync(t *testing.T) {
}
batch.Write()
- nodes, _, codes = sched.Missing(10000)
- queue = append(append(queue[len(results):], nodes...), codes...)
+ paths, nodes, _ = sched.Missing(10000)
+ elements = elements[len(results):]
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
}
// Cross check that the two tries are in sync
checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
@@ -232,24 +259,30 @@ func testIterativeRandomSync(t *testing.T, count int) {
triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
- queue := make(map[common.Hash]struct{})
- nodes, _, codes := sched.Missing(count)
- for _, hash := range append(nodes, codes...) {
- queue[hash] = struct{}{}
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ paths, nodes, _ := sched.Missing(count)
+ queue := make(map[string]trieElement)
+ for i, path := range paths {
+ queue[path] = trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ }
}
for len(queue) > 0 {
// Fetch all the queued nodes in a random order
- results := make([]SyncResult, 0, len(queue))
- for hash := range queue {
- data, err := srcDb.Node(hash)
+ results := make([]NodeSyncResult, 0, len(queue))
+ for path, element := range queue {
+ data, err := srcDb.Node(element.hash)
if err != nil {
- t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+ t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
- results = append(results, SyncResult{hash, data})
+ results = append(results, NodeSyncResult{path, data})
}
// Feed the retrieved results back and queue new tasks
for _, result := range results {
- if err := sched.Process(result); err != nil {
+ if err := sched.ProcessNode(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
@@ -259,10 +292,14 @@ func testIterativeRandomSync(t *testing.T, count int) {
}
batch.Write()
- queue = make(map[common.Hash]struct{})
- nodes, _, codes = sched.Missing(count)
- for _, hash := range append(nodes, codes...) {
- queue[hash] = struct{}{}
+ paths, nodes, _ = sched.Missing(count)
+ queue = make(map[string]trieElement)
+ for i, path := range paths {
+ queue[path] = trieElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(path)),
+ }
}
}
// Cross check that the two tries are in sync
@@ -280,20 +317,26 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
- queue := make(map[common.Hash]struct{})
- nodes, _, codes := sched.Missing(10000)
- for _, hash := range append(nodes, codes...) {
- queue[hash] = struct{}{}
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ paths, nodes, _ := sched.Missing(10000)
+ queue := make(map[string]trieElement)
+ for i, path := range paths {
+ queue[path] = trieElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(path)),
+ }
}
for len(queue) > 0 {
// Sync only half of the scheduled nodes, even those in random order
- results := make([]SyncResult, 0, len(queue)/2+1)
- for hash := range queue {
- data, err := srcDb.Node(hash)
+ results := make([]NodeSyncResult, 0, len(queue)/2+1)
+ for path, element := range queue {
+ data, err := srcDb.Node(element.hash)
if err != nil {
- t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+ t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
- results = append(results, SyncResult{hash, data})
+ results = append(results, NodeSyncResult{path, data})
if len(results) >= cap(results) {
break
@@ -301,7 +344,7 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
}
// Feed the retrieved results back and queue new tasks
for _, result := range results {
- if err := sched.Process(result); err != nil {
+ if err := sched.ProcessNode(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
@@ -311,11 +354,15 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
}
batch.Write()
for _, result := range results {
- delete(queue, result.Hash)
- }
- nodes, _, codes = sched.Missing(10000)
- for _, hash := range append(nodes, codes...) {
- queue[hash] = struct{}{}
+ delete(queue, result.Path)
+ }
+ paths, nodes, _ = sched.Missing(10000)
+ for i, path := range paths {
+ queue[path] = trieElement{
+ path: path,
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(path)),
+ }
}
}
// Cross check that the two tries are in sync
@@ -333,26 +380,35 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
- nodes, _, codes := sched.Missing(0)
- queue := append(append([]common.Hash{}, nodes...), codes...)
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ paths, nodes, _ := sched.Missing(0)
+ var elements []trieElement
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
requested := make(map[common.Hash]struct{})
- for len(queue) > 0 {
- results := make([]SyncResult, len(queue))
- for i, hash := range queue {
- data, err := srcDb.Node(hash)
+ for len(elements) > 0 {
+ results := make([]NodeSyncResult, len(elements))
+ for i, element := range elements {
+ data, err := srcDb.Node(element.hash)
if err != nil {
- t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+ t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
- if _, ok := requested[hash]; ok {
- t.Errorf("hash %x already requested once", hash)
+ if _, ok := requested[element.hash]; ok {
+ t.Errorf("hash %x already requested once", element.hash)
}
- requested[hash] = struct{}{}
+ requested[element.hash] = struct{}{}
- results[i] = SyncResult{hash, data}
+ results[i] = NodeSyncResult{element.path, data}
}
for _, result := range results {
- if err := sched.Process(result); err != nil {
+ if err := sched.ProcessNode(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
@@ -362,8 +418,15 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
}
batch.Write()
- nodes, _, codes = sched.Missing(0)
- queue = append(append(queue[:0], nodes...), codes...)
+ paths, nodes, _ = sched.Missing(0)
+ elements = elements[:0]
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
}
// Cross check that the two tries are in sync
checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
@@ -380,23 +443,34 @@ func TestIncompleteSync(t *testing.T) {
triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
- var added []common.Hash
-
- nodes, _, codes := sched.Missing(1)
- queue := append(append([]common.Hash{}, nodes...), codes...)
- for len(queue) > 0 {
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ var (
+ added []common.Hash
+ elements []trieElement
+ root = srcTrie.Hash()
+ )
+ paths, nodes, _ := sched.Missing(1)
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
+ for len(elements) > 0 {
// Fetch a batch of trie nodes
- results := make([]SyncResult, len(queue))
- for i, hash := range queue {
- data, err := srcDb.Node(hash)
+ results := make([]NodeSyncResult, len(elements))
+ for i, element := range elements {
+ data, err := srcDb.Node(element.hash)
if err != nil {
- t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+ t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
- results[i] = SyncResult{hash, data}
+ results[i] = NodeSyncResult{element.path, data}
}
// Process each of the trie nodes
for _, result := range results {
- if err := sched.Process(result); err != nil {
+ if err := sched.ProcessNode(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
@@ -405,27 +479,36 @@ func TestIncompleteSync(t *testing.T) {
t.Fatalf("failed to commit data: %v", err)
}
batch.Write()
+
for _, result := range results {
- added = append(added, result.Hash)
+ hash := crypto.Keccak256Hash(result.Data)
+ if hash != root {
+ added = append(added, hash)
+ }
// Check that all known sub-tries in the synced trie are complete
- if err := checkTrieConsistency(triedb, result.Hash); err != nil {
+ if err := checkTrieConsistency(triedb, hash); err != nil {
t.Fatalf("trie inconsistent: %v", err)
}
}
// Fetch the next batch to retrieve
- nodes, _, codes = sched.Missing(1)
- queue = append(append(queue[:0], nodes...), codes...)
+ paths, nodes, _ = sched.Missing(1)
+ elements = elements[:0]
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
}
// Sanity check that removing any node from the database is detected
- for _, node := range added[1:] {
- key := node.Bytes()
- value, _ := diskdb.Get(key)
-
- diskdb.Delete(key)
- if err := checkTrieConsistency(triedb, added[0]); err == nil {
- t.Fatalf("trie inconsistency not caught, missing: %x", key)
+ for _, hash := range added {
+ value, _ := diskdb.Get(hash.Bytes())
+ diskdb.Delete(hash.Bytes())
+ if err := checkTrieConsistency(triedb, root); err == nil {
+ t.Fatalf("trie inconsistency not caught, missing: %x", hash)
}
- diskdb.Put(key, value)
+ diskdb.Put(hash.Bytes(), value)
}
}
@@ -440,21 +523,33 @@ func TestSyncOrdering(t *testing.T) {
triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
- nodes, paths, _ := sched.Missing(1)
- queue := append([]common.Hash{}, nodes...)
- reqs := append([]SyncPath{}, paths...)
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ var (
+ reqs []SyncPath
+ elements []trieElement
+ )
+ paths, nodes, _ := sched.Missing(1)
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ reqs = append(reqs, NewSyncPath([]byte(paths[i])))
+ }
- for len(queue) > 0 {
- results := make([]SyncResult, len(queue))
- for i, hash := range queue {
- data, err := srcDb.Node(hash)
+ for len(elements) > 0 {
+ results := make([]NodeSyncResult, len(elements))
+ for i, element := range elements {
+ data, err := srcDb.Node(element.hash)
if err != nil {
- t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+ t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
- results[i] = SyncResult{hash, data}
+ results[i] = NodeSyncResult{element.path, data}
}
for _, result := range results {
- if err := sched.Process(result); err != nil {
+ if err := sched.ProcessNode(result); err != nil {
t.Fatalf("failed to process result %v", err)
}
}
@@ -464,9 +559,16 @@ func TestSyncOrdering(t *testing.T) {
}
batch.Write()
- nodes, paths, _ = sched.Missing(1)
- queue = append(queue[:0], nodes...)
- reqs = append(reqs, paths...)
+ paths, nodes, _ = sched.Missing(1)
+ elements = elements[:0]
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ reqs = append(reqs, NewSyncPath([]byte(paths[i])))
+ }
}
// Cross check that the two tries are in sync
checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
diff --git a/trie/trie.go b/trie/trie.go
index dabe738ace..e6d40be256 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -40,18 +40,18 @@ var (
// LeafCallback is a callback type invoked when a trie operation reaches a leaf
// node.
//
-// The paths is a path tuple identifying a particular trie node either in a single
-// trie (account) or a layered trie (account -> storage). Each path in the tuple
+// The keys is a path tuple identifying a particular trie node either in a single
+// trie (account) or a layered trie (account -> storage). Each key in the tuple
// is in the raw format(32 bytes).
//
-// The hexpath is a composite hexary path identifying the trie node. All the key
+// The path is a composite hexary path identifying the trie node. All the key
// bytes are converted to the hexary nibbles and composited with the parent path
// if the trie node is in a layered trie.
//
// It's used by state sync and commit to allow handling external references
// between account and storage tries. And also it's used in the state healing
// for extracting the raw states(leaf nodes) with corresponding paths.
-type LeafCallback func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error
+type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
// top of Database. Whenever tries performance a commit operation, the generated nodes will be
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 89851c3b81..4758328c91 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -570,7 +570,6 @@ func BenchmarkCommitAfterHash(b *testing.B) {
b.Run("no-onleaf", func(b *testing.B) {
benchmarkCommitAfterHash(b, false)
})
-
b.Run("with-onleaf", func(b *testing.B) {
benchmarkCommitAfterHash(b, true)
})
From 86443371747b47e7ef8c6b53ac914d1516e130b1 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Fri, 13 Sep 2024 10:49:05 +0700
Subject: [PATCH 07/41] core/rawdb: the ancient store implementing is now
exported in package ethdb, can be used independently of the chain database,
reference by commit 1941c5e6c99689be2e27b3e537bd591688b717de (#571)
---
cmd/ronin/dbcmd.go | 2 +-
core/rawdb/accessors_chain.go | 14 +-
core/rawdb/chain_freezer.go | 295 ++++++++++++++++++++++++++++++++++
core/rawdb/database.go | 26 ++-
core/rawdb/freezer.go | 277 ++++---------------------------
core/rawdb/freezer_batch.go | 2 +-
core/rawdb/freezer_test.go | 8 +-
core/rawdb/table.go | 7 +-
ethdb/database.go | 32 ++--
9 files changed, 383 insertions(+), 280 deletions(-)
create mode 100644 core/rawdb/chain_freezer.go
diff --git a/cmd/ronin/dbcmd.go b/cmd/ronin/dbcmd.go
index bfc3a405d3..ee25dad8ed 100644
--- a/cmd/ronin/dbcmd.go
+++ b/cmd/ronin/dbcmd.go
@@ -336,7 +336,7 @@ func inspect(ctx *cli.Context) error {
return rawdb.InspectDatabase(db, prefix, start)
}
-func showLeveldbStats(db ethdb.Stater) {
+func showLeveldbStats(db ethdb.KeyValueStater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 50877048ce..1aadd5ada1 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -37,7 +37,7 @@ import (
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
data, _ = reader.Ancient(freezerHashTable, number)
if len(data) == 0 {
// Get it by hash from leveldb
@@ -301,7 +301,7 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains
// the canonical data.
@@ -380,7 +380,7 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
// isCanon is an internal utility method, to check whether the given number/hash
// is part of the ancient (canon) set.
-func isCanon(reader ethdb.AncientReader, number uint64, hash common.Hash) bool {
+func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
h, err := reader.Ancient(freezerHashTable, number)
if err != nil {
return false
@@ -394,7 +394,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
// comparison is necessary since ancient database only maintains
// the canonical data.
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
data, _ = reader.Ancient(freezerBodiesTable, number)
@@ -411,7 +411,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
// block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
data, _ = reader.Ancient(freezerBodiesTable, number)
if len(data) > 0 {
return nil
@@ -502,7 +502,7 @@ func WriteInternalTransactions(db ethdb.KeyValueWriter, hash common.Hash, intern
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
data, _ = reader.Ancient(freezerDifficultyTable, number)
@@ -562,7 +562,7 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
var data []byte
- db.ReadAncients(func(reader ethdb.AncientReader) error {
+ db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
data, _ = reader.Ancient(freezerReceiptTable, number)
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
new file mode 100644
index 0000000000..b3fd9f2880
--- /dev/null
+++ b/core/rawdb/chain_freezer.go
@@ -0,0 +1,295 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+const (
+ // freezerRecheckInterval is the frequency to check the key-value database for
+ // chain progression that might permit new blocks to be frozen into immutable
+ // storage.
+ freezerRecheckInterval = time.Minute
+
+ // freezerBatchLimit is the maximum number of blocks to freeze in one batch
+ // before doing an fsync and deleting it from the key-value store.
+ freezerBatchLimit = 30000
+)
+
+type chainFreezer struct {
+ *Freezer
+ quit chan struct{}
+ wg sync.WaitGroup
+ trigger chan chan struct{} // Manual blocking frezzer trigger, test determinism
+}
+
+// chainFreezer is a wrapper of freezer with additional chain freezing feature.
+// The background thread will keep moving ancient chain segments from key-value
+// database to flat files for saving space on live database.
+// newChainFreezer initializes the freezer for ancient chain data.
+func newChainFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*chainFreezer, error) {
+ freezer, err := NewFreezer(datadir, namespace, readonly, maxTableSize, tables)
+ if err != nil {
+ return nil, err
+ }
+
+ return &chainFreezer{
+ Freezer: freezer,
+ quit: make(chan struct{}),
+ trigger: make(chan chan struct{}),
+ }, nil
+}
+
+// Close func closes the chain freezer instance and terminates the background thread.
+func (f *chainFreezer) Close() error {
+ err := f.Freezer.Close()
+ select {
+ case <-f.quit:
+ default:
+ close(f.quit)
+ }
+ f.wg.Wait()
+ return err
+}
+
+// freeze is a background thread that periodically checks the blockchain for any
+// import progress and moves ancient data from the fast database into the freezer.
+//
+// This functionality is deliberately broken off from block importing to avoid
+// incurring additional data shuffling delays on block propagation.
+func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
+ nfdb := &nofreezedb{KeyValueStore: db}
+
+ var (
+ backoff bool
+ triggered chan struct{} // Used in tests
+ )
+ for {
+ select {
+ case <-f.quit:
+ log.Info("Freezer shutting down")
+ return
+ default:
+ }
+ if backoff {
+ // If we were doing a manual trigger, notify it
+ if triggered != nil {
+ triggered <- struct{}{}
+ triggered = nil
+ }
+ select {
+ case <-time.NewTimer(freezerRecheckInterval).C:
+ backoff = false
+ case triggered = <-f.trigger:
+ backoff = false
+ case <-f.quit:
+ return
+ }
+ }
+ // Retrieve the freezing threshold.
+ hash := ReadHeadBlockHash(nfdb)
+ if hash == (common.Hash{}) {
+ log.Debug("Current full block hash unavailable") // new chain, empty database
+ backoff = true
+ continue
+ }
+ number := ReadHeaderNumber(nfdb, hash)
+ threshold := f.threshold.Load()
+
+ switch {
+ case number == nil:
+ log.Error("Current full block number unavailable", "hash", hash)
+ backoff = true
+ continue
+
+ case *number < threshold:
+ log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold)
+ backoff = true
+ continue
+
+ case *number-threshold <= f.frozen.Load():
+ log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen.Load())
+ backoff = true
+ continue
+ }
+ head := ReadHeader(nfdb, hash, *number)
+ if head == nil {
+ log.Error("Current full block unavailable", "number", *number, "hash", hash)
+ backoff = true
+ continue
+ }
+
+ // Seems we have data ready to be frozen, process in usable batches
+ var (
+ start = time.Now()
+ first, _ = f.Ancients()
+ limit = *number - threshold
+ )
+ if limit-first > freezerBatchLimit {
+ limit = first + freezerBatchLimit
+ }
+ ancients, err := f.freezeRange(nfdb, first, limit)
+ if err != nil {
+ log.Error("Error in block freeze operation", "err", err)
+ backoff = true
+ continue
+ }
+
+ // Batch of blocks have been frozen, flush them before wiping from leveldb
+ if err := f.Sync(); err != nil {
+ log.Crit("Failed to flush frozen tables", "err", err)
+ }
+
+ // Wipe out all data from the active database
+ batch := db.NewBatch()
+ for i := 0; i < len(ancients); i++ {
+ // Always keep the genesis block in active database
+ if first+uint64(i) != 0 {
+ DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i))
+ DeleteCanonicalHash(batch, first+uint64(i))
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete frozen canonical blocks", "err", err)
+ }
+ batch.Reset()
+
+ // Wipe out side chains also and track dangling side chains
+ var dangling []common.Hash
+ for number := first; number < f.frozen.Load(); number++ {
+ // Always keep the genesis block in active database
+ if number != 0 {
+ dangling = ReadAllHashes(db, number)
+ for _, hash := range dangling {
+ log.Trace("Deleting side chain", "number", number, "hash", hash)
+ DeleteBlock(batch, hash, number)
+ }
+ }
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete frozen side blocks", "err", err)
+ }
+ batch.Reset()
+
+ // Step into the future and delete and dangling side chains
+ tip := f.frozen.Load()
+ if tip > 0 {
+ for len(dangling) > 0 {
+ drop := make(map[common.Hash]struct{})
+ for _, hash := range dangling {
+ log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash)
+ drop[hash] = struct{}{}
+ }
+ children := ReadAllHashes(db, tip)
+ for i := 0; i < len(children); i++ {
+ // Dig up the child and ensure it's dangling
+ child := ReadHeader(nfdb, children[i], tip)
+ if child == nil {
+ log.Error("Missing dangling header", "number", tip, "hash", children[i])
+ continue
+ }
+ if _, ok := drop[child.ParentHash]; !ok {
+ children = append(children[:i], children[i+1:]...)
+ i--
+ continue
+ }
+ // Delete all block data associated with the child
+ log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
+ DeleteBlock(batch, children[i], tip)
+ }
+ dangling = children
+ tip++
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete dangling side blocks", "err", err)
+ }
+ }
+
+ // Log something friendly for the user
+ context := []interface{}{
+ "blocks", f.frozen.Load() - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen.Load() - 1,
+ }
+ if n := len(ancients); n > 0 {
+ context = append(context, []interface{}{"hash", ancients[n-1]}...)
+ }
+ log.Info("Deep froze chain segment", context...)
+
+ // Avoid database thrashing with tiny writes
+ if f.frozen.Load()-first < freezerBatchLimit {
+ backoff = true
+ }
+ }
+}
+
+func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) {
+ hashes = make([]common.Hash, 0, limit-number)
+
+ _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for ; number <= limit; number++ {
+ // Retrieve all the components of the canonical block.
+ hash := ReadCanonicalHash(nfdb, number)
+ if hash == (common.Hash{}) {
+ return fmt.Errorf("canonical hash missing, can't freeze block %d", number)
+ }
+ header := ReadHeaderRLP(nfdb, hash, number)
+ if len(header) == 0 {
+ return fmt.Errorf("block header missing, can't freeze block %d", number)
+ }
+ body := ReadBodyRLP(nfdb, hash, number)
+ if len(body) == 0 {
+ return fmt.Errorf("block body missing, can't freeze block %d", number)
+ }
+ receipts := ReadReceiptsRLP(nfdb, hash, number)
+ if len(receipts) == 0 {
+ return fmt.Errorf("block receipts missing, can't freeze block %d", number)
+ }
+ td := ReadTdRLP(nfdb, hash, number)
+ if len(td) == 0 {
+ return fmt.Errorf("total difficulty missing, can't freeze block %d", number)
+ }
+
+ // Write to the batch.
+ if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil {
+ return fmt.Errorf("can't write hash to freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil {
+ return fmt.Errorf("can't write header to freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil {
+ return fmt.Errorf("can't write body to freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil {
+ return fmt.Errorf("can't write receipts to freezer: %v", err)
+ }
+ if err := op.AppendRaw(freezerDifficultyTable, number, td); err != nil {
+ return fmt.Errorf("can't write td to freezer: %v", err)
+ }
+
+ hashes = append(hashes, hash)
+ }
+ return nil
+ })
+
+ return hashes, err
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 3c31ef6d39..ba91397cc8 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -37,6 +37,12 @@ import (
type freezerdb struct {
ethdb.KeyValueStore
ethdb.AncientStore
+ ancientRoot string
+}
+
+// AncientDatadir returns the path of root ancient directory.
+func (frdb *freezerdb) AncientDatadir() (string, error) {
+ return frdb.ancientRoot, nil
}
// Close implements io.Closer, closing both the fast key-value store as well as
@@ -59,18 +65,18 @@ func (frdb *freezerdb) Close() error {
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
func (frdb *freezerdb) Freeze(threshold uint64) error {
- if frdb.AncientStore.(*freezer).readonly {
+ if frdb.AncientStore.(*chainFreezer).readonly {
return errReadOnly
}
// Set the freezer threshold to a temporary value
defer func(old uint64) {
- frdb.AncientStore.(*freezer).threshold.Store(old)
- }(frdb.AncientStore.(*freezer).threshold.Load())
- frdb.AncientStore.(*freezer).threshold.Store(threshold)
+ frdb.AncientStore.(*chainFreezer).threshold.Store(old)
+ }(frdb.AncientStore.(*chainFreezer).threshold.Load())
+ frdb.AncientStore.(*chainFreezer).threshold.Store(threshold)
// Trigger a freeze cycle and block until it's done
trigger := make(chan struct{}, 1)
- frdb.AncientStore.(*freezer).trigger <- trigger
+ frdb.AncientStore.(*chainFreezer).trigger <- trigger
<-trigger
return nil
}
@@ -120,7 +126,7 @@ func (db *nofreezedb) Sync() error {
return errNotSupported
}
-func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
+func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
// Unlike other ancient-related methods, this method does not return
// errNotSupported when invoked.
// The reason for this is that the caller might want to do several things:
@@ -136,6 +142,11 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (e
return fn(db)
}
+// AncientDatadir returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) AncientDatadir() (string, error) {
+ return "", errNotSupported
+}
+
// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@@ -147,7 +158,7 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
// storage.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
// Create the idle freezer instance
- frdb, err := newFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy)
+ frdb, err := newChainFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy)
if err != nil {
return nil, err
}
@@ -224,6 +235,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
}()
}
return &freezerdb{
+ ancientRoot: freezer, // o.AncientsDirectory
KeyValueStore: db,
AncientStore: frdb,
}, nil
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index a089a01c67..a9b86a3c1c 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -24,9 +24,7 @@ import (
"path/filepath"
"sync"
"sync/atomic"
- "time"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -53,14 +51,6 @@ var (
)
const (
- // freezerRecheckInterval is the frequency to check the key-value database for
- // chain progression that might permit new blocks to be frozen into immutable
- // storage.
- freezerRecheckInterval = time.Minute
-
- // freezerBatchLimit is the maximum number of blocks to freeze in one batch
- // before doing an fsync and deleting it from the key-value store.
- freezerBatchLimit = 30000
// freezerTableSize defines the maximum size of freezer data files.
freezerTableSize = 2 * 1000 * 1000 * 1000
@@ -69,14 +59,16 @@ const (
// freezer is an memory mapped append-only database to store immutable chain data
// into flat files:
//
-// - The append only nature ensures that disk writes are minimized.
-// - The memory mapping ensures we can max out system memory for caching without
-// reserving it for go-ethereum. This would also reduce the memory requirements
-// of Geth, and thus also GC overhead.
-type freezer struct {
+// - The append only nature ensures that disk writes are minimized.
+// - The memory mapping ensures we can max out system memory for caching without
+// reserving it for go-ethereum. This would also reduce the memory requirements
+// of Geth, and thus also GC overhead.
+type Freezer struct {
frozen atomic.Uint64 // Number of items already frozen
threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
+ datadir string // Path of root directory of ancient store
+
// This lock synchronizes writers and the truncate operation, as well as
// the "atomic" (batched) read operations.
writeLock sync.RWMutex
@@ -98,7 +90,7 @@ type freezer struct {
//
// The 'tables' argument defines the data tables. If the value of a map
// entry is true, snappy compression is disabled for the table.
-func newFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*freezer, error) {
+func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*Freezer, error) {
// Create the initial freezer object
var (
readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
@@ -119,12 +111,13 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
return nil, err
}
// Open all the supported data tables
- freezer := &freezer{
+ freezer := &Freezer{
readonly: readonly,
tables: make(map[string]*freezerTable),
instanceLock: lock,
trigger: make(chan chan struct{}),
quit: make(chan struct{}),
+ datadir: datadir,
}
freezer.threshold.Store(params.FullImmutabilityThreshold)
@@ -158,7 +151,7 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
}
// Close terminates the chain freezer, unmapping all the data files.
-func (f *freezer) Close() error {
+func (f *Freezer) Close() error {
f.writeLock.Lock()
defer f.writeLock.Unlock()
@@ -184,7 +177,7 @@ func (f *freezer) Close() error {
// HasAncient returns an indicator whether the specified ancient data exists
// in the freezer.
-func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
+func (f *Freezer) HasAncient(kind string, number uint64) (bool, error) {
if table := f.tables[kind]; table != nil {
return table.has(number), nil
}
@@ -192,7 +185,7 @@ func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
}
// Ancient retrieves an ancient binary blob from the append-only immutable files.
-func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
+func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
if table := f.tables[kind]; table != nil {
return table.Retrieve(number)
}
@@ -201,10 +194,10 @@ func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
// It will return
-// - at most 'max' items,
-// - at least 1 item (even if exceeding the maxByteSize), but will otherwise
-// return as many items as fit into maxByteSize.
-func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+// - at most 'max' items,
+// - at least 1 item (even if exceeding the maxByteSize), but will otherwise
+// return as many items as fit into maxByteSize.
+func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
if table := f.tables[kind]; table != nil {
return table.RetrieveItems(start, count, maxBytes)
}
@@ -212,12 +205,12 @@ func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]
}
// Ancients returns the length of the frozen items.
-func (f *freezer) Ancients() (uint64, error) {
+func (f *Freezer) Ancients() (uint64, error) {
return f.frozen.Load(), nil
}
// AncientSize returns the ancient size of the specified category.
-func (f *freezer) AncientSize(kind string) (uint64, error) {
+func (f *Freezer) AncientSize(kind string) (uint64, error) {
// This needs the write lock to avoid data races on table fields.
// Speed doesn't matter here, AncientSize is for debugging.
f.writeLock.RLock()
@@ -231,14 +224,14 @@ func (f *freezer) AncientSize(kind string) (uint64, error) {
// ReadAncients runs the given read operation while ensuring that no writes take place
// on the underlying freezer.
-func (f *freezer) ReadAncients(fn func(ethdb.AncientReader) error) (err error) {
+func (f *Freezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
f.writeLock.RLock()
defer f.writeLock.RUnlock()
return fn(f)
}
// ModifyAncients runs the given write operation.
-func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
+func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
if f.readonly {
return 0, errReadOnly
}
@@ -272,7 +265,7 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
}
// TruncateAncients discards any recent data above the provided threshold number.
-func (f *freezer) TruncateAncients(items uint64) error {
+func (f *Freezer) TruncateAncients(items uint64) error {
if f.readonly {
return errReadOnly
}
@@ -292,7 +285,7 @@ func (f *freezer) TruncateAncients(items uint64) error {
}
// Sync flushes all data tables to disk.
-func (f *freezer) Sync() error {
+func (f *Freezer) Sync() error {
var errs []error
for _, table := range f.tables {
if err := table.Sync(); err != nil {
@@ -306,7 +299,7 @@ func (f *freezer) Sync() error {
}
// repair truncates all data tables to the same length.
-func (f *freezer) repair() error {
+func (f *Freezer) repair() error {
min := uint64(math.MaxUint64)
for _, table := range f.tables {
items := table.items.Load()
@@ -323,223 +316,7 @@ func (f *freezer) repair() error {
return nil
}
-// freeze is a background thread that periodically checks the blockchain for any
-// import progress and moves ancient data from the fast database into the freezer.
-//
-// This functionality is deliberately broken off from block importing to avoid
-// incurring additional data shuffling delays on block propagation.
-func (f *freezer) freeze(db ethdb.KeyValueStore) {
- nfdb := &nofreezedb{KeyValueStore: db}
-
- var (
- backoff bool
- triggered chan struct{} // Used in tests
- )
- for {
- select {
- case <-f.quit:
- log.Info("Freezer shutting down")
- return
- default:
- }
- if backoff {
- // If we were doing a manual trigger, notify it
- if triggered != nil {
- triggered <- struct{}{}
- triggered = nil
- }
- select {
- case <-time.NewTimer(freezerRecheckInterval).C:
- backoff = false
- case triggered = <-f.trigger:
- backoff = false
- case <-f.quit:
- return
- }
- }
- // Retrieve the freezing threshold.
- hash := ReadHeadBlockHash(nfdb)
- if hash == (common.Hash{}) {
- log.Debug("Current full block hash unavailable") // new chain, empty database
- backoff = true
- continue
- }
- number := ReadHeaderNumber(nfdb, hash)
- threshold := f.threshold.Load()
-
- switch {
- case number == nil:
- log.Error("Current full block number unavailable", "hash", hash)
- backoff = true
- continue
-
- case *number < threshold:
- log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold)
- backoff = true
- continue
-
- case *number-threshold <= f.frozen.Load():
- log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen.Load())
- backoff = true
- continue
- }
- head := ReadHeader(nfdb, hash, *number)
- if head == nil {
- log.Error("Current full block unavailable", "number", *number, "hash", hash)
- backoff = true
- continue
- }
-
- // Seems we have data ready to be frozen, process in usable batches
- var (
- start = time.Now()
- first, _ = f.Ancients()
- limit = *number - threshold
- )
- if limit-first > freezerBatchLimit {
- limit = first + freezerBatchLimit
- }
- ancients, err := f.freezeRange(nfdb, first, limit)
- if err != nil {
- log.Error("Error in block freeze operation", "err", err)
- backoff = true
- continue
- }
-
- // Batch of blocks have been frozen, flush them before wiping from leveldb
- if err := f.Sync(); err != nil {
- log.Crit("Failed to flush frozen tables", "err", err)
- }
-
- // Wipe out all data from the active database
- batch := db.NewBatch()
- for i := 0; i < len(ancients); i++ {
- // Always keep the genesis block in active database
- if first+uint64(i) != 0 {
- DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i))
- DeleteCanonicalHash(batch, first+uint64(i))
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete frozen canonical blocks", "err", err)
- }
- batch.Reset()
-
- // Wipe out side chains also and track dangling side chains
- var dangling []common.Hash
- for number := first; number < f.frozen.Load(); number++ {
- // Always keep the genesis block in active database
- if number != 0 {
- dangling = ReadAllHashes(db, number)
- for _, hash := range dangling {
- log.Trace("Deleting side chain", "number", number, "hash", hash)
- DeleteBlock(batch, hash, number)
- }
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete frozen side blocks", "err", err)
- }
- batch.Reset()
-
- // Step into the future and delete and dangling side chains
- tip := f.frozen.Load()
- if tip > 0 {
- for len(dangling) > 0 {
- drop := make(map[common.Hash]struct{})
- for _, hash := range dangling {
- log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash)
- drop[hash] = struct{}{}
- }
- children := ReadAllHashes(db, tip)
- for i := 0; i < len(children); i++ {
- // Dig up the child and ensure it's dangling
- child := ReadHeader(nfdb, children[i], tip)
- if child == nil {
- log.Error("Missing dangling header", "number", tip, "hash", children[i])
- continue
- }
- if _, ok := drop[child.ParentHash]; !ok {
- children = append(children[:i], children[i+1:]...)
- i--
- continue
- }
- // Delete all block data associated with the child
- log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
- DeleteBlock(batch, children[i], tip)
- }
- dangling = children
- tip++
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to delete dangling side blocks", "err", err)
- }
- }
-
- // Log something friendly for the user
- context := []interface{}{
- "blocks", f.frozen.Load() - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen.Load() - 1,
- }
- if n := len(ancients); n > 0 {
- context = append(context, []interface{}{"hash", ancients[n-1]}...)
- }
- log.Info("Deep froze chain segment", context...)
-
- // Avoid database thrashing with tiny writes
- if f.frozen.Load()-first < freezerBatchLimit {
- backoff = true
- }
- }
-}
-
-func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) {
- hashes = make([]common.Hash, 0, limit-number)
-
- _, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
- for ; number <= limit; number++ {
- // Retrieve all the components of the canonical block.
- hash := ReadCanonicalHash(nfdb, number)
- if hash == (common.Hash{}) {
- return fmt.Errorf("canonical hash missing, can't freeze block %d", number)
- }
- header := ReadHeaderRLP(nfdb, hash, number)
- if len(header) == 0 {
- return fmt.Errorf("block header missing, can't freeze block %d", number)
- }
- body := ReadBodyRLP(nfdb, hash, number)
- if len(body) == 0 {
- return fmt.Errorf("block body missing, can't freeze block %d", number)
- }
- receipts := ReadReceiptsRLP(nfdb, hash, number)
- if len(receipts) == 0 {
- return fmt.Errorf("block receipts missing, can't freeze block %d", number)
- }
- td := ReadTdRLP(nfdb, hash, number)
- if len(td) == 0 {
- return fmt.Errorf("total difficulty missing, can't freeze block %d", number)
- }
-
- // Write to the batch.
- if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil {
- return fmt.Errorf("can't write hash to freezer: %v", err)
- }
- if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil {
- return fmt.Errorf("can't write header to freezer: %v", err)
- }
- if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil {
- return fmt.Errorf("can't write body to freezer: %v", err)
- }
- if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil {
- return fmt.Errorf("can't write receipts to freezer: %v", err)
- }
- if err := op.AppendRaw(freezerDifficultyTable, number, td); err != nil {
- return fmt.Errorf("can't write td to freezer: %v", err)
- }
-
- hashes = append(hashes, hash)
- }
- return nil
- })
-
- return hashes, err
+// AncientDatadir returns the root directory path of the ancient store.
+func (f *Freezer) AncientDatadir() (string, error) {
+ return f.datadir, nil
}
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
index 476079c5a1..dfb16a58e1 100644
--- a/core/rawdb/freezer_batch.go
+++ b/core/rawdb/freezer_batch.go
@@ -33,7 +33,7 @@ type freezerBatch struct {
tables map[string]*freezerTableBatch
}
-func newFreezerBatch(f *freezer) *freezerBatch {
+func newFreezerBatch(f *Freezer) *freezerBatch {
batch := &freezerBatch{tables: make(map[string]*freezerTableBatch, len(f.tables))}
for kind, table := range f.tables {
batch.tables[kind] = table.newBatch()
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index fa84f80306..474650e00d 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -115,7 +115,7 @@ func TestFreezerModifyRollback(t *testing.T) {
// Reopen and check that the rolled-back data doesn't reappear.
tables := map[string]bool{"test": true}
- f2, err := newFreezer(dir, "", false, 2049, tables)
+ f2, err := NewFreezer(dir, "", false, 2049, tables)
if err != nil {
t.Fatalf("can't reopen freezer after failed ModifyAncients: %v", err)
}
@@ -253,7 +253,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
}
}
-func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
+func newFreezerForTesting(t *testing.T, tables map[string]bool) (*Freezer, string) {
t.Helper()
dir, err := ioutil.TempDir("", "freezer")
@@ -262,7 +262,7 @@ func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, strin
}
// note: using low max table size here to ensure the tests actually
// switch between multiple files.
- f, err := newFreezer(dir, "", false, 2049, tables)
+ f, err := NewFreezer(dir, "", false, 2049, tables)
if err != nil {
t.Fatal("can't open freezer", err)
}
@@ -270,7 +270,7 @@ func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, strin
}
// checkAncientCount verifies that the freezer contains n items.
-func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
+func checkAncientCount(t *testing.T, f *Freezer, kind string, n uint64) {
t.Helper()
if frozen, _ := f.Ancients(); frozen != n {
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 253ed51455..05910a0557 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -85,7 +85,7 @@ func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, erro
return t.db.ModifyAncients(fn)
}
-func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
+func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
return t.db.ReadAncients(fn)
}
@@ -101,6 +101,11 @@ func (t *table) Sync() error {
return t.db.Sync()
}
+// AncientDatadir returns the ancient datadir of the underlying database.
+func (t *table) AncientDatadir() (string, error) {
+ return t.db.AncientDatadir()
+}
+
// Put inserts the given value into the database at a prefixed version of the
// provided key.
func (t *table) Put(key []byte, value []byte) error {
diff --git a/ethdb/database.go b/ethdb/database.go
index 1057636762..913cfc7880 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -38,7 +38,7 @@ type KeyValueWriter interface {
}
// Stater wraps the Stat method of a backing data store.
-type Stater interface {
+type KeyValueStater interface {
// Stat returns a particular internal stat of the database.
Stat(property string) (string, error)
}
@@ -60,16 +60,16 @@ type Compacter interface {
type KeyValueStore interface {
KeyValueReader
KeyValueWriter
+ KeyValueStater
Batcher
Iteratee
- Stater
Compacter
Snapshotter
io.Closer
}
// AncientReader contains the methods required to read from immutable ancient data.
-type AncientReader interface {
+type AncientReaderOp interface {
// HasAncient returns an indicator whether the specified data exists in the
// ancient store.
HasAncient(kind string, number uint64) (bool, error)
@@ -91,13 +91,13 @@ type AncientReader interface {
AncientSize(kind string) (uint64, error)
}
-// AncientBatchReader is the interface for 'batched' or 'atomic' reading.
-type AncientBatchReader interface {
- AncientReader
+// AncientReader is the extended ancient reader interface including 'batched' or 'atomic' reading.
+type AncientReader interface {
+ AncientReaderOp
// ReadAncients runs the given read operation while ensuring that no writes take place
// on the underlying freezer.
- ReadAncients(fn func(AncientReader) error) (err error)
+ ReadAncients(fn func(AncientReaderOp) error) (err error)
}
// AncientWriter contains the methods required to write to immutable ancient data.
@@ -123,11 +123,17 @@ type AncientWriteOp interface {
AppendRaw(kind string, number uint64, item []byte) error
}
+// AncientStater wraps the Stat method of a backing data store.
+type AncientStater interface {
+ // AncientDatadir returns the root directory path of the ancient store.
+ AncientDatadir() (string, error)
+}
+
// Reader contains the methods required to read data from both key-value as well as
// immutable ancient data.
type Reader interface {
KeyValueReader
- AncientBatchReader
+ AncientReader
}
// Writer contains the methods required to write data to both key-value as well as
@@ -140,11 +146,19 @@ type Writer interface {
// AncientStore contains all the methods required to allow handling different
// ancient data stores backing immutable chain data store.
type AncientStore interface {
- AncientBatchReader
+ AncientReader
AncientWriter
+ AncientStater
io.Closer
}
+// Stater contains the methods required to retrieve states from both key-value as well as
+// immutable ancient data.
+type Stater interface {
+ KeyValueStater
+ AncientStater
+}
+
// Database contains all the methods required by the high level database to not
// only access the key-value data store but also the chain freezer.
type Database interface {
From afc506084ab34ab4964fd9a7c7066cbcaeefa038 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Fri, 13 Sep 2024 15:49:40 +0700
Subject: [PATCH 08/41] Change ancient chain segments from root ancient to sub
folders (#572)
* cmd, core, ethdb, node: rework ancient store folder reference by https://github.com/ethereum/go-ethereum/commit/e44d6551c3c872584722c366c863381f7e91df91
---
cmd/ronin/dbcmd.go | 51 +++++++++---------
cmd/utils/flags.go | 2 +-
core/rawdb/accessors_chain.go | 24 ++++-----
core/rawdb/ancient_scheme.go | 89 ++++++++++++++++++++++++++++++++
core/rawdb/chain_freezer.go | 10 ++--
core/rawdb/chain_iterator.go | 2 +-
core/rawdb/database.go | 35 +++++++++++--
core/rawdb/freezer.go | 8 ---
core/rawdb/freezer_table.go | 6 +--
core/rawdb/freezer_table_test.go | 2 +-
core/rawdb/schema.go | 27 ----------
ethdb/database.go | 1 -
node/node.go | 22 ++++----
13 files changed, 179 insertions(+), 100 deletions(-)
create mode 100644 core/rawdb/ancient_scheme.go
diff --git a/cmd/ronin/dbcmd.go b/cmd/ronin/dbcmd.go
index ee25dad8ed..c4793b25e3 100644
--- a/cmd/ronin/dbcmd.go
+++ b/cmd/ronin/dbcmd.go
@@ -23,7 +23,6 @@ import (
"os"
"os/signal"
"path/filepath"
- "sort"
"strconv"
"strings"
"syscall"
@@ -197,8 +196,8 @@ WARNING: This is a low-level operation which may cause database corruption!`,
dbDumpFreezerIndex = &cli.Command{
Action: freezerInspect,
Name: "freezer-index",
- Usage: "Dump out the index of a given freezer type",
- ArgsUsage: " ",
+ Usage: "Dump out the index of a specific freezer table",
+ ArgsUsage: " ",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.DBEngineFlag,
@@ -311,7 +310,7 @@ func inspect(ctx *cli.Context) error {
start []byte
)
if ctx.NArg() > 2 {
- return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
+ return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
}
if ctx.NArg() >= 1 {
if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
@@ -519,25 +518,19 @@ func dbDumpTrie(ctx *cli.Context) error {
func freezerInspect(ctx *cli.Context) error {
var (
- start, end int64
- disableSnappy bool
- err error
+ start, end int64
+ err error
)
- if ctx.NArg() < 3 {
+ if ctx.NArg() < 4 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
- kind := ctx.Args().Get(0)
- if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
- var options []string
- for opt := range rawdb.FreezerNoSnappy {
- options = append(options, opt)
- }
- sort.Strings(options)
- return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
- } else {
- disableSnappy = noSnap
- }
- if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
+
+ var (
+ freezerType = ctx.Args().Get(0)
+ tableType = ctx.Args().Get(1)
+ )
+
+ if start, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
log.Info("Could read start-param", "error", err)
return err
}
@@ -545,16 +538,20 @@ func freezerInspect(ctx *cli.Context) error {
log.Info("Could read count param", "error", err)
return err
}
+
stack, _ := makeConfigNode(ctx)
defer stack.Close()
- path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
- log.Info("Opening freezer", "location", path, "name", kind)
- if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil {
- return err
- } else {
- f.DumpIndex(start, end)
+ // Open the Freezer Database with mode read-only
+ db := utils.MakeChainDatabase(ctx, stack, true)
+ defer db.Close()
+
+ ancient, err := db.AncientDatadir()
+ if err != nil {
+ log.Info("Failed to retrive ancient root", "err", err)
}
- return nil
+
+ return rawdb.InspectFreezerTable(ancient, freezerType, tableType, start, end)
+
}
// ParseHexOrString tries to hexdecode b, but if the prefix is missing, it instead just returns the raw bytes
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 1a5377eeb0..5c254ad413 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -92,7 +92,7 @@ var (
}
AncientFlag = &flags.DirectoryFlag{
Name: "datadir.ancient",
- Usage: "Data directory for ancient chain segments (default = inside chaindata)",
+ Usage: "Root directory for ancient data (default = inside chaindata)",
Category: flags.EthCategory,
}
MinFreeDiskSpaceFlag = &flags.DirectoryFlag{
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 1aadd5ada1..4f1994bba3 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -38,7 +38,7 @@ import (
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
var data []byte
db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
- data, _ = reader.Ancient(freezerHashTable, number)
+ data, _ = reader.Ancient(chainFreezerHashTable, number)
if len(data) == 0 {
// Get it by hash from leveldb
data, _ = db.Get(headerHashKey(number))
@@ -305,7 +305,7 @@ func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValu
// First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains
// the canonical data.
- data, _ = reader.Ancient(freezerHeaderTable, number)
+ data, _ = reader.Ancient(chainFreezerHeaderTable, number)
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
return nil
}
@@ -381,7 +381,7 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
// isCanon is an internal utility method, to check whether the given number/hash
// is part of the ancient (canon) set.
func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
- h, err := reader.Ancient(freezerHashTable, number)
+ h, err := reader.Ancient(chainFreezerHashTable, number)
if err != nil {
return false
}
@@ -397,7 +397,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
- data, _ = reader.Ancient(freezerBodiesTable, number)
+ data, _ = reader.Ancient(chainFreezerBodiesTable, number)
return nil
}
// If not, try reading from leveldb
@@ -412,7 +412,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
var data []byte
db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
- data, _ = reader.Ancient(freezerBodiesTable, number)
+ data, _ = reader.Ancient(chainFreezerBodiesTable, number)
if len(data) > 0 {
return nil
}
@@ -505,7 +505,7 @@ func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
- data, _ = reader.Ancient(freezerDifficultyTable, number)
+ data, _ = reader.Ancient(chainFreezerDifficultyTable, number)
return nil
}
// If not, try reading from leveldb
@@ -565,7 +565,7 @@ func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawVa
db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
- data, _ = reader.Ancient(freezerReceiptTable, number)
+ data, _ = reader.Ancient(chainFreezerReceiptTable, number)
return nil
}
// If not, try reading from leveldb
@@ -802,19 +802,19 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts
func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
num := block.NumberU64()
- if err := op.AppendRaw(freezerHashTable, num, block.Hash().Bytes()); err != nil {
+ if err := op.AppendRaw(chainFreezerHashTable, num, block.Hash().Bytes()); err != nil {
return fmt.Errorf("can't add block %d hash: %v", num, err)
}
- if err := op.Append(freezerHeaderTable, num, header); err != nil {
+ if err := op.Append(chainFreezerHeaderTable, num, header); err != nil {
return fmt.Errorf("can't append block header %d: %v", num, err)
}
- if err := op.Append(freezerBodiesTable, num, block.Body()); err != nil {
+ if err := op.Append(chainFreezerBodiesTable, num, block.Body()); err != nil {
return fmt.Errorf("can't append block body %d: %v", num, err)
}
- if err := op.Append(freezerReceiptTable, num, receipts); err != nil {
+ if err := op.Append(chainFreezerReceiptTable, num, receipts); err != nil {
return fmt.Errorf("can't append block %d receipts: %v", num, err)
}
- if err := op.Append(freezerDifficultyTable, num, td); err != nil {
+ if err := op.Append(chainFreezerDifficultyTable, num, td); err != nil {
return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
}
return nil
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
new file mode 100644
index 0000000000..627a611998
--- /dev/null
+++ b/core/rawdb/ancient_scheme.go
@@ -0,0 +1,89 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import "fmt"
+
+// The list of table names of chain freezer. (headers, hashes, bodies, difficulties)
+
+const (
+ // chainFreezerHeaderTable indicates the name of the freezer header table.
+ chainFreezerHeaderTable = "headers"
+
+ // chainFreezerHashTable indicates the name of the freezer canonical hash table.
+ chainFreezerHashTable = "hashes"
+
+ // chainFreezerBodiesTable indicates the name of the freezer block body table.
+ chainFreezerBodiesTable = "bodies"
+
+ // chainFreezerReceiptTable indicates the name of the freezer receipts table.
+ chainFreezerReceiptTable = "receipts"
+
+ // chainFreezerDifficultyTable indicates the name of the freezer total difficulty table.
+ chainFreezerDifficultyTable = "diffs"
+)
+
+// chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables.
+// Hashes and difficulties don't compress well.
+var chainFreezerNoSnappy = map[string]bool{
+ chainFreezerHeaderTable: false,
+ chainFreezerHashTable: true,
+ chainFreezerBodiesTable: false,
+ chainFreezerReceiptTable: false,
+ chainFreezerDifficultyTable: true,
+}
+
+// The list of identifiers of ancient stores. It can split more in the futures.
+var (
+ chainFreezerName = "chain" // the folder name of chain segment ancient store.
+)
+
+// freezers the collections of all builtin freezers.
+var freezers = []string{chainFreezerName}
+
+// InspectFreezerTable dumps out the index of a specific freezer table. The passed
+// ancient indicates the path of root ancient directory where the chain freezer can
+// be opened. Start and end specifiy the range for dumping out indexes.
+// Note this function can only used for debugging purpose.
+func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
+ var (
+ path string
+ tables map[string]bool
+ )
+
+ switch freezerName {
+ case chainFreezerName:
+ path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
+ default:
+ return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
+ }
+ noSnappy, exist := tables[tableName]
+ if !exist {
+ // If the table name does not exist in the tables, return an error.
+ var names []string
+ for name := range tables {
+ names = append(names, name)
+ }
+ return fmt.Errorf("unknown table name, supported ones: %v", names)
+ }
+ table, err := newFreezerTable(path, tableName, noSnappy)
+ if err != nil {
+ return err
+ }
+ table.dumpIndexStdout(start, end)
+ return nil
+}
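
For illustration, a minimal sketch (outside this patch) of how the new debugging
helper might be invoked; the ancient directory path below is a hypothetical
placeholder, while the "chain" and "headers" names come from the tables defined above:

    package main

    import (
        "log"

        "github.com/ethereum/go-ethereum/core/rawdb"
    )

    func main() {
        // Dump index entries 0..99 of the chain freezer's header table.
        ancient := "/data/ronin/chaindata/ancient" // hypothetical path
        if err := rawdb.InspectFreezerTable(ancient, "chain", "headers", 0, 100); err != nil {
            log.Fatalf("inspect failed: %v", err)
        }
    }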
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
index b3fd9f2880..d6e3ac5646 100644
--- a/core/rawdb/chain_freezer.go
+++ b/core/rawdb/chain_freezer.go
@@ -270,19 +270,19 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash
}
// Write to the batch.
- if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil {
+ if err := op.AppendRaw(chainFreezerHashTable, number, hash[:]); err != nil {
return fmt.Errorf("can't write hash to freezer: %v", err)
}
- if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil {
+ if err := op.AppendRaw(chainFreezerHeaderTable, number, header); err != nil {
return fmt.Errorf("can't write header to freezer: %v", err)
}
- if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil {
+ if err := op.AppendRaw(chainFreezerBodiesTable, number, body); err != nil {
return fmt.Errorf("can't write body to freezer: %v", err)
}
- if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil {
+ if err := op.AppendRaw(chainFreezerReceiptTable, number, receipts); err != nil {
return fmt.Errorf("can't write receipts to freezer: %v", err)
}
- if err := op.AppendRaw(freezerDifficultyTable, number, td); err != nil {
+ if err := op.AppendRaw(chainFreezerDifficultyTable, number, td); err != nil {
return fmt.Errorf("can't write td to freezer: %v", err)
}
diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go
index 74ac5787b7..2cb57a04e4 100644
--- a/core/rawdb/chain_iterator.go
+++ b/core/rawdb/chain_iterator.go
@@ -50,7 +50,7 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
if i+count > frozen {
count = frozen - i
}
- data, err := db.AncientRange(freezerHashTable, i, count, 32*count)
+ data, err := db.AncientRange(chainFreezerHashTable, i, count, 32*count)
if err != nil {
log.Crit("Failed to init database from freezer", "err", err)
}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index ba91397cc8..14038cc077 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"os"
+ "path"
"path/filepath"
"time"
@@ -153,12 +154,36 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
return &nofreezedb{KeyValueStore: db}
}
+// resolveChainFreezerDir is a helper function which resolves the absolute path
+// of the chain freezer by considering backward compatibility.
+func resolveChainFreezerDir(ancient string) string {
+ // Check if the chain freezer is already present in the specified
+ // sub folder. If not, there are two possibilities:
+ // - the chain freezer is not initialized
+ // - it's in the legacy location: the chain freezer is present in the root ancient folder
+
+ freezer := path.Join(ancient, chainFreezerName)
+ if !common.FileExist(freezer) {
+ if !common.FileExist(ancient) {
+ // The entire ancient store is not initialized yet; still use the sub
+ // folder for initialization.
+ } else {
+ // The ancient root is already initialized, so we assume the chain
+ // freezer is also initialized and located in the root folder. In
+ // this case, fall back to the legacy location.
+ freezer = ancient
+ log.Info("Found legacy ancient chain path", "location", ancient)
+ }
+ }
+ return freezer
+}
+
// NewDatabaseWithFreezer creates a high level database on top of a given key-
// value data store with a freezer moving immutable chain segments into cold
// storage.
-func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
+func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
// Create the idle freezer instance
- frdb, err := newChainFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy)
+ frdb, err := newChainFreezer(resolveChainFreezerDir(ancient), namespace, readonly, freezerTableSize, chainFreezerNoSnappy)
if err != nil {
return nil, err
}
@@ -189,7 +214,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
// If the freezer already contains something, ensure that the genesis blocks
// match, otherwise we might mix up freezers across chains and destroy both
// the freezer and the key-value store.
- frgenesis, err := frdb.Ancient(freezerHashTable, 0)
+ frgenesis, err := frdb.Ancient(chainFreezerHashTable, 0)
if err != nil {
return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
} else if !bytes.Equal(kvgenesis, frgenesis) {
@@ -235,7 +260,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
}()
}
return &freezerdb{
- ancientRoot: freezer, // o.AncientsDirectory
+ ancientRoot: ancient, // o.AncientsDirectory
KeyValueStore: db,
AncientStore: frdb,
}, nil
@@ -517,7 +542,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
}
// Inspect append-only file store then.
ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
- for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
+ for i, category := range []string{chainFreezerHeaderTable, chainFreezerBodiesTable, chainFreezerReceiptTable, chainFreezerHashTable, chainFreezerDifficultyTable} {
if size, err := db.AncientSize(category); err == nil {
*ancientSizes[i] += common.StorageSize(size)
total += common.StorageSize(size)
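
A standalone sketch of the fallback rule implemented by resolveChainFreezerDir
above, assuming only the standard library (directory names are illustrative):

    package main

    import (
        "fmt"
        "os"
        "path"
    )

    // fileExist stands in for common.FileExist in this sketch.
    func fileExist(p string) bool {
        _, err := os.Stat(p)
        return err == nil
    }

    // resolveDir prefers the "chain" sub folder; it falls back to the ancient
    // root only when the root exists but the sub folder does not (legacy layout).
    func resolveDir(ancient string) string {
        freezer := path.Join(ancient, "chain")
        if !fileExist(freezer) && fileExist(ancient) {
            freezer = ancient // legacy location
        }
        return freezer
    }

    func main() {
        fmt.Println(resolveDir("/data/ancient"))
    }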
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index a9b86a3c1c..df92836668 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -67,8 +67,6 @@ type Freezer struct {
frozen atomic.Uint64 // Number of items already frozen
threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
- datadir string // Path of root directory of ancient store
-
// This lock synchronizes writers and the truncate operation, as well as
// the "atomic" (batched) read operations.
writeLock sync.RWMutex
@@ -117,7 +115,6 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
instanceLock: lock,
trigger: make(chan chan struct{}),
quit: make(chan struct{}),
- datadir: datadir,
}
freezer.threshold.Store(params.FullImmutabilityThreshold)
@@ -315,8 +312,3 @@ func (f *Freezer) repair() error {
f.frozen.Store(min)
return nil
}
-
-// AncientDatadir returns the root directory path of the ancient store.
-func (f *Freezer) AncientDatadir() (string, error) {
- return f.datadir, nil
-}
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index c6590d2442..d15d443943 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -112,8 +112,8 @@ type freezerTable struct {
lock sync.RWMutex // Mutex protecting the data file descriptors
}
-// NewFreezerTable opens the given path as a freezer table.
-func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
+// newFreezerTable opens the given path as a freezer table.
+func newFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
}
@@ -704,7 +704,7 @@ func (t *freezerTable) Sync() error {
// DumpIndex is a debug print utility function, mainly for testing. It can also
// be used to analyse a live freezer table index.
-func (t *freezerTable) DumpIndex(start, stop int64) {
+func (t *freezerTable) dumpIndexStdout(start, stop int64) {
t.dumpIndex(os.Stdout, start, stop)
}
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index 0401b48a6d..57ddde49f7 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -732,7 +732,7 @@ func TestSequentialRead(t *testing.T) {
}
// Write 15 bytes 30 times
writeChunks(t, f, 30, 15)
- f.DumpIndex(0, 30)
+ f.dumpIndexStdout(0, 30)
f.Close()
}
{ // Open it, iterate, verify iteration
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 6c9544ad95..d92a0de0f9 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -114,33 +114,6 @@ var (
preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
)
-const (
- // freezerHeaderTable indicates the name of the freezer header table.
- freezerHeaderTable = "headers"
-
- // freezerHashTable indicates the name of the freezer canonical hash table.
- freezerHashTable = "hashes"
-
- // freezerBodiesTable indicates the name of the freezer block body table.
- freezerBodiesTable = "bodies"
-
- // freezerReceiptTable indicates the name of the freezer receipts table.
- freezerReceiptTable = "receipts"
-
- // freezerDifficultyTable indicates the name of the freezer total difficulty table.
- freezerDifficultyTable = "diffs"
-)
-
-// FreezerNoSnappy configures whether compression is disabled for the ancient-tables.
-// Hashes and difficulties don't compress well.
-var FreezerNoSnappy = map[string]bool{
- freezerHeaderTable: false,
- freezerHashTable: true,
- freezerBodiesTable: false,
- freezerReceiptTable: false,
- freezerDifficultyTable: true,
-}
-
// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
// fields.
type LegacyTxLookupEntry struct {
diff --git a/ethdb/database.go b/ethdb/database.go
index 913cfc7880..3ddfbbac08 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -148,7 +148,6 @@ type Writer interface {
type AncientStore interface {
AncientReader
AncientWriter
- AncientStater
io.Closer
}
diff --git a/node/node.go b/node/node.go
index 2cf85519f6..b7000b339c 100644
--- a/node/node.go
+++ b/node/node.go
@@ -602,7 +602,7 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r
// also attaching a chain freezer to it that moves ancient chain data from the
// database to immutable append-only files. If the node is an ephemeral one, a
// memory database is returned.
-func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer, namespace string, readonly bool) (ethdb.Database, error) {
+func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
n.lock.Lock()
defer n.lock.Unlock()
if n.state == closedState {
@@ -613,17 +613,10 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer,
if n.config.DataDir == "" {
db = rawdb.NewMemoryDatabase()
} else {
- root := n.ResolvePath(name)
- switch {
- case freezer == "":
- freezer = filepath.Join(root, "ancient")
- case !filepath.IsAbs(freezer):
- freezer = n.ResolvePath(freezer)
- }
db, err = rawdb.Open(rawdb.OpenOptions{
Type: n.config.DBEngine,
Directory: n.ResolvePath(name),
- AncientsDirectory: freezer,
+ AncientsDirectory: n.ResolveAncient(name, ancient),
Namespace: namespace,
Cache: cache,
Handles: handles,
@@ -637,6 +630,17 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, freezer,
return db, err
}
+// ResolveAncient returns the absolute path of the root ancient directory.
+func (n *Node) ResolveAncient(name string, ancient string) string {
+ switch {
+ case ancient == "": // Use the default ancient directory
+ ancient = filepath.Join(n.ResolvePath(name), "ancient")
+ case !filepath.IsAbs(ancient): // ancient is a relative path, resolved against the instance directory
+ ancient = n.ResolvePath(ancient)
+ }
+ return ancient
+}
+
// ResolvePath returns the absolute path of a resource in the instance directory.
func (n *Node) ResolvePath(x string) string {
return n.config.ResolvePath(x)
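
The path resolution performed by ResolveAncient can be restated as a small
self-contained sketch; the instance directory below is hypothetical and
Node.ResolvePath is stubbed with a plain filepath.Join:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func resolveAncient(instanceDir, name, ancient string) string {
        switch {
        case ancient == "": // default: "ancient" under the database directory
            ancient = filepath.Join(instanceDir, name, "ancient")
        case !filepath.IsAbs(ancient): // relative: resolve against the instance directory
            ancient = filepath.Join(instanceDir, ancient)
        }
        return ancient
    }

    func main() {
        fmt.Println(resolveAncient("/data/node", "chaindata", ""))          // /data/node/chaindata/ancient
        fmt.Println(resolveAncient("/data/node", "chaindata", "cold"))      // /data/node/cold
        fmt.Println(resolveAncient("/data/node", "chaindata", "/mnt/cold")) // /mnt/cold
    }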
From 468e247a58b5e3ea6b800b8c29b82773726ad9c9 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Mon, 16 Sep 2024 11:41:29 +0700
Subject: [PATCH 09/41] all: move genesis init to blockchain (#570)
* all: move genesis initialization to blockchain
* all: fix test
---
accounts/abi/bind/backends/simulated.go | 2 +-
cmd/utils/flags.go | 5 +-
consensus/clique/clique_test.go | 7 +-
consensus/clique/snapshot_test.go | 3 +-
consensus/consortium/v2/consortium_test.go | 49 +++++----
core/bench_test.go | 7 +-
core/block_validator_test.go | 8 +-
core/blockchain.go | 14 ++-
core/blockchain_repair_test.go | 14 +--
core/blockchain_sethead_test.go | 5 +-
core/blockchain_snapshot_test.go | 35 ++++---
core/blockchain_test.go | 112 +++++++++++----------
core/chain_makers_test.go | 2 +-
core/dao_test.go | 35 +++++--
core/genesis.go | 20 ++++
core/genesis_test.go | 2 +-
core/headerchain_test.go | 3 +-
core/state_processor_test.go | 14 +--
core/vote/vote_pool_test.go | 21 ++--
eth/backend.go | 22 ++--
eth/gasprice/gasprice_test.go | 2 +-
eth/handler_eth_test.go | 4 +-
eth/handler_test.go | 9 +-
eth/protocols/eth/handler_test.go | 7 +-
eth/tracers/api_test.go | 2 +-
internal/ethapi/api_test.go | 2 +-
light/odr_test.go | 3 +-
light/trie_test.go | 3 +-
light/txpool_test.go | 3 +-
miner/miner_test.go | 12 +--
miner/worker_test.go | 4 +-
params/config.go | 30 ++++++
tests/block_test_util.go | 5 +-
tests/fuzzers/les/les-fuzzer.go | 2 +-
34 files changed, 290 insertions(+), 178 deletions(-)
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 999365734c..96e85a9fa5 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -78,7 +78,7 @@ type SimulatedBackend struct {
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
genesis.MustCommit(database)
- blockchain, _ := core.NewBlockChain(database, nil, genesis.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := core.NewBlockChain(database, nil, &genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
backend := &SimulatedBackend{
database: database,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 5c254ad413..976c8191ce 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -2272,7 +2272,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chainDb ethdb.Database) {
var err error
chainDb = MakeChainDatabase(ctx, stack, false) // TODO(rjl493456442) support read-only database
- config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx), false)
+ gspec := MakeGenesis(ctx)
+ config, err := core.LoadChainConfig(chainDb, gspec)
if err != nil {
Fatalf("%v", err)
}
@@ -2331,7 +2332,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
// TODO(rjl493456442) disable snapshot generation/wiping if the chain is read only.
// Disable transaction indexing/unindexing by default.
- chain, err = core.NewBlockChain(chainDb, cache, config, engine, vmcfg, nil, nil)
+ chain, err = core.NewBlockChain(chainDb, cache, gspec, nil, engine, vmcfg, nil, nil)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}
diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go
index 1e11fe0d70..b82ea54b71 100644
--- a/consensus/clique/clique_test.go
+++ b/consensus/clique/clique_test.go
@@ -45,6 +45,7 @@ func TestReimportMirroredState(t *testing.T) {
signer = new(types.HomesteadSigner)
)
genspec := &core.Genesis{
+ Config: params.AllCliqueProtocolChanges,
ExtraData: make([]byte, extraVanity+common.AddressLength+extraSeal),
Alloc: map[common.Address]core.GenesisAccount{
addr: {Balance: big.NewInt(10000000000000000)},
@@ -55,7 +56,7 @@ func TestReimportMirroredState(t *testing.T) {
genesis := genspec.MustCommit(db)
// Generate a batch of blocks, each properly signed
- chain, _ := core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
defer chain.Stop()
blocks, _ := core.GenerateChain(params.AllCliqueProtocolChanges, genesis, engine, db, 3, func(i int, block *core.BlockGen) {
@@ -89,7 +90,7 @@ func TestReimportMirroredState(t *testing.T) {
db = rawdb.NewMemoryDatabase()
genspec.MustCommit(db)
- chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
defer chain.Stop()
if _, err := chain.InsertChain(blocks[:2], nil); err != nil {
@@ -102,7 +103,7 @@ func TestReimportMirroredState(t *testing.T) {
// Simulate a crash by creating a new chain on top of the database, without
// flushing the dirty states out. Insert the last block, triggering a sidechain
// reimport.
- chain, _ = core.NewBlockChain(db, nil, params.AllCliqueProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
defer chain.Stop()
if _, err := chain.InsertChain(blocks[2:], nil); err != nil {
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index 3a944d67cc..e10baea556 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -411,6 +411,7 @@ func TestClique(t *testing.T) {
Period: 1,
Epoch: tt.epoch,
}
+ genesis.Config = &config
engine := New(config.Clique, db)
engine.fakeDiff = true
@@ -450,7 +451,7 @@ func TestClique(t *testing.T) {
batches[len(batches)-1] = append(batches[len(batches)-1], block)
}
// Pass all the headers through clique and ensure tallying succeeds
- chain, err := core.NewBlockChain(db, nil, &config, engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(db, nil, genesis, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Errorf("test %d: failed to create test chain: %v", i, err)
continue
diff --git a/consensus/consortium/v2/consortium_test.go b/consensus/consortium/v2/consortium_test.go
index 197b6f94a4..f80c86ca52 100644
--- a/consensus/consortium/v2/consortium_test.go
+++ b/consensus/consortium/v2/consortium_test.go
@@ -1437,11 +1437,12 @@ func TestVerifyVote(t *testing.T) {
}
db := rawdb.NewMemoryDatabase()
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: params.TestChainConfig,
BaseFee: big.NewInt(params.InitialBaseFee),
- }).MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ }
+ genesis := gspec.MustCommit(db)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1, nil, true)
if _, err := chain.InsertChain(bs[:], nil); err != nil {
@@ -1568,9 +1569,10 @@ func TestKnownBlockReorg(t *testing.T) {
},
}
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: &chainConfig,
- }).MustCommit(db)
+ }
+ genesis := gspec.MustCommit(db)
mock := &mockContract{
validators: make(map[common.Address]blsCommon.PublicKey),
@@ -1588,7 +1590,7 @@ func TestKnownBlockReorg(t *testing.T) {
db: db,
}
- chain, _ := core.NewBlockChain(db, nil, &chainConfig, &v2, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, &v2, vm.Config{}, nil, nil)
extraData := [consortiumCommon.ExtraVanity + consortiumCommon.ExtraSeal]byte{}
blocks, _ := core.GenerateConsortiumChain(
@@ -1815,13 +1817,14 @@ func TestUpgradeRoninTrustedOrg(t *testing.T) {
},
}
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: &chainConfig,
Alloc: core.GenesisAlloc{
// Make proxy address non-empty to avoid being deleted
common.Address{0x10}: core.GenesisAccount{Balance: common.Big1},
},
- }).MustCommit(db)
+ }
+ genesis := gspec.MustCommit(db)
mock := &mockContract{
validators: map[common.Address]blsCommon.PublicKey{
@@ -1841,7 +1844,7 @@ func TestUpgradeRoninTrustedOrg(t *testing.T) {
},
}
- chain, _ := core.NewBlockChain(db, nil, &chainConfig, &v2, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, &v2, vm.Config{}, nil, nil)
extraData := [consortiumCommon.ExtraVanity + consortiumCommon.ExtraSeal]byte{}
parent := genesis
@@ -1960,9 +1963,10 @@ func TestUpgradeAxieProxyCode(t *testing.T) {
Code: code,
},
}
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: chainConfig,
- }).MustCommit(db)
+ }
+ genesis := gspec.MustCommit(db)
mock := &mockTrippContract{
checkpointValidators: []validatorWithBlsWeight{
validatorWithBlsWeight{
@@ -1989,7 +1993,7 @@ func TestUpgradeAxieProxyCode(t *testing.T) {
testTrippEffective: true,
}
- chain, _ := core.NewBlockChain(db, nil, chainConfig, v2, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, v2, vm.Config{}, nil, nil)
extraData := &finality.HeaderExtraData{}
parent := genesis
@@ -2082,13 +2086,14 @@ func TestSystemTransactionOrder(t *testing.T) {
},
}
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: &chainConfig,
Alloc: core.GenesisAlloc{
// Make proxy address non-empty to avoid being deleted
common.Address{0x10}: core.GenesisAccount{Balance: common.Big1},
},
- }).MustCommit(db)
+ }
+ genesis := gspec.MustCommit(db)
mock := &mockContract{
validators: map[common.Address]blsCommon.PublicKey{
@@ -2108,7 +2113,7 @@ func TestSystemTransactionOrder(t *testing.T) {
},
}
- chain, _ := core.NewBlockChain(db, nil, &chainConfig, &v2, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, &v2, vm.Config{}, nil, nil)
extraData := [consortiumCommon.ExtraVanity + consortiumCommon.ExtraSeal]byte{}
signer := types.NewEIP155Signer(big.NewInt(2021))
@@ -2205,12 +2210,13 @@ func TestIsPeriodBlock(t *testing.T) {
RoninValidatorSet: common.HexToAddress("0xaa"),
},
}
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: &chainConfig,
BaseFee: big.NewInt(params.InitialBaseFee),
Timestamp: midnight, // genesis at day 1
- }).MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, &chainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ }
+ genesis := gspec.MustCommit(db)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
// create chain of up to 399 blocks, all of them are not period block
bs, _ := core.GenerateChain(&chainConfig, genesis, ethash.NewFaker(), db, 399, nil, true) // create chain of up to 399 blocks
if _, err := chain.InsertChain(bs[:], nil); err != nil {
@@ -2301,12 +2307,13 @@ func TestIsTrippEffective(t *testing.T) {
},
TrippPeriod: new(big.Int).SetUint64(now / dayInSeconds),
}
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: &chainConfig,
BaseFee: big.NewInt(params.InitialBaseFee),
Timestamp: midnight, // genesis at day 1
- }).MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, &chainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ }
+ genesis := gspec.MustCommit(db)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
// create chain of up to 399 blocks, all of them are not Tripp effective
bs, _ := core.GenerateChain(&chainConfig, genesis, ethash.NewFaker(), db, 399, nil, true)
if _, err := chain.InsertChain(bs[:], nil); err != nil {
diff --git a/core/bench_test.go b/core/bench_test.go
index 600fa2e4be..0fd7082e0c 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -201,7 +201,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
- chainman, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chainman, _ := NewBlockChain(db, nil, &gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()
@@ -316,7 +316,10 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
- chain, err := NewBlockChain(db, &cacheConfig, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
+ gspec := &Genesis{
+ Config: params.TestChainConfig,
+ }
+ chain, err := NewBlockChain(db, &cacheConfig, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
b.Fatalf("error creating chain: %v", err)
}
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index dcfe9af5d9..2ba2cff31e 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -42,7 +42,7 @@ func TestHeaderVerification(t *testing.T) {
headers[i] = block.Header()
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(testdb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer chain.Stop()
for i := 0; i < len(blocks); i++ {
@@ -106,11 +106,11 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
var results <-chan error
if valid {
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(testdb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
} else {
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(testdb, nil, gspec, nil, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil, nil)
_, results = chain.engine.VerifyHeaders(chain, headers, seals)
chain.Stop()
}
@@ -173,7 +173,7 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
defer runtime.GOMAXPROCS(old)
// Start the verifications and immediately abort
- chain, _ := NewBlockChain(testdb, nil, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(testdb, nil, gspec, nil, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil, nil)
defer chain.Stop()
abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
diff --git a/core/blockchain.go b/core/blockchain.go
index 86fb5764bc..b38826618c 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -243,7 +243,7 @@ type futureBlock struct {
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
-func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
+func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, overrideArrowGlacier *big.Int, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) {
if cacheConfig == nil {
cacheConfig = defaultCacheConfig
}
@@ -260,6 +260,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
internalTxsCache, _ := lru.New[common.Hash, []*types.InternalTransaction](internalTxsCacheLimit)
blobSidecarsCache, _ := lru.New[common.Hash, types.BlobSidecars](blobSidecarsCacheLimit)
+ chainConfig, genesisHash, genesisErr := SetupGenesisBlockWithOverride(db, genesis, overrideArrowGlacier, false)
+ if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
+ return nil, genesisErr
+ }
+ log.Info("Initialised chain configuration", "config", chainConfig)
bc := &BlockChain{
chainConfig: chainConfig,
@@ -449,7 +454,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
// load the latest dirty accounts stored from last stop to cache
bc.loadLatestDirtyAccounts()
-
+ // Rewind the chain in case of an incompatible config upgrade.
+ if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
+ log.Warn("Rewinding chain to upgrade configuration", "err", compat)
+ bc.SetHead(compat.RewindTo)
+ rawdb.WriteChainConfig(db, genesisHash, chainConfig)
+ }
return bc, nil
}
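
Under the reworked signature, callers hand NewBlockChain the genesis spec and an
optional fork override instead of a bare chain config; genesis setup (and any
compatibility rewind) now happens inside the constructor. A minimal sketch, with
the in-memory database and faked engine chosen purely for illustration:

    package main

    import (
        "log"

        "github.com/ethereum/go-ethereum/consensus/ethash"
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/vm"
        "github.com/ethereum/go-ethereum/params"
    )

    func main() {
        db := rawdb.NewMemoryDatabase()
        gspec := &core.Genesis{Config: params.TestChainConfig}
        // The nil fourth argument is the overrideArrowGlacier override.
        chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
        if err != nil {
            log.Fatalf("failed to create chain: %v", err)
        }
        defer chain.Stop()
    }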
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index d5e092a472..9e100a854a 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1774,7 +1774,8 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
// Initialize a fresh chain
var (
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
+ genesis = gspec.MustCommit(db)
engine = ethash.NewFullFaker()
config = &CacheConfig{
TrieCleanLimit: 256,
@@ -1787,7 +1788,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
config.SnapshotLimit = 256
config.SnapshotWait = true
}
- chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -1845,7 +1846,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
defer db.Close()
- chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -1910,7 +1911,8 @@ func TestIssue23496(t *testing.T) {
// Initialize a fresh chain
var (
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
+ genesis = gspec.MustCommit(db)
engine = ethash.NewFullFaker()
config = &CacheConfig{
TrieCleanLimit: 256,
@@ -1920,7 +1922,7 @@ func TestIssue23496(t *testing.T) {
SnapshotWait: true,
}
)
- chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -1968,7 +1970,7 @@ func TestIssue23496(t *testing.T) {
}
defer db.Close()
- chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index 02d1fea76b..11c7693436 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -1973,7 +1973,8 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
// Initialize a fresh chain
var (
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
+ genesis = gspec.MustCommit(db)
engine = ethash.NewFullFaker()
config = &CacheConfig{
TrieCleanLimit: 256,
@@ -1986,7 +1987,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
config.SnapshotLimit = 256
config.SnapshotWait = true
}
- chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index 10d9f37ddf..e5affaab9b 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -75,7 +75,8 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
}
// Initialize a fresh chain
var (
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ gspec = &Genesis{Config: params.AllEthashProtocolChanges, BaseFee: big.NewInt(params.InitialBaseFee)}
+ genesis = gspec.MustCommit(db)
engine = ethash.NewFullFaker()
gendb = rawdb.NewMemoryDatabase()
@@ -84,7 +85,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
// will happen during the block insertion.
cacheConfig = defaultCacheConfig
)
- chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
@@ -227,7 +228,8 @@ func (snaptest *snapshotTest) test(t *testing.T) {
// Restart the chain normally
chain.Stop()
- newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: params.AllEthashProtocolChanges}
+ newchain, err := NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -268,13 +270,14 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
// the crash, we do restart twice here: one after the crash and one
// after the normal stop. It's used to ensure the broken snapshot
// can be detected all the time.
- newchain, err := NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: params.AllEthashProtocolChanges}
+ newchain, err := NewBlockChain(newdb, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
newchain.Stop()
- newchain, err = NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(newdb, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -310,7 +313,8 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
}
- newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: params.AllEthashProtocolChanges}
+ newchain, err := NewBlockChain(snaptest.db, cacheConfig, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -318,7 +322,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
newchain.Stop()
// Restart the chain with enabling the snapshot
- newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -345,8 +349,8 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
// Rewind the chain if setHead operation is required.
chain.SetHead(snaptest.setHead)
chain.Stop()
-
- newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: params.AllEthashProtocolChanges}
+ newchain, err := NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -376,8 +380,8 @@ func (snaptest *restartCrashSnapshotTest) test(t *testing.T) {
// Firstly, stop the chain properly, with all snapshot journal
// and state committed.
chain.Stop()
-
- newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: params.AllEthashProtocolChanges}
+ newchain, err := NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -394,7 +398,7 @@ func (snaptest *restartCrashSnapshotTest) test(t *testing.T) {
// journal and latest state will be committed
// Restart the chain after the crash
- newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -429,7 +433,8 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
}
- newchain, err := NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: params.AllEthashProtocolChanges}
+ newchain, err := NewBlockChain(snaptest.db, config, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -445,13 +450,13 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
SnapshotLimit: 256,
SnapshotWait: false, // Don't wait rebuild
}
- newchain, err = NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(snaptest.db, config, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
// Simulate the blockchain crash.
- newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 265393a699..bbc3d38f4e 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -60,11 +60,12 @@ var (
func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) {
var (
db = rawdb.NewMemoryDatabase()
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
+ genesis = gspec.MustCommit(db)
)
// Initialize a fresh chain with only a genesis block
- blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
@@ -530,7 +531,8 @@ func testReorgBadHashes(t *testing.T, full bool) {
blockchain.Stop()
// Create a new BlockChain and check that it rolled back the state.
- ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: blockchain.chainConfig}
+ ncm, err := NewBlockChain(blockchain.db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
@@ -643,7 +645,7 @@ func TestFastVsFullChains(t *testing.T) {
// Import the chain as an archive node for the comparison baseline
archiveDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer archive.Stop()
if n, err := archive.InsertChain(blocks, nil); err != nil {
@@ -652,7 +654,7 @@ func TestFastVsFullChains(t *testing.T) {
// Fast import the chain as a non-archive node to test
fastDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -676,7 +678,7 @@ func TestFastVsFullChains(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
@@ -798,7 +800,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
archiveCaching := *defaultCacheConfig
archiveCaching.TrieDirtyDisabled = true
- archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if n, err := archive.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
@@ -811,7 +813,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb, delfn := makeDb()
defer delfn()
- fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -831,7 +833,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a ancient-first node and ensure all pointers are updated
ancientDb, delfn := makeDb()
defer delfn()
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
@@ -850,7 +852,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a light node and ensure all pointers are updated
lightDb, delfn := makeDb()
defer delfn()
- light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ light, _ := NewBlockChain(lightDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
@@ -919,7 +921,7 @@ func TestChainTxReorgs(t *testing.T) {
}
}, true)
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if i, err := blockchain.InsertChain(chain, nil); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@@ -989,7 +991,7 @@ func TestLogReorgs(t *testing.T) {
signer = types.LatestSigner(gspec.Config)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
rmLogsCh := make(chan RemovedLogsEvent)
@@ -1042,7 +1044,7 @@ func TestLogRebirth(t *testing.T) {
genesis = gspec.MustCommit(db)
signer = types.LatestSigner(gspec.Config)
engine = ethash.NewFaker()
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
)
defer blockchain.Stop()
@@ -1105,7 +1107,7 @@ func TestSideLogRebirth(t *testing.T) {
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.LatestSigner(gspec.Config)
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
@@ -1180,7 +1182,7 @@ func TestReorgSideEvent(t *testing.T) {
signer = types.LatestSigner(gspec.Config)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {}, true)
@@ -1312,7 +1314,7 @@ func TestEIP155Transition(t *testing.T) {
genesis = gspec.MustCommit(db)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
@@ -1420,7 +1422,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
}
genesis = gspec.MustCommit(db)
)
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, block *BlockGen) {
@@ -1494,8 +1496,9 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
// and current header consistency
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1538,8 +1541,9 @@ func TestTrieForkGC(t *testing.T) {
// Import the canonical and fork chain side by side, forcing the trie cache to cache both
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1577,8 +1581,9 @@ func TestLargeReorgTrieGC(t *testing.T) {
// Import the shared chain and the original canonical one
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1639,7 +1644,7 @@ func TestBlockchainRecovery(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -1659,7 +1664,7 @@ func TestBlockchainRecovery(t *testing.T) {
rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
// Reopen broken blockchain again
- ancient, _ = NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if num := ancient.CurrentBlock().NumberU64(); num != 0 {
t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
@@ -1711,7 +1716,7 @@ func TestInsertReceiptChainRollback(t *testing.T) {
}
gspec := Genesis{Config: params.AllEthashProtocolChanges, BaseFee: big.NewInt(params.InitialBaseFee)}
gspec.MustCommit(ancientDb)
- ancientChain, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancientChain, _ := NewBlockChain(ancientDb, nil, &gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancientChain.Stop()
// Import the canonical header chain.
@@ -1771,8 +1776,9 @@ func TestLowDiffLongChain(t *testing.T) {
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1819,7 +1825,8 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*DefaultTriesInMemory, nil, true)
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: params.TestChainConfig}
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1916,8 +1923,9 @@ func testInsertKnownChainData(t *testing.T, typ string) {
}
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(chaindb)
defer os.RemoveAll(dir)
+ gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(chaindb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2025,8 +2033,9 @@ func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyCha
}, true)
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
}
@@ -2219,7 +2228,7 @@ func TestTransactionIndices(t *testing.T) {
// Import all blocks into ancient db
l := uint64(0)
- chain, err := NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
+ chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2244,7 +2253,7 @@ func TestTransactionIndices(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec.MustCommit(ancientDb)
- chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
+ chain, err = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2268,7 +2277,7 @@ func TestTransactionIndices(t *testing.T) {
limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
tails := []uint64{0, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */, 69 /* 132 - 64 + 1 */, 0}
for i, l := range limit {
- chain, err = NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
+ chain, err = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2346,7 +2355,7 @@ func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
// Import all blocks into ancient db, only HEAD-32 indices are kept.
l := uint64(32)
- chain, err := NewBlockChain(ancientDb, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, &l)
+ chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2410,7 +2419,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, &gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
b.Fatalf("failed to create tester chain: %v", err)
}
@@ -2492,7 +2501,8 @@ func TestSideImportPrunedBlocks(t *testing.T) {
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*DefaultTriesInMemory, nil, true)
diskdb := rawdb.NewMemoryDatabase()
(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ gspec := &Genesis{Config: params.TestChainConfig}
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2586,7 +2596,7 @@ func TestDeleteCreateRevert(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2699,7 +2709,7 @@ func TestDeleteRecreateSlots(t *testing.T) {
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, &chainConfig, engine, vm.Config{
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{
Debug: true,
Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
@@ -2781,7 +2791,7 @@ func TestDeleteRecreateAccount(t *testing.T) {
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, &chainConfig, engine, vm.Config{
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{
Debug: true,
Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
@@ -2956,7 +2966,7 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, &chainConfig, engine, vm.Config{
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
@@ -3089,7 +3099,7 @@ func TestInitThenFailCreateContract(t *testing.T) {
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
@@ -3179,7 +3189,7 @@ func TestEIP2718Transition(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, gspec.Config, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3274,7 +3284,7 @@ func TestEIP1559Transition(t *testing.T) {
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, gspec.Config, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3376,7 +3386,7 @@ func TestSponsoredTxTransitionBeforeMiko(t *testing.T) {
Config: &chainConfig,
}
genesis := gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, &chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -3455,7 +3465,7 @@ func TestSponsoredTxTransition(t *testing.T) {
},
}
genesis := gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, &chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -3744,7 +3754,7 @@ func TestTransientStorageReset(t *testing.T) {
})
// Initialize the blockchain with 1153 enabled.
- chain, err := NewBlockChain(db, nil, gspec.Config, engine, vmConfig, nil, nil)
+ chain, err := NewBlockChain(db, nil, gspec, nil, engine, vmConfig, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3835,7 +3845,7 @@ func TestEIP3651(t *testing.T) {
b.AddTx(tx)
})
- chain, err := NewBlockChain(db, nil, gspec.Config, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, nil, nil)
+ chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3939,7 +3949,7 @@ func TestInsertChainWithSidecars(t *testing.T) {
},
}
genesis := gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4024,7 +4034,7 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- chain, err = NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4048,7 +4058,7 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- chain, err = NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4096,7 +4106,7 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- chain, err = NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4158,7 +4168,7 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db := rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4192,7 +4202,7 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- chain, err = NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4240,7 +4250,7 @@ func TestSidecarsPruning(t *testing.T) {
},
}
genesis := gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 968c0f069e..22b59e4754 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -79,7 +79,7 @@ func ExampleGenerateChain() {
}, true)
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain, nil); err != nil {
diff --git a/core/dao_test.go b/core/dao_test.go
index 08faacd388..adf3464bd3 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -30,32 +30,47 @@ import (
// blocks based on their extradata fields.
func TestDAOForkRangeExtradata(t *testing.T) {
forkBlock := big.NewInt(32)
+ chainConfig := *params.NonActivatedConfig
+ chainConfig.HomesteadBlock = big.NewInt(0)
// Generate a common prefix for both pro-forkers and non-forkers
db := rawdb.NewMemoryDatabase()
- gspec := &Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}
+ gspec := &Genesis{
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ Config: &chainConfig,
+ }
genesis := gspec.MustCommit(db)
- prefix, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, int(forkBlock.Int64()-1), func(i int, gen *BlockGen) {}, true)
+ prefix, _ := GenerateChain(&chainConfig, genesis, ethash.NewFaker(), db, int(forkBlock.Int64()-1), func(i int, gen *BlockGen) {}, true)
// Create the concurrent, conflicting two nodes
proDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(proDb)
- proConf := *params.TestChainConfig
+ proConf := *params.NonActivatedConfig
+ proConf.HomesteadBlock = big.NewInt(0)
proConf.DAOForkBlock = forkBlock
proConf.DAOForkSupport = true
+ progspec := &Genesis{
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ Config: &proConf,
+ }
- proBc, _ := NewBlockChain(proDb, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil, nil)
+ proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer proBc.Stop()
conDb := rawdb.NewMemoryDatabase()
gspec.MustCommit(conDb)
- conConf := *params.TestChainConfig
+ conConf := *params.NonActivatedConfig
+ conConf.HomesteadBlock = big.NewInt(0)
conConf.DAOForkBlock = forkBlock
conConf.DAOForkSupport = false
+ congspec := &Genesis{
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ Config: &conConf,
+ }
- conBc, _ := NewBlockChain(conDb, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil, nil)
+ conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer conBc.Stop()
if _, err := proBc.InsertChain(prefix, nil); err != nil {
@@ -69,7 +84,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a pro-fork block, and try to feed into the no-fork chain
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ := NewBlockChain(db, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@@ -94,7 +109,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a no-fork block, and try to feed into the pro-fork chain
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ = NewBlockChain(db, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
@@ -120,7 +135,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, &conConf, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ := NewBlockChain(db, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@@ -140,7 +155,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
db = rawdb.NewMemoryDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, nil, &proConf, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ = NewBlockChain(db, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
diff --git a/core/genesis.go b/core/genesis.go
index 461ba2e211..ea0b88941c 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -331,6 +331,26 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
return newcfg, stored, nil
}
+// LoadChainConfig loads the stored chain config if the chain config
+// is already present in database, otherwise, return the config in the
+// provided genesis specification.
+func LoadChainConfig(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, error) {
+ if genesis != nil && genesis.Config == nil {
+ return params.AllEthashProtocolChanges, errGenesisNoConfig
+ }
+ stored := rawdb.ReadCanonicalHash(db, 0)
+ if (stored != common.Hash{}) {
+ storedcfg := rawdb.ReadChainConfig(db, stored)
+ if storedcfg != nil {
+ return storedcfg, nil
+ }
+ }
+ if genesis == nil {
+ genesis = DefaultGenesisBlock()
+ }
+ return genesis.Config, nil
+}
+
func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
switch {
case g != nil:
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 8399c377b2..7c3fc5352b 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -117,7 +117,7 @@ func TestSetupGenesis(t *testing.T) {
// Advance to block #4, past the homestead transition block of customg.
genesis := oldcustomg.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, oldcustomg.Config, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ bc, _ := NewBlockChain(db, nil, &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks, _ := GenerateChain(oldcustomg.Config, genesis, ethash.NewFaker(), db, 4, nil, true)
diff --git a/core/headerchain_test.go b/core/headerchain_test.go
index f3e40b6213..3dd7d8c624 100644
--- a/core/headerchain_test.go
+++ b/core/headerchain_test.go
@@ -71,7 +71,8 @@ func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus
func TestHeaderInsertion(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
- genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
+ genesis = gspec.MustCommit(db)
)
hc, err := NewHeaderChain(db, params.AllEthashProtocolChanges, ethash.NewFaker(), func() bool { return false })
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index e92f506c7d..30fdf55606 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -107,7 +107,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
@@ -242,7 +242,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
for i, tt := range []struct {
@@ -282,7 +282,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
for i, tt := range []struct {
@@ -336,7 +336,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
smallInitCode = [320]byte{}
)
@@ -399,7 +399,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
for i, tt := range []struct {
@@ -501,7 +501,7 @@ func TestBlobTxStateTransition(t *testing.T) {
)
gspec.Config.ConsortiumV2Block = common.Big0
gspec.Config.RoninTreasuryAddress = roninTreasuryAddress
- chain, _ := NewBlockChain(gendb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(gendb, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1, func(i int, block *BlockGen) {
blobHashes := make([]common.Hash, nBlobs)
for i := 0; i < nBlobs; i++ {
@@ -580,7 +580,7 @@ func TestBaseFee(t *testing.T) {
gspec.Config.ConsortiumV2Block = common.Big0
gspec.Config.RoninTreasuryAddress = roninTreasuryAddress
gspec.Config.VenokiBlock = common.Big0
- chain, _ := NewBlockChain(gendb, nil, &chainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(gendb, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1, func(i int, block *BlockGen) {
tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{
To: &addr,
diff --git a/core/vote/vote_pool_test.go b/core/vote/vote_pool_test.go
index 695db84cb7..67dce2b77b 100644
--- a/core/vote/vote_pool_test.go
+++ b/core/vote/vote_pool_test.go
@@ -95,12 +95,13 @@ func testVotePool(t *testing.T, isValidRules bool) {
// Create a database pre-initialize with a genesis block
db := rawdb.NewMemoryDatabase()
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
- }).MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ }
+ genesis := gspec.MustCommit(db)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
mux := new(event.TypeMux)
mockEngine := &mockPOSA{}
@@ -390,12 +391,13 @@ func TestVotePoolDosProtection(t *testing.T) {
// Create a database pre-initialize with a genesis block
db := rawdb.NewMemoryDatabase()
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
- }).MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ }
+ genesis := gspec.MustCommit(db)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 25, nil, true)
if _, err := chain.InsertChain(bs[:1], nil); err != nil {
@@ -516,12 +518,13 @@ func TestVotePoolWrongTargetNumber(t *testing.T) {
// Create a database pre-initialize with a genesis block
db := rawdb.NewMemoryDatabase()
- genesis := (&core.Genesis{
+ gspec := &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
- }).MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ }
+ genesis := gspec.MustCommit(db)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1, nil, true)
if _, err := chain.InsertChain(bs[:1], nil); err != nil {
diff --git a/eth/backend.go b/eth/backend.go
index 52f1b0f6bf..32e3f17331 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -140,11 +140,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier, false)
- if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
- return nil, genesisErr
- }
- log.Info("Initialised chain configuration", "config", chainConfig)
if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil {
log.Error("Failed to recover state", "error", err)
@@ -167,7 +162,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
log.Info("Unprotected transactions allowed")
}
ethAPI := ethapi.NewPublicBlockChainAPI(eth.APIBackend)
- eth.engine = ethconfig.CreateConsensusEngine(stack, chainConfig, ðashConfig, config.Miner.Notify, config.Miner.Noverify,
+ loadedChainConfig, err := core.LoadChainConfig(chainDb, config.Genesis)
+ if err != nil {
+ return nil, err
+ }
+ eth.engine = ethconfig.CreateConsensusEngine(stack, loadedChainConfig, ðashConfig, config.Miner.Notify, config.Miner.Noverify,
chainDb, ethAPI, config.SyncMode)
bcVersion := rawdb.ReadDatabaseVersion(chainDb)
@@ -204,7 +203,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
TriesInMemory: config.TriesInMemory,
}
)
- eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
+ eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, config.OverrideArrowGlacier, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
+ chainConfig := eth.blockchain.Config()
+ genesisHash := eth.blockchain.Genesis().Hash()
if err != nil {
return nil, err
}
@@ -219,13 +220,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
StartENRFilter(eth.blockchain, eth.p2pServer)
-
- // Rewind the chain in case of an incompatible config upgrade.
- if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
- log.Warn("Rewinding chain to upgrade configuration", "err", compat)
- eth.blockchain.SetHead(compat.RewindTo)
- rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
- }
eth.bloomIndexer.Start(eth.blockchain)
if config.BlobPool.Datadir != "" {
diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go
index b23877f653..e35f2eab20 100644
--- a/eth/gasprice/gasprice_test.go
+++ b/eth/gasprice/gasprice_test.go
@@ -142,7 +142,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
// Construct testing chain
diskdb := rawdb.NewMemoryDatabase()
gspec.MustCommit(diskdb)
- chain, err := core.NewBlockChain(diskdb, &core.CacheConfig{TrieCleanNoPrefetch: true}, &config, engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(diskdb, &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create local chain, %v", err)
}
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index c4f7ca1c8d..da85e829dc 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -120,8 +120,8 @@ func testForkIDSplit(t *testing.T, protocol uint) {
genesisNoFork = gspecNoFork.MustCommit(dbNoFork)
genesisProFork = gspecProFork.MustCommit(doFork)
- chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil)
- chainProFork, _ = core.NewBlockChain(doFork, nil, configProFork, engine, vm.Config{}, nil, nil)
+ chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil, nil)
+ chainProFork, _ = core.NewBlockChain(doFork, nil, gspecProFork, nil, engine, vm.Config{}, nil, nil)
blocksNoFork, _ = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil, true)
blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, doFork, 2, nil, true)
diff --git a/eth/handler_test.go b/eth/handler_test.go
index b9f6e3928c..137cd52e97 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -143,12 +143,13 @@ func newTestHandler() *testHandler {
func newTestHandlerWithBlocks(blocks int) *testHandler {
// Create a database pre-initialize with a genesis block
db := rawdb.NewMemoryDatabase()
- (&core.Genesis{
+ gspec := &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
- }).MustCommit(db)
+ }
+ gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, nil, true)
if _, err := chain.InsertChain(bs, nil); err != nil {
@@ -222,7 +223,7 @@ func newTestHandlerWithBlocks100(blocks int) (*testHandler, []*types.BlobTxSidec
},
}
gspec.MustCommit(db)
- chain, err := core.NewBlockChain(db, nil, &chainConfig, engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
panic(err)
}
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index d6cc356587..c805f76c11 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -66,12 +66,13 @@ func newTestBackend(blocks int) *testBackend {
func newTestBackendWithGenerator(blocks int, generator func(int, *core.BlockGen)) *testBackend {
// Create a database pre-initialize with a genesis block
db := rawdb.NewMemoryDatabase()
- (&core.Genesis{
+ gspec := &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}},
- }).MustCommit(db)
+ }
+ gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, generator, true)
if _, err := chain.InsertChain(bs, nil); err != nil {
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index 8f46207fab..1b397d08f1 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -90,7 +90,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i
}
//chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit
- chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec.Config, backend.engine, vm.Config{}, nil, nil)
+ chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index e4226fb304..dfbda7b515 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -428,7 +428,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E
// Generate blocks for testing
db, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator)
txlookupLimit := uint64(0)
- chain, err := core.NewBlockChain(db, cacheConfig, gspec.Config, engine, vm.Config{}, nil, &txlookupLimit)
+ chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, &txlookupLimit)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
diff --git a/light/odr_test.go b/light/odr_test.go
index e89d609391..9b9ab7e1c3 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -254,6 +254,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
sdb = rawdb.NewMemoryDatabase()
ldb = rawdb.NewMemoryDatabase()
gspec = core.Genesis{
+ Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
@@ -261,7 +262,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := core.NewBlockChain(sdb, nil, &gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, 4, testChainGen, true)
if _, err := blockchain.InsertChain(gchain, nil); err != nil {
t.Fatal(err)
diff --git a/light/trie_test.go b/light/trie_test.go
index c8ec1116fc..3e858c8ab7 100644
--- a/light/trie_test.go
+++ b/light/trie_test.go
@@ -38,13 +38,14 @@ func TestNodeIterator(t *testing.T) {
fulldb = rawdb.NewMemoryDatabase()
lightdb = rawdb.NewMemoryDatabase()
gspec = core.Genesis{
+ Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
genesis = gspec.MustCommit(fulldb)
)
gspec.MustCommit(lightdb)
- blockchain, _ := core.NewBlockChain(fulldb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := core.NewBlockChain(fulldb, nil, &gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), fulldb, 4, testChainGen, true)
if _, err := blockchain.InsertChain(gchain, nil); err != nil {
panic(err)
diff --git a/light/txpool_test.go b/light/txpool_test.go
index e47a649c65..3c377f7b00 100644
--- a/light/txpool_test.go
+++ b/light/txpool_test.go
@@ -84,6 +84,7 @@ func TestTxPool(t *testing.T) {
sdb = rawdb.NewMemoryDatabase()
ldb = rawdb.NewMemoryDatabase()
gspec = core.Genesis{
+ Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
@@ -91,7 +92,7 @@ func TestTxPool(t *testing.T) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, nil, params.TestChainConfig, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := core.NewBlockChain(sdb, nil, &gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, poolTestBlocks, txPoolTestChainGen, true)
if _, err := blockchain.InsertChain(gchain, nil); err != nil {
panic(err)
diff --git a/miner/miner_test.go b/miner/miner_test.go
index 1bd0f816d5..1b6eeecd66 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -240,21 +240,21 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
memdb := memorydb.New()
chainDB := rawdb.NewDatabase(memdb)
genesis := core.DeveloperGenesisBlock(15, 11_500_000, common.HexToAddress("12345"))
- chainConfig, _, err := core.SetupGenesisBlock(chainDB, genesis, false)
+ loadedChainConfig, err := core.LoadChainConfig(chainDB, genesis)
if err != nil {
- t.Fatalf("can't create new chain config: %v", err)
+ t.Fatalf("can't load chain config: %v", err)
}
// Create consensus engine
- engine := clique.New(chainConfig.Clique, chainDB)
+ engine := clique.New(loadedChainConfig.Clique, chainDB)
// Create Ethereum backend
- bc, err := core.NewBlockChain(chainDB, nil, chainConfig, engine, vm.Config{}, nil, nil)
+ bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("can't create new chain %v", err)
}
statedb, _ := state.New(common.Hash{}, state.NewDatabase(chainDB), nil)
blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
- legacyPool := legacypool.New(testTxPoolConfig, chainConfig, blockchain)
+ legacyPool := legacypool.New(testTxPoolConfig, bc.Config(), blockchain)
txPool, err := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{legacyPool})
if err != nil {
t.Fatal(err)
@@ -263,5 +263,5 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
// Create event Mux
mux := new(event.TypeMux)
// Create Miner
- return New(backend, &config, chainConfig, mux, engine, nil), mux
+ return New(backend, &config, bc.Config(), mux, engine, nil), mux
}
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 3a8b5a3848..4b150e1a85 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -144,7 +144,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
}
genesis := gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, &gspec, nil, engine, vm.Config{}, nil, nil)
legacyPool := legacypool.New(testTxPoolConfig, chainConfig, chain)
txpool, err := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{legacyPool})
if err != nil {
@@ -245,7 +245,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) {
// This test chain imports the mined blocks.
db2 := rawdb.NewMemoryDatabase()
b.genesis.MustCommit(db2)
- chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db2, nil, b.genesis, nil, engine, vm.Config{}, nil, nil)
defer chain.Stop()
// Ignore empty commit here for less noise.
diff --git a/params/config.go b/params/config.go
index b60bd2ba70..dd64c50aca 100644
--- a/params/config.go
+++ b/params/config.go
@@ -484,6 +484,36 @@ var (
ConsortiumV2Contracts: nil,
RoninTreasuryAddress: &common.Address{},
}
+ NonActivatedConfig = &ChainConfig{
+ ChainID: nil,
+ HomesteadBlock: nil,
+ DAOForkBlock: nil,
+ DAOForkSupport: false,
+ EIP150Block: nil,
+ EIP150Hash: common.Hash{},
+ EIP155Block: nil,
+ EIP158Block: nil,
+ ByzantiumBlock: nil,
+ ConstantinopleBlock: nil,
+ PetersburgBlock: nil,
+ IstanbulBlock: nil,
+ MuirGlacierBlock: nil,
+ MikoBlock: nil,
+ BerlinBlock: nil,
+ LondonBlock: nil,
+ ArrowGlacierBlock: nil,
+ OdysseusBlock: nil,
+ FenixBlock: nil,
+ ConsortiumV2Block: nil,
+ PuffyBlock: nil,
+ BlacklistContractAddress: nil,
+ FenixValidatorContractAddress: nil,
+ TerminalTotalDifficulty: nil,
+ Ethash: new(EthashConfig),
+ Clique: nil,
+ Consortium: nil,
+ ConsortiumV2Contracts: nil,
+ }
TestRules = TestChainConfig.Rules(new(big.Int))
)
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 2d6abb5eb3..4881a29d90 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -127,7 +127,10 @@ func (t *BlockTest) Run(snapshotter bool) error {
cache.SnapshotLimit = 1
cache.SnapshotWait = true
}
- chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil, nil)
+ gspec := &core.Genesis{
+ Config: config,
+ }
+ chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
return err
}
diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go
index 20dcdec083..494979109a 100644
--- a/tests/fuzzers/les/les-fuzzer.go
+++ b/tests/fuzzers/les/les-fuzzer.go
@@ -82,7 +82,7 @@ func makechain() (bc *core.BlockChain, addrHashes, txHashes []common.Hash) {
addrHashes = append(addrHashes, crypto.Keccak256Hash(addr[:]))
txHashes = append(txHashes, tx.Hash())
}, true)
- bc, _ = core.NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ = core.NewBlockChain(db, nil, &gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if _, err := bc.InsertChain(blocks, nil); err != nil {
panic(err)
}
From d9bf4b0f89efe36da17a2a3e98473056bf42f143 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Wed, 18 Sep 2024 11:30:31 +0700
Subject: [PATCH 10/41] Make db inspector for extending multiple ancient stores
(#574)
* core: add blockchain test for failing create/destroy-case
* core,state: some refactors
* core/rawdb: refactor db inspector for extending multiple ancient store
---
core/blockchain.go | 2 +-
core/rawdb/ancient_scheme.go | 35 ----------
core/rawdb/ancient_utils.go | 125 +++++++++++++++++++++++++++++++++++
core/rawdb/chain_freezer.go | 5 ++
core/rawdb/database.go | 50 +++++++-------
core/rawdb/freezer.go | 6 ++
core/rawdb/table.go | 6 ++
core/state/state_object.go | 6 +-
core/state/statedb.go | 8 ++-
ethdb/database.go | 4 ++
10 files changed, 181 insertions(+), 66 deletions(-)
create mode 100644 core/rawdb/ancient_utils.go
diff --git a/core/blockchain.go b/core/blockchain.go
index b38826618c..d3e333753e 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -126,7 +126,7 @@ const (
BlockChainVersion uint64 = 8
)
-// CacheConfig contains the configuration values for the trie caching/pruning
+// CacheConfig contains the configuration values for the trie database
// that's resident in a blockchain.
type CacheConfig struct {
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
index 627a611998..f621ba1a3d 100644
--- a/core/rawdb/ancient_scheme.go
+++ b/core/rawdb/ancient_scheme.go
@@ -16,8 +16,6 @@
package rawdb
-import "fmt"
-
// The list of table names of chain freezer. (headers, hashes, bodies, difficulties)
const (
@@ -54,36 +52,3 @@ var (
// freezers the collections of all builtin freezers.
var freezers = []string{chainFreezerName}
-
-// InspectFreezerTable dumps out the index of a specific freezer table. The passed
-// ancient indicates the path of root ancient directory where the chain freezer can
-// be opened. Start and end specifiy the range for dumping out indexes.
-// Note this function can only used for debugging purpose.
-func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
- var (
- path string
- tables map[string]bool
- )
-
- switch freezerName {
- case chainFreezerName:
- path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
- default:
- return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
- }
- noSnappy, exit := tables[tableName]
- if !exit {
- // If the tableName is not exit in the tables, return an error.
- var names []string
- for name := range tables {
- names = append(names, name)
- }
- return fmt.Errorf("unknown table name, supported ones: %v", names)
- }
- table, err := newFreezerTable(path, tableName, noSnappy)
- if err != nil {
- return err
- }
- table.dumpIndexStdout(start, end)
- return nil
-}
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
new file mode 100644
index 0000000000..5fe2423694
--- /dev/null
+++ b/core/rawdb/ancient_utils.go
@@ -0,0 +1,125 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+type tableSize struct {
+ name string
+ size common.StorageSize
+}
+
+// freezerInfo contains the basic information of the freezer.
+type freezerInfo struct {
+ name string // The identifier of freezer
+ head uint64 // The number of last stored item in the freezer
+ tail uint64 // The number of first stored item in the freezer
+ sizes []tableSize // The storage size per table
+}
+
+// count returns the number of stored items in the freezer.
+func (info *freezerInfo) count() uint64 {
+ return info.head - info.tail + 1
+}
+
+// size returns the storage size of the entire freezer.
+func (info *freezerInfo) size() common.StorageSize {
+ var total common.StorageSize
+ for _, table := range info.sizes {
+ total += table.size
+ }
+ return total
+}
+
+// inspectFreezers inspects all freezers registered in the system.
+func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
+ var (
+ infos []freezerInfo
+ )
+ for _, freezer := range freezers {
+ switch freezer {
+ case chainFreezerName: // We only support chain freezer for now.
+ // Chain ancient store is a bit special. It's always opened along
+ // with a key-value store, inspect the chain store directly.
+ info := freezerInfo{name: freezer}
+ // Retrieve storage size of every contained table.
+ for table := range chainFreezerNoSnappy {
+ size, err := db.AncientSize(table)
+ if err != nil {
+ return nil, err
+ }
+ info.sizes = append(info.sizes, tableSize{name: table, size: common.StorageSize(size)})
+ }
+ // Retrieve the number of last stored item
+ ancients, err := db.Ancients()
+ if err != nil {
+ return nil, err
+ }
+ info.head = ancients - 1
+
+ // Retrieve the number of first stored item
+ tail, err := db.Tail()
+ if err != nil {
+ return nil, err
+ }
+ info.tail = tail
+ infos = append(infos, info)
+ default:
+ return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
+ }
+ }
+ return infos, nil
+}
+
+// InspectFreezerTable dumps out the index of a specific freezer table. The passed
+// ancient indicates the path of root ancient directory where the chain freezer can
+// be opened. Start and end specifiy the range for dumping out indexes.
+// Note this function can only used for debugging purpose.
+func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
+ var (
+ path string
+ tables map[string]bool
+ )
+
+ switch freezerName {
+ case chainFreezerName:
+ path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
+ default:
+ return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
+ }
+ noSnappy, exit := tables[tableName]
+ if !exit {
+ // If the tableName is not exit in the tables, return an error.
+ var names []string
+ for name := range tables {
+ names = append(names, name)
+ }
+ return fmt.Errorf("unknown table name, supported ones: %v", names)
+ }
+ table, err := newFreezerTable(path, tableName, noSnappy)
+ if err != nil {
+ return err
+ }
+
+ table.dumpIndexStdout(start, end)
+ return nil
+}
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
index d6e3ac5646..632f7a960e 100644
--- a/core/rawdb/chain_freezer.go
+++ b/core/rawdb/chain_freezer.go
@@ -73,6 +73,11 @@ func (f *chainFreezer) Close() error {
return err
}
+// Tail returns an error as we don't have a backing chain freezer.
+func (f *chainFreezer) Tail() (uint64, error) {
+ return 0, errNotSupported
+}
+
// freeze is a background thread that periodically checks the blockchain for any
// import progress and moves ancient data from the fast database into the freezer.
//
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 14038cc077..e2e06cff8b 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -23,6 +23,7 @@ import (
"os"
"path"
"path/filepath"
+ "strings"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -87,6 +88,11 @@ type nofreezedb struct {
ethdb.KeyValueStore
}
+// Tail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Tail() (uint64, error) {
+ return 0, errNotSupported
+}
+
// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
return false, errNotSupported
@@ -445,13 +451,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
cliqueSnaps stat
consortiumSnaps stat
- // Ancient store statistics
- ancientHeadersSize common.StorageSize
- ancientBodiesSize common.StorageSize
- ancientReceiptsSize common.StorageSize
- ancientTdsSize common.StorageSize
- ancientHashesSize common.StorageSize
-
// Les statistic
chtTrieNodes stat
bloomTrieNodes stat
@@ -540,20 +539,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
logged = time.Now()
}
}
- // Inspect append-only file store then.
- ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
- for i, category := range []string{chainFreezerHeaderTable, chainFreezerBodiesTable, chainFreezerReceiptTable, chainFreezerHashTable, chainFreezerDifficultyTable} {
- if size, err := db.AncientSize(category); err == nil {
- *ancientSizes[i] += common.StorageSize(size)
- total += common.StorageSize(size)
- }
- }
- // Get number of ancient rows inside the freezer
- ancients := counter(0)
- if count, err := db.Ancients(); err == nil {
- ancients = counter(count)
- }
- // Display the database statistic.
+
+ // Display the database statistic of key-value store.
stats := [][]string{
{"Key-Value store", "Headers", headers.Size(), headers.Count()},
{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
@@ -571,14 +558,25 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Consortium snapshots", consortiumSnaps.Size(), consortiumSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
- {"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
- {"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
- {"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},
- {"Ancient store", "Difficulties", ancientTdsSize.String(), ancients.String()},
- {"Ancient store", "Block number->hash", ancientHashesSize.String(), ancients.String()},
{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
}
+ // Inspect all registered append-only file store then.
+ ancients, err := inspectFreezers(db)
+ if err != nil {
+ return err
+ }
+ for _, ancient := range ancients {
+ for _, table := range ancient.sizes {
+ stats = append(stats, []string{
+ fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
+ strings.Title(table.name),
+ table.size.String(),
+ fmt.Sprintf("%d", ancient.count()),
+ })
+ }
+ total += ancient.size()
+ }
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size", "Items"})
table.SetFooter([]string{"", "Total", total.String(), " "})
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index df92836668..538a799b5c 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -219,6 +219,12 @@ func (f *Freezer) AncientSize(kind string) (uint64, error) {
return 0, errUnknownTable
}
+// Tail returns an error as we don't have a backing chain freezer.
+func (f *Freezer) Tail() (uint64, error) {
+ // return f.tail.Load(), nil, in the next implementing, right now just keep it zero
+ return 0, nil
+}
+
// ReadAncients runs the given read operation while ensuring that no writes take place
// on the underlying freezer.
func (f *Freezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 05910a0557..5e07ec43ad 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -68,6 +68,12 @@ func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]by
return t.db.AncientRange(kind, start, count, maxBytes)
}
+// Tail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Tail() (uint64, error) {
+ return t.db.Tail()
+}
+
// Ancients is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Ancients() (uint64, error) {
diff --git a/core/state/state_object.go b/core/state/state_object.go
index a6dee5c02f..91d3b71c46 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -392,9 +392,9 @@ func (s *stateObject) updateRoot(db Database) {
s.data.Root = s.trie.Hash()
}
-// CommitTrie the storage trie of the object to db.
-// This updates the trie root.
-func (s *stateObject) CommitTrie(db Database) (*trie.NodeSet, error) {
+// commitTrie submits the storage changes into the storage trie and re-computes
+// the root. Besides, all trie changes will be collected in a nodeset and returned.
+func (s *stateObject) commitTrie(db Database) (*trie.NodeSet, error) {
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
return nil, nil
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 6b041eaa65..e78968e2e5 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -986,7 +986,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
obj.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie
- nodeSet, err := obj.CommitTrie(s.db)
+ nodeSet, err := obj.commitTrie(s.db)
if err != nil {
return common.Hash{}, err
}
@@ -1000,6 +1000,12 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
}
}
+ // If the contract is destructed, the storage is still left in the
+ // database as dangling data. Theoretically it's should be wiped from
+ // database as well, but in hash-based-scheme it's extremely hard to
+ // determine that if the trie nodes are also referenced by other storage,
+ // and in path-based-scheme some technical challenges are still unsolved.
+ // Although it won't affect the correctness but please fix it TODO(rjl493456442).
if len(s.stateObjectsDirty) > 0 {
s.stateObjectsDirty = make(map[common.Address]struct{})
}
diff --git a/ethdb/database.go b/ethdb/database.go
index 3ddfbbac08..6d0e1147a1 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -87,6 +87,10 @@ type AncientReaderOp interface {
// Ancients returns the ancient item numbers in the ancient store.
Ancients() (uint64, error)
+ // Tail returns the number of first stored item in the ancient store.
+ // This number can also be interpreted as the total deleted items.
+ Tail() (uint64, error)
+
// AncientSize returns the ancient size of the specified category.
AncientSize(kind string) (uint64, error)
}
From 6de49a5c4908478c56eb60d95e0f800d6ef3f30d Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Thu, 19 Sep 2024 16:11:34 +0700
Subject: [PATCH 11/41] rawdb,ethdb,eth: implement freezer tail deletion and
use atomic reference commit 538a868 (#577)
fix up
---
core/blockchain.go | 6 +-
core/rawdb/database.go | 13 +-
core/rawdb/freezer.go | 72 ++++++--
core/rawdb/freezer_batch.go | 1 +
core/rawdb/freezer_meta.go | 112 ++++++++++++
core/rawdb/freezer_meta_test.go | 61 +++++++
core/rawdb/freezer_table.go | 287 ++++++++++++++++++++++++-------
core/rawdb/freezer_table_test.go | 4 +-
core/rawdb/freezer_test.go | 6 +-
core/rawdb/freezer_utils.go | 125 ++++++++++++++
core/rawdb/freezer_utils_test.go | 75 ++++++++
core/rawdb/table.go | 11 +-
core/state/pruner/pruner.go | 6 +-
ethdb/database.go | 26 ++-
14 files changed, 700 insertions(+), 105 deletions(-)
create mode 100644 core/rawdb/freezer_meta.go
create mode 100644 core/rawdb/freezer_meta_test.go
create mode 100644 core/rawdb/freezer_utils.go
create mode 100644 core/rawdb/freezer_utils_test.go
diff --git a/core/blockchain.go b/core/blockchain.go
index d3e333753e..1701130992 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -763,7 +763,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store.
- if err := bc.db.TruncateAncients(num); err != nil {
+ if err := bc.db.TruncateHead(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
// Remove the hash <-> number mapping from the active store.
@@ -1200,7 +1200,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// The tx index data could not be written.
// Roll back the ancient store update.
fastBlock := bc.CurrentFastBlock().NumberU64()
- if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
+ if err := bc.db.TruncateHead(fastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, err
@@ -1216,7 +1216,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if !updateHead(blockChain[len(blockChain)-1]) {
// We end up here if the header chain has reorg'ed, and the blocks/receipts
// don't match the canonical chain.
- if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil {
+ if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, errSideChainReceipts
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index e2e06cff8b..6a08932208 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -123,13 +123,18 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e
return 0, errNotSupported
}
-// TruncateAncients returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateAncients(items uint64) error {
+// Sync returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Sync() error {
return errNotSupported
}
-// Sync returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) Sync() error {
+// TruncateHead returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateHead(items uint64) error {
+ return errNotSupported
+}
+
+// TruncateTail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateTail(items uint64) error {
return errNotSupported
}
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 538a799b5c..1a96aff6d7 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -52,11 +52,11 @@ var (
const (
- // freezerTableSize defines the maximum size of freezer data files.
+ // freezerTableSize defines the maximum size of freezer data files, max size of per file is 2GB.
freezerTableSize = 2 * 1000 * 1000 * 1000
)
-// freezer is an memory mapped append-only database to store immutable chain data
+// freezer is a memory mapped append-only database to store immutable chain data
// into flat files:
//
// - The append only nature ensures that disk writes are minimized.
@@ -65,6 +65,7 @@ const (
// of Geth, and thus also GC overhead.
type Freezer struct {
frozen atomic.Uint64 // Number of items already frozen
+ tail atomic.Uint64 // Number of the first stored item in the freezer
threshold atomic.Uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
// This lock synchronizes writers and the truncate operation, as well as
@@ -116,6 +117,8 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
trigger: make(chan chan struct{}),
quit: make(chan struct{}),
}
+ // The number of blocks after which a chain segment is
+ // considered immutable (i.e. soft finality)
freezer.threshold.Store(params.FullImmutabilityThreshold)
// Create the tables.
@@ -131,7 +134,7 @@ func NewFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
freezer.tables[name] = table
}
- // Truncate all tables to common length.
+ // Truncate all tables to common length, then close
if err := freezer.repair(); err != nil {
for _, table := range freezer.tables {
table.Close()
@@ -219,10 +222,9 @@ func (f *Freezer) AncientSize(kind string) (uint64, error) {
return 0, errUnknownTable
}
-// Tail returns an error as we don't have a backing chain freezer.
+// Tail returns the number of first stored item in the freezer.
func (f *Freezer) Tail() (uint64, error) {
- // return f.tail.Load(), nil, in the next implementing, right now just keep it zero
- return 0, nil
+ return f.tail.Load(), nil
}
// ReadAncients runs the given read operation while ensuring that no writes take place
@@ -247,7 +249,7 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
if err != nil {
// The write operation has failed. Go back to the previous item position.
for name, table := range f.tables {
- err := table.truncate(prevItem)
+ err := table.truncateHead(prevItem)
if err != nil {
log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
}
@@ -267,19 +269,20 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
return writeSize, nil
}
-// TruncateAncients discards any recent data above the provided threshold number.
-func (f *Freezer) TruncateAncients(items uint64) error {
+// TruncateHead discards any recent data above the provided threshold number, only keep the first items ancient data.
+func (f *Freezer) TruncateHead(items uint64) error {
if f.readonly {
return errReadOnly
}
f.writeLock.Lock()
defer f.writeLock.Unlock()
+ // If the current frozen number is less than the requested items for frozen, do nothing.
if f.frozen.Load() <= items {
return nil
}
for _, table := range f.tables {
- if err := table.truncate(items); err != nil {
+ if err := table.truncateHead(items); err != nil {
return err
}
}
@@ -287,6 +290,28 @@ func (f *Freezer) TruncateAncients(items uint64) error {
return nil
}
+// TruncateTail discards any recent data below the provided threshold number, only keep the last items ancient data.
+func (f *Freezer) TruncateTail(tail uint64) error {
+ if f.readonly {
+ return errReadOnly
+ }
+ f.writeLock.Lock()
+ defer f.writeLock.Unlock()
+
+ // If the current tail number is greater than the requested tail, seem out of range for truncating, do nothing.
+ if f.tail.Load() >= tail {
+ return nil
+ }
+
+ for _, table := range f.tables {
+ if err := table.truncateTail(tail); err != nil {
+ return err
+ }
+ }
+ f.tail.Store(tail)
+ return nil
+}
+
// Sync flushes all data tables to disk.
func (f *Freezer) Sync() error {
var errs []error
@@ -303,18 +328,35 @@ func (f *Freezer) Sync() error {
// repair truncates all data tables to the same length.
func (f *Freezer) repair() error {
- min := uint64(math.MaxUint64)
+ var (
+ head = uint64(math.MaxUint64)
+ tail = uint64(0)
+ )
+ // Looping through all tables to find the most common head and tail between tables
for _, table := range f.tables {
items := table.items.Load()
- if min > items {
- min = items
+
+ if head > items {
+ head = items
+ }
+ hidden := table.itemHidden.Load()
+ if hidden > tail {
+ tail = hidden
}
}
+
+ // Truncate all tables to the common head and tail.
for _, table := range f.tables {
- if err := table.truncate(min); err != nil {
+ if err := table.truncateHead(head); err != nil {
+ return err
+ }
+
+ if err := table.truncateTail(tail); err != nil {
return err
}
}
- f.frozen.Store(min)
+ // Update frozen and tail counters.
+ f.frozen.Store(head)
+ f.tail.Store(tail)
return nil
}
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
index dfb16a58e1..e143dba2d4 100644
--- a/core/rawdb/freezer_batch.go
+++ b/core/rawdb/freezer_batch.go
@@ -165,6 +165,7 @@ func (batch *freezerTableBatch) appendItem(data []byte) error {
batch.totalBytes += itemSize
// Put index entry to buffer.
+ // The index file contains a list of index entries.
entry := indexEntry{filenum: batch.t.headId, offset: uint32(itemOffset + itemSize)}
batch.indexBuffer = entry.append(batch.indexBuffer)
batch.curItem++
diff --git a/core/rawdb/freezer_meta.go b/core/rawdb/freezer_meta.go
new file mode 100644
index 0000000000..3eed366a7b
--- /dev/null
+++ b/core/rawdb/freezer_meta.go
@@ -0,0 +1,112 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package rawdb
+
+import (
+ "io"
+ "os"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+const freezerVersion = 1 // The initial version tag of freezer table metadata
+
+// freezerTableMeta wraps all the metadata of the freezer table.
+type freezerTableMeta struct {
+ // Version is the versioning descriptor of the freezer table.
+ Version uint16
+
+ // VirtualTail indicates how many items have been marked as deleted.
+ // Its value is equal to the number of items removed from the table
+ // plus the number of items hidden in the table, so it should never
+ // be lower than the "actual tail".
+ VirtualTail uint64
+}
+
+// newMetadata initializes the metadata object with the given virtual tail.
+func newMetadata(tail uint64) *freezerTableMeta {
+ return &freezerTableMeta{
+ Version: freezerVersion,
+ VirtualTail: tail,
+ }
+}
+
+// readMetadata reads the metadata of the freezer table from the
+// given metadata file.
+func readMetadata(file *os.File) (*freezerTableMeta, error) {
+ _, err := file.Seek(0, io.SeekStart) // SeekStart means the origin of the file
+ if err != nil {
+ return nil, err
+ }
+ var meta freezerTableMeta
+ if err := rlp.Decode(file, &meta); err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// writeMetadata writes the metadata of the freezer table into the
+// given metadata file.
+func writeMetadata(file *os.File, meta *freezerTableMeta) error {
+ _, err := file.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ return rlp.Encode(file, meta)
+}
+
+// loadMetadata loads the metadata from the given metadata file.
+// Initializes the metadata file with the given "actual tail" if
+// it's empty.
+func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) {
+ stat, err := file.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+	// Write the metadata with the given actual tail into the metadata file
+ // if it's non-existent. There are two possible scenarios here:
+ // - the freezer table is empty
+ // - the freezer table is legacy
+ // In both cases, write the meta into the file with the actual tail
+ // as the virtual tail.
+ if stat.Size() == 0 { // The file is empty
+ m := newMetadata(tail)
+ if err := writeMetadata(file, m); err != nil {
+ return nil, err
+ }
+ return m, nil
+ }
+
+ // If the file is not empty, read the metadata from the file.
+ m, err := readMetadata(file)
+ if err != nil {
+ return nil, err
+ }
+	// Bump the virtual tail up to the given actual tail if it is somehow
+	// lower. Theoretically this shouldn't happen at all; print a warning.
+ if m.VirtualTail < tail {
+ log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail)
+ m.VirtualTail = tail
+ if err := writeMetadata(file, m); err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
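The .meta file is nothing more than the RLP encoding of freezerTableMeta written at offset zero, so an in-memory round trip illustrates the full on-disk format (values hypothetical):

    func metaRoundTrip() (*freezerTableMeta, error) {
        blob, err := rlp.EncodeToBytes(newMetadata(100))
        if err != nil {
            return nil, err
        }
        var meta freezerTableMeta
        if err := rlp.DecodeBytes(blob, &meta); err != nil {
            return nil, err
        }
        return &meta, nil // meta.Version == 1, meta.VirtualTail == 100
    }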
diff --git a/core/rawdb/freezer_meta_test.go b/core/rawdb/freezer_meta_test.go
new file mode 100644
index 0000000000..191744a754
--- /dev/null
+++ b/core/rawdb/freezer_meta_test.go
@@ -0,0 +1,61 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestReadWriteFreezerTableMeta(t *testing.T) {
+ f, err := ioutil.TempFile(os.TempDir(), "*")
+ if err != nil {
+ t.Fatalf("Failed to create file %v", err)
+ }
+ err = writeMetadata(f, newMetadata(100))
+ if err != nil {
+ t.Fatalf("Failed to write metadata %v", err)
+ }
+ meta, err := readMetadata(f)
+ if err != nil {
+ t.Fatalf("Failed to read metadata %v", err)
+ }
+ if meta.Version != freezerVersion {
+ t.Fatalf("Unexpected version field")
+ }
+ if meta.VirtualTail != uint64(100) {
+ t.Fatalf("Unexpected virtual tail field")
+ }
+}
+
+func TestInitializeFreezerTableMeta(t *testing.T) {
+ f, err := ioutil.TempFile(os.TempDir(), "*")
+ if err != nil {
+ t.Fatalf("Failed to create file %v", err)
+ }
+ meta, err := loadMetadata(f, uint64(100))
+ if err != nil {
+ t.Fatalf("Failed to read metadata %v", err)
+ }
+ if meta.Version != freezerVersion {
+ t.Fatalf("Unexpected version field")
+ }
+ if meta.VirtualTail != uint64(100) {
+ t.Fatalf("Unexpected virtual tail field")
+ }
+}
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index d15d443943..81e8a3155f 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -50,17 +50,16 @@ var (
// offset within the file to the end of the data
// In serialized form, the filenum is stored as uint16.
type indexEntry struct {
- filenum uint32 // stored as uint16 ( 2 bytes)
- offset uint32 // stored as uint32 ( 4 bytes)
+ filenum uint32 // stored as uint16 ( 2 bytes )
+ offset uint32 // stored as uint32 ( 4 bytes )
}
-const indexEntrySize = 6
+const indexEntrySize = 6 // filenum + offset
// unmarshalBinary deserializes binary b into the rawIndex entry.
-func (i *indexEntry) unmarshalBinary(b []byte) error {
+func (i *indexEntry) unmarshalBinary(b []byte) {
i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
i.offset = binary.BigEndian.Uint32(b[2:6])
- return nil
}
// append adds the encoded entry to the end of b.
@@ -92,16 +91,24 @@ type freezerTable struct {
items atomic.Uint64 // Number of items stored in the table (including items removed from tail)
itemOffset atomic.Uint64 // Number of items removed from the table
+ // itemHidden is the number of items marked as deleted. Tail deletion is
+ // only supported at file level which means the actual deletion will be
+ // delayed until the entire data file is marked as deleted. Before that
+ // these items will be hidden to prevent being visited again. The value
+ // should never be lower than itemOffset.
+ itemHidden atomic.Uint64
+
noCompression bool // if true, disables snappy compression. Note: does not work retroactively
maxFileSize uint32 // Max file size for data-files
name string
path string
head *os.File // File descriptor for the data head of the table
+ index *os.File // File descriptor for the indexEntry file of the table
+ meta *os.File // File descriptor for the metadata file of the table
files map[uint32]*os.File // open files
headId uint32 // number of the currently active head file
tailId uint32 // number of the earliest file
- index *os.File // File descriptor for the indexEntry file of the table
headBytes int64 // Number of bytes written to the head file
readMeter metrics.Meter // Meter for measuring the effective amount of data read
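A worked example of the delayed deletion behind itemHidden, with made-up numbers: if items 0..99 live in data file 0 and items 100..199 in file 1, then truncateTail(150) can only delete at file granularity, so file 0 is dropped (itemOffset becomes 100) while items 100..149 are merely hidden (itemHidden becomes 150). Retrieval therefore bounds reads by itemHidden, not itemOffset; a sketch of the resulting visibility predicate:

    // visible mirrors the lower-bound check used by retrieveItems and has.
    func visible(items, itemHidden, n uint64) bool {
        return n >= itemHidden && n < items
    }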
@@ -117,47 +124,9 @@ func newFreezerTable(path, name string, disableSnappy bool) (*freezerTable, erro
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
}
-// openFreezerFileForAppend opens a freezer table file and seeks to the end
-func openFreezerFileForAppend(filename string) (*os.File, error) {
- // Open the file without the O_APPEND flag
- // because it has differing behaviour during Truncate operations
- // on different OS's
- file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
- if err != nil {
- return nil, err
- }
- // Seek to end for append
- if _, err = file.Seek(0, io.SeekEnd); err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// openFreezerFileForReadOnly opens a freezer table file for read only access
-func openFreezerFileForReadOnly(filename string) (*os.File, error) {
- return os.OpenFile(filename, os.O_RDONLY, 0644)
-}
-
-// openFreezerFileTruncated opens a freezer table making sure it is truncated
-func openFreezerFileTruncated(filename string) (*os.File, error) {
- return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
-}
-
-// truncateFreezerFile resizes a freezer table file and seeks to the end
-func truncateFreezerFile(file *os.File, size int64) error {
- if err := file.Truncate(size); err != nil {
- return err
- }
- // Seek to end for append
- if _, err := file.Seek(0, io.SeekEnd); err != nil {
- return err
- }
- return nil
-}
-
// newTable opens a freezer table, creating the data and index files if they are
// non existent. Both files are truncated to the shortest common length to ensure
-// they don't go out of sync.
+// they don't go out of sync. (Table name could be bodies, receipts, etc.)
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file
if err := os.MkdirAll(path, 0755); err != nil {
@@ -171,13 +140,24 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
// Compressed idx
idxName = fmt.Sprintf("%s.cidx", name)
}
- offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
+ var (
+ err error
+ index *os.File
+ meta *os.File
+ )
+ index, err = openFreezerFileForAppend(filepath.Join(path, idxName))
+ if err != nil {
+ return nil, err
+ }
+ meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
if err != nil {
return nil, err
}
+
// Create the table and repair any past inconsistency
tab := &freezerTable{
- index: offsets,
+ index: index,
+ meta: meta,
files: make(map[uint32]*os.File),
readMeter: readMeter,
writeMeter: writeMeter,
@@ -244,8 +224,20 @@ func (t *freezerTable) repair() error {
t.tailId = firstIndex.filenum
t.itemOffset.Store(uint64(firstIndex.offset))
- t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
- lastIndex.unmarshalBinary(buffer)
+ // Load metadata from the file
+ meta, err := loadMetadata(t.meta, t.itemOffset.Load())
+ if err != nil {
+ return err
+ }
+ t.itemHidden.Store(meta.VirtualTail)
+
+ // Read the last index, use the default value in case the freezer is empty
+ if offsetsSize == indexEntrySize {
+ lastIndex = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ lastIndex.unmarshalBinary(buffer)
+ }
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
if err != nil {
return err
@@ -274,9 +266,15 @@ func (t *freezerTable) repair() error {
return err
}
offsetsSize -= indexEntrySize
- t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ // Read the new head index, use the default value in case
+ // the freezer is already empty.
var newLastIndex indexEntry
- newLastIndex.unmarshalBinary(buffer)
+ if offsetsSize == indexEntrySize {
+ newLastIndex = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+ newLastIndex.unmarshalBinary(buffer)
+ }
// We might have slipped back into an earlier head-file here
if newLastIndex.filenum != lastIndex.filenum {
// Release earlier opened file
@@ -302,11 +300,19 @@ func (t *freezerTable) repair() error {
if err := t.head.Sync(); err != nil {
return err
}
+ if err := t.meta.Sync(); err != nil {
+ return err
+ }
// Update the item and byte counters and return
t.items.Store(t.itemOffset.Load() + uint64(offsetsSize/indexEntrySize-1)) // last indexEntry points to the end of the data file
t.headBytes = contentSize
t.headId = lastIndex.filenum
+	// Delete any files lying beyond the current head, left over from a head deletion
+	t.releaseFilesAfter(t.headId, true)
+
+	// Delete any files lying before the current tail, left over from a tail deletion
+	t.releaseFilesBefore(t.tailId, true)
// Close opened files and preopen all files
if err := t.preopen(); err != nil {
return err
@@ -333,16 +339,20 @@ func (t *freezerTable) preopen() (err error) {
return err
}
-// truncate discards any recent data above the provided threshold number.
-func (t *freezerTable) truncate(items uint64) error {
+// truncateHead discards any recent data above the provided threshold number.
+func (t *freezerTable) truncateHead(items uint64) error {
t.lock.Lock()
defer t.lock.Unlock()
- // If our item count is correct, don't do anything
+	// Ensure the given truncation target is within the existing range.
existing := t.items.Load()
if existing <= items {
return nil
}
+	// Ensure the given truncation target is above the hidden items.
+ if items < t.itemHidden.Load() {
+ return errors.New("truncation below tail")
+ }
// We need to truncate, save the old size for metrics tracking
oldSize, err := t.sizeNolock()
if err != nil {
@@ -354,17 +364,25 @@ func (t *freezerTable) truncate(items uint64) error {
log = t.logger.Warn // Only loud warn if we delete multiple items
}
log("Truncating freezer table", "items", existing, "limit", items)
- if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
- return err
- }
+
+	// Truncate the index file first; the tail position is also taken into
+	// account when calculating the new length of the freezer table.
// Calculate the new expected size of the data file and truncate it
- buffer := make([]byte, indexEntrySize)
- if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
+ length := items - t.itemOffset.Load()
+ if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
var expected indexEntry
- expected.unmarshalBinary(buffer)
+ if length == 0 {
+ expected = indexEntry{filenum: t.tailId, offset: 0}
+ } else {
+ buffer := make([]byte, indexEntrySize)
+ if _, err := t.index.ReadAt(buffer, int64(length*indexEntrySize)); err != nil {
+ return err
+ }
+ expected.unmarshalBinary(buffer)
+ }
// We might need to truncate back to older files
if expected.filenum != t.headId {
// If already open for reading, force-reopen for writing
@@ -397,6 +415,117 @@ func (t *freezerTable) truncate(items uint64) error {
return nil
}
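The index arithmetic in truncateHead can be restated in isolation: the index file holds one header entry plus one entry per item still physically present, and physically present items are counted from itemOffset. A sketch, with the constant redeclared locally for self-containment:

    const indexEntrySize = 6 // filenum (2 bytes) + offset (4 bytes)

    // indexFileSize returns the expected byte length of the index file after
    // the table has been truncated down to a total of items entries.
    func indexFileSize(items, itemOffset uint64) int64 {
        length := items - itemOffset
        return int64(length+1) * indexEntrySize
    }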
+// truncateTail discards all data below the provided threshold number.
+// Invariant: tail/itemOffset <= itemHidden <= truncation target <= items/head.
+func (t *freezerTable) truncateTail(items uint64) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+	// If the truncation target is at or below the current tail, there is nothing to truncate.
+ if t.itemHidden.Load() >= items {
+ return nil
+ }
+	// If the truncation target is above the current head, reject it.
+ if t.items.Load() < items {
+ return errors.New("truncation above head")
+ }
+
+	// Resolve the index entry of the new tail position
+ var (
+ newTailId uint32
+ buffer = make([]byte, indexEntrySize)
+ )
+
+ if t.items.Load() == items {
+		newTailId = t.headId // The whole table is hidden; the tail moves into the head file.
+	} else {
+		// Read the index entry at the new tail position and derive the tail file number from it.
+ offset := items - t.itemOffset.Load()
+ if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
+ return err
+ }
+ var newTailIndex indexEntry
+ newTailIndex.unmarshalBinary(buffer)
+ newTailId = newTailIndex.filenum
+ }
+	// Update the virtual tail marker and hide these entries in the table.
+ t.itemHidden.Store(items)
+ if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
+ return err
+ }
+ // Hidden items still fall in the current tail file, no data file
+ // can be dropped.
+ if t.tailId == newTailId {
+ return nil
+ }
+	// Hidden items fall in an invalid range, return an error.
+ if t.tailId > newTailId {
+ return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId)
+ }
+	// Hidden items exceed the current tail file, so the stale data files
+	// can be dropped. Save the old size first for metrics tracking.
+ oldSize, err := t.sizeNolock()
+ if err != nil {
+ return err
+ }
+	// Count how many items can be physically deleted along with the stale files.
+ var (
+ newDeleted = items
+ deleted = t.itemOffset.Load()
+ )
+ for current := items - 1; current >= deleted; current -= 1 {
+ if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
+ return err
+ }
+ var pre indexEntry
+ pre.unmarshalBinary(buffer)
+ if pre.filenum != newTailId {
+ break
+ }
+ newDeleted = current
+ }
+	// Commit the metadata changes first, before manipulating
+	// the index file.
+ if err := t.meta.Sync(); err != nil {
+ return err
+ }
+	// Drop the deleted index entries by rewriting the index file in place.
+ err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
+ tailIndex := indexEntry{
+ filenum: newTailId,
+ offset: uint32(newDeleted),
+ }
+ _, err := f.Write(tailIndex.append(nil))
+ return err
+ })
+ if err != nil {
+ return err
+ }
+ // Reopen the modified index file to load the changes
+ if err := t.index.Close(); err != nil {
+ return err
+ }
+ t.index, err = openFreezerFileForAppend(t.index.Name())
+ if err != nil {
+ return err
+ }
+	// Advance the tail markers
+	t.tailId = newTailId
+	t.itemOffset.Store(newDeleted)
+
+	// Close and delete any data files before the new tail
+	t.releaseFilesBefore(t.tailId, true)
+
+ // Retrieve the new size and update the total size counter
+ newSize, err := t.sizeNolock()
+ if err != nil {
+ return err
+ }
+ t.sizeGauge.Dec(int64(oldSize - newSize))
+ return nil
+}
+
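The backward scan deriving newDeleted above is easier to see in isolation. A standalone sketch with the index abstracted as a slice of file numbers, where entries[i] holds the file of item deleted+i (all names hypothetical):

    // firstItemInFile walks back from the new tail and returns the number of
    // the first item stored in newTailId's file; everything below that item
    // can be dropped physically together with the older data files.
    func firstItemInFile(entries []uint32, deleted, items uint64, newTailId uint32) uint64 {
        newDeleted := items
        for cur := items - 1; cur >= deleted; cur-- {
            if entries[cur-deleted] != newTailId {
                break
            }
            newDeleted = cur
            if cur == 0 {
                break // guard against uint64 underflow when deleted == 0
            }
        }
        return newDeleted
    }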
// Close closes all opened files.
func (t *freezerTable) Close() error {
t.lock.Lock()
@@ -408,6 +537,11 @@ func (t *freezerTable) Close() error {
}
t.index = nil
+ if err := t.meta.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ t.meta = nil
+
for _, f := range t.files {
if err := f.Close(); err != nil {
errs = append(errs, err)
@@ -421,6 +555,19 @@ func (t *freezerTable) Close() error {
return nil
}
+// releaseFilesBefore closes all open files with a lower number, and optionally also deletes the files
+func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
+ for fnum, f := range t.files {
+ if fnum < num {
+ delete(t.files, fnum)
+ f.Close()
+ if remove {
+ os.Remove(f.Name())
+ }
+ }
+ }
+}
+
// openFile assumes that the write-lock is held by the caller
func (t *freezerTable) openFile(num uint32, opener func(string) (*os.File, error)) (f *os.File, err error) {
var exist bool
@@ -559,14 +706,15 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
if t.index == nil || t.head == nil {
return nil, nil, errClosed
}
- itemCount := t.items.Load() // max number
+ items := t.items.Load() // max number
+ hidden := t.itemHidden.Load()
// Ensure the start is written, not deleted from the tail, and that the
// caller actually wants something
- if itemCount <= start || t.itemOffset.Load() > start || count == 0 {
+ if items <= start || hidden > start || count == 0 {
return nil, nil, errOutOfBounds
}
- if start+count > itemCount {
- count = itemCount - start
+ if start+count > items {
+ count = items - start
}
var (
output = make([]byte, maxBytes) // Buffer to read data into
@@ -645,7 +793,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
// has returns an indicator whether the specified number data
// exists in the freezer table.
func (t *freezerTable) has(number uint64) bool {
- return t.items.Load() > number
+ return t.items.Load() > number && t.itemHidden.Load() <= number
}
// size returns the total data size in the freezer table.
@@ -716,13 +864,20 @@ func (t *freezerTable) dumpIndexString(start, stop int64) string {
}
func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
+ meta, err := readMetadata(t.meta)
+ if err != nil {
+ fmt.Fprintf(w, "Failed to decode freezer table %v\n", err)
+ return
+ }
+ fmt.Fprintf(w, "Version %d deleted %d, hidden %d\n", meta.Version, t.itemOffset.Load(), t.itemHidden.Load())
+
buf := make([]byte, indexEntrySize)
fmt.Fprintf(w, "| number | fileno | offset |\n")
fmt.Fprintf(w, "|--------|--------|--------|\n")
for i := uint64(start); ; i++ {
- if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
+ if _, err := t.index.ReadAt(buf, int64((i+1)*indexEntrySize)); err != nil {
break
}
var entry indexEntry
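For reference, the 6-byte layout that unmarshalBinary and dumpIndex walk over is trivial to produce with encoding/binary; a sketch mirroring indexEntry.append:

    import "encoding/binary"

    // marshalIndexEntry encodes a big-endian uint16 file number followed by a
    // big-endian uint32 offset, the inverse of indexEntry.unmarshalBinary.
    func marshalIndexEntry(filenum, offset uint32) []byte {
        b := make([]byte, 6)
        binary.BigEndian.PutUint16(b[:2], uint16(filenum))
        binary.BigEndian.PutUint32(b[2:6], offset)
        return b
    }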
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index 57ddde49f7..edbfa15687 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -387,7 +387,7 @@ func TestFreezerTruncate(t *testing.T) {
t.Fatal(err)
}
defer f.Close()
- f.truncate(10) // 150 bytes
+ f.truncateHead(10) // 150 bytes
if f.items.Load() != 10 {
t.Fatalf("expected %d items, got %d", 10, f.items.Load())
}
@@ -504,7 +504,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
}
// Now, truncate back to zero
- f.truncate(0)
+ f.truncateHead(0)
// Write the data again
batch := f.newBatch()
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index 474650e00d..418e4ae5b1 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -186,7 +186,7 @@ func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
wg.Wait()
}
-// This test runs ModifyAncients and TruncateAncients concurrently with each other.
+// This test runs ModifyAncients and TruncateHead concurrently with each other.
func TestFreezerConcurrentModifyTruncate(t *testing.T) {
f, dir := newFreezerForTesting(t, freezerTestTableDef)
defer os.RemoveAll(dir)
@@ -196,7 +196,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
for i := 0; i < 1000; i++ {
// First reset and write 100 items.
- if err := f.TruncateAncients(0); err != nil {
+ if err := f.TruncateHead(0); err != nil {
t.Fatal("truncate failed:", err)
}
_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@@ -231,7 +231,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
wg.Done()
}()
go func() {
- truncateErr = f.TruncateAncients(10)
+ truncateErr = f.TruncateHead(10)
wg.Done()
}()
go func() {
diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go
new file mode 100644
index 0000000000..4354f94986
--- /dev/null
+++ b/core/rawdb/freezer_utils.go
@@ -0,0 +1,125 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// copyFrom copies data from 'srcPath' at offset 'offset' into 'destPath'.
+// The 'destPath' is created if it doesn't exist, otherwise it is overwritten.
+// Before the copy is executed, a callback can be registered to
+// manipulate the destination file.
+// It is perfectly valid to have destPath == srcPath.
+// Both paths must be absolute.
+func copyFrom(srcPath, destPath string, offset uint64, beforeCopyFunc func(f *os.File) error) error {
+ // Create a temp file in the same directory where we want it to wind up
+ f, err := ioutil.TempFile(filepath.Dir(destPath), "*") // Create random name
+ if err != nil {
+ return err
+ }
+
+ fname := f.Name()
+
+ // Clean up the remaining file.
+ defer func() {
+ if f != nil {
+ f.Close()
+ }
+ os.Remove(fname) // Clean up the temp file
+ }()
+
+	// Apply the optional callback before any data is copied
+ if beforeCopyFunc != nil {
+ if err := beforeCopyFunc(f); err != nil {
+ return err
+ }
+ }
+	// Open the source file
+ src, err := os.Open(srcPath)
+ if err != nil {
+ return err
+ }
+	// Seek to the requested offset, relative to the origin of the file.
+ if _, err = src.Seek(int64(offset), 0); err != nil {
+ src.Close()
+ return err
+ }
+
+ // io.Copy uses 32K buffer internally.
+ _, err = io.Copy(f, src)
+ if err != nil {
+ src.Close()
+ return err
+ }
+	// Rename the temporary file to the specified dest name.
+	// src may be the same as dest, so it needs to be closed
+	// before we do the final move.
+ src.Close()
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+ f = nil
+
+ if err := os.Rename(fname, destPath); err != nil {
+ return err
+ }
+ return nil
+}
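A hypothetical use of copyFrom, matching how truncateTail rewrites the index in place: drop the first two 6-byte entries of an index file and prepend a fresh placeholder tail entry (the helper name and sizes are illustrative; the path is assumed absolute, as copyFrom requires):

    func rewriteIndex(path string) error {
        return copyFrom(path, path, 2*indexEntrySize, func(f *os.File) error {
            _, err := f.Write(make([]byte, indexEntrySize)) // placeholder tail entry
            return err
        })
    }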
+
+// openFreezerFileForAppend opens a freezer table file, creating it if it doesn't exist, and seeks to the end.
+func openFreezerFileForAppend(filename string) (*os.File, error) {
+ // Open the file without the O_APPEND flag
+ // because it has differing behaviour during Truncate operations
+ // on different OS's
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ // Seek to end for append
+ if _, err = file.Seek(0, io.SeekEnd); err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+// openFreezerFileForReadOnly opens a freezer table file for read only access
+func openFreezerFileForReadOnly(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDONLY, 0644)
+}
+
+// openFreezerFileTruncated opens a freezer table making sure it is truncated
+func openFreezerFileTruncated(filename string) (*os.File, error) {
+ return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+}
+
+// truncateFreezerFile resizes a freezer table file and seeks to the end
+func truncateFreezerFile(file *os.File, size int64) error {
+ if err := file.Truncate(size); err != nil {
+ return err
+ }
+ // Seek to end for append
+ if _, err := file.Seek(0, io.SeekEnd); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/core/rawdb/freezer_utils_test.go b/core/rawdb/freezer_utils_test.go
new file mode 100644
index 0000000000..445f63fb79
--- /dev/null
+++ b/core/rawdb/freezer_utils_test.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestCopyFrom(t *testing.T) {
+ var (
+ content = []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
+ prefix = []byte{0x9, 0xa, 0xb, 0xc, 0xd, 0xf}
+ )
+ var cases = []struct {
+ src, dest string
+ offset uint64
+ writePrefix bool
+ }{
+ {"foo", "bar", 0, false},
+ {"foo", "bar", 1, false},
+ {"foo", "bar", 8, false},
+ {"foo", "foo", 0, false},
+ {"foo", "foo", 1, false},
+ {"foo", "foo", 8, false},
+ {"foo", "bar", 0, true},
+ {"foo", "bar", 1, true},
+ {"foo", "bar", 8, true},
+ }
+ for _, c := range cases {
+ ioutil.WriteFile(c.src, content, 0644)
+
+ if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
+ if !c.writePrefix {
+ return nil
+ }
+ f.Write(prefix)
+ return nil
+ }); err != nil {
+ os.Remove(c.src)
+ t.Fatalf("Failed to copy %v", err)
+ }
+
+ blob, err := ioutil.ReadFile(c.dest)
+ if err != nil {
+ os.Remove(c.src)
+ os.Remove(c.dest)
+ t.Fatalf("Failed to read %v", err)
+ }
+ want := content[c.offset:]
+ if c.writePrefix {
+ want = append(prefix, want...)
+ }
+ if !bytes.Equal(blob, want) {
+ t.Fatal("Unexpected value")
+ }
+ os.Remove(c.src)
+ os.Remove(c.dest)
+ }
+}
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 5e07ec43ad..2672f4ea8d 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -95,10 +95,15 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err e
return t.db.ReadAncients(fn)
}
-// TruncateAncients is a noop passthrough that just forwards the request to the underlying
+// TruncateHead is a noop passthrough that just forwards the request to the underlying
// database.
-func (t *table) TruncateAncients(items uint64) error {
- return t.db.TruncateAncients(items)
+func (t *table) TruncateHead(items uint64) error {
+ return t.db.TruncateHead(items)
+}
+
+// TruncateTail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) TruncateTail(items uint64) error {
+ return t.db.TruncateTail(items)
}
// Sync is a noop passthrough that just forwards the request to the underlying
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 96fbbd26b9..559507d7ff 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -66,9 +66,9 @@ var (
// Pruner is an offline tool to prune the stale state with the
// help of the snapshot. The workflow of pruner is very simple:
//
-// - iterate the snapshot, reconstruct the relevant state
-// - iterate the database, delete all other state entries which
-// don't belong to the target state and the genesis state
+// - iterate the snapshot, reconstruct the relevant state
+// - iterate the database, delete all other state entries which
+// don't belong to the target state and the genesis state
//
// It can take several hours(around 2 hours for mainnet) to finish
// the whole pruning work. It's recommended to run this offline tool
diff --git a/ethdb/database.go b/ethdb/database.go
index 6d0e1147a1..a3c5570b53 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -87,12 +87,12 @@ type AncientReaderOp interface {
// Ancients returns the ancient item numbers in the ancient store.
Ancients() (uint64, error)
- // Tail returns the number of first stored item in the ancient store.
- // This number can also be interpreted as the total deleted items.
- Tail() (uint64, error)
-
// AncientSize returns the ancient size of the specified category.
AncientSize(kind string) (uint64, error)
+
+	// Tail returns the number of the first stored item in the freezer.
+	// This number can also be interpreted as the total number of deleted items.
+ Tail() (uint64, error)
}
// AncientReader is the extended ancient reader interface including 'batched' or 'atomic' reading.
@@ -111,8 +111,22 @@ type AncientWriter interface {
// The integer return value is the total size of the written data.
ModifyAncients(func(AncientWriteOp) error) (int64, error)
- // TruncateAncients discards all but the first n ancient data from the ancient store.
- TruncateAncients(n uint64) error
+ /*
+ Tail ------------> Head
+ */
+
+	// TruncateHead discards all but the first n ancient data from the ancient store.
+	// After the truncation, the latest accessible item is item_(n-1), counting from 0.
+	// Tail(0) -> New-head(n-1) xxxx Old-head
+ TruncateHead(n uint64) error
+
+	// TruncateTail discards the first n ancient data from the ancient store. Already
+	// deleted items are ignored. After the truncation, the earliest accessible item
+	// is item_n, counting from 0. Deleted items may not be removed from the ancient
+	// store immediately; only once the accumulated deleted data reaches the file-size
+	// threshold are they all removed together.
+	// Old-tail(0) xxxx New-tail(n) -> Head
+ TruncateTail(n uint64) error
// Sync flushes all in-memory ancient store data to disk.
Sync() error
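Taken together, the two truncation operations bound a half-open window of retrievable items. A small sketch against the reader interface (helper name hypothetical):

    // visibleRange reports the half-open range [tail, head) of item numbers
    // that remain retrievable after any head/tail truncations.
    func visibleRange(f ethdb.AncientReaderOp) (tail, head uint64, err error) {
        if tail, err = f.Tail(); err != nil {
            return 0, 0, err
        }
        if head, err = f.Ancients(); err != nil {
            return 0, 0, err
        }
        return tail, head, nil
    }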
From 44d3f93e35ce1477cc7b0e5d3a581dfa9e06d559 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Wed, 25 Sep 2024 14:52:06 +0700
Subject: [PATCH 12/41] core,eth,tests,trie: abstract node scheme, and
 construct database (#578)
* core,eth,tests,trie: abstract node scheme, and construct database
interface instead of keyvalue for supporting storing diff reverse data
in ancient
* stacktrie,core,eth: port the changes in stacktries, track the path prefix of nodes on commit, use ethdb.Database for constructing trie.Database; it's not necessary right now, but it's required for the path-based scheme to open the reverse diff freezer
* core,trie: add scheme and resolvepath logic
---
cmd/ronin/chaincmd.go | 12 ++-
core/blockchain.go | 51 ++++++----
core/blockchain_reader.go | 6 ++
core/chain_makers.go | 5 +-
core/genesis.go | 39 ++++----
core/genesis_test.go | 22 +++--
core/rawdb/accessors_state.go | 6 ++
core/state/database.go | 24 +++--
core/state/iterator_test.go | 15 ++-
core/state/snapshot/conversion.go | 27 ++++--
core/state/snapshot/generate.go | 5 +-
core/state/snapshot/generate_test.go | 4 +-
core/state/snapshot/snapshot.go | 12 +--
core/state/sync.go | 4 +-
core/state/sync_test.go | 64 +++++++------
eth/downloader/downloader.go | 6 +-
eth/downloader/downloader_test.go | 15 ++-
eth/downloader/statesync.go | 5 +-
eth/protocols/snap/sync.go | 37 +++++---
eth/protocols/snap/sync_test.go | 126 ++++++++++++++-----------
eth/tracers/api_test.go | 3 +-
les/client.go | 3 +-
les/downloader/downloader.go | 42 +++++----
les/downloader/statesync.go | 5 +-
tests/block_test_util.go | 5 +-
tests/fuzzers/stacktrie/trie_fuzzer.go | 59 +++++++++++-
tests/fuzzers/trie/trie-fuzzer.go | 5 +-
trie/database.go | 12 ++-
trie/database_test.go | 4 +-
trie/iterator_test.go | 6 +-
trie/schema.go | 96 +++++++++++++++++++
trie/secure_trie_test.go | 6 +-
trie/stacktrie.go | 117 ++++++++++++-----------
trie/stacktrie_test.go | 15 +--
trie/sync.go | 38 +++++++-
trie/sync_test.go | 37 ++++----
trie/trie.go | 16 ----
trie/trie_test.go | 29 +++---
38 files changed, 637 insertions(+), 346 deletions(-)
create mode 100644 trie/schema.go
diff --git a/cmd/ronin/chaincmd.go b/cmd/ronin/chaincmd.go
index b53a5c8951..937c5b3d68 100644
--- a/cmd/ronin/chaincmd.go
+++ b/cmd/ronin/chaincmd.go
@@ -38,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/urfave/cli/v2"
)
@@ -224,7 +225,11 @@ func initGenesis(ctx *cli.Context) error {
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
- _, hash, err := core.SetupGenesisBlock(chaindb, genesis, overrideChainConfig)
+	// Create the trie database first
+ triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{
+ Preimages: ctx.Bool(utils.CachePreimagesFlag.Name),
+ })
+ _, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis, overrideChainConfig)
if err != nil {
utils.Fatalf("Failed to write genesis block: %v", err)
}
@@ -466,7 +471,10 @@ func dump(ctx *cli.Context) error {
if err != nil {
return err
}
- state, err := state.New(root, state.NewDatabase(db), nil)
+ config := &trie.Config{
+ Preimages: true, // always enable preimage lookup
+ }
+ state, err := state.New(root, state.NewDatabaseWithConfig(db, config), nil)
if err != nil {
return err
}
diff --git a/core/blockchain.go b/core/blockchain.go
index 1701130992..55600c60ca 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -176,6 +176,7 @@ type BlockChain struct {
snaps *snapshot.Tree // Snapshot tree for fast trie leaf access
triegc *prque.Prque // Priority queue mapping block numbers to tries to gc
gcproc time.Duration // Accumulates canonical block processing for trie dumping
+ triedb *trie.Database // The database handler for maintaining trie nodes.
// txLookupLimit is the maximum number of blocks from head whose tx indices
// are reserved:
@@ -260,7 +261,19 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
internalTxsCache, _ := lru.New[common.Hash, []*types.InternalTransaction](internalTxsCacheLimit)
blobSidecarsCache, _ := lru.New[common.Hash, types.BlobSidecars](blobSidecarsCacheLimit)
- chainConfig, genesisHash, genesisErr := SetupGenesisBlockWithOverride(db, genesis, overrideArrowGlacier, false)
+
+ // Open trie database with provided config
+ triedb := trie.NewDatabaseWithConfig(
+ db,
+ &trie.Config{
+ Cache: cacheConfig.TrieCleanLimit,
+ Journal: cacheConfig.TrieCleanJournal,
+ Preimages: cacheConfig.Preimages,
+ })
+ // Setup the genesis block, commit the provided genesis specification
+ // to database if the genesis block is not present yet, or load the
+ // stored one from database.
+ chainConfig, genesisHash, genesisErr := SetupGenesisBlockWithOverride(db, triedb, genesis, overrideArrowGlacier, false)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}
@@ -270,6 +283,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
+ triedb: triedb,
triegc: prque.New(nil),
stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
@@ -299,6 +313,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
bc.evmHook = TestnetHook{}
}
+ bc.stateCache = state.NewDatabaseWithNodeDB(bc.db, bc.triedb)
bc.validator = NewBlockValidator(chainConfig, bc, engine)
bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine)
bc.processor = NewStateProcessor(chainConfig, bc, engine)
@@ -335,7 +350,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// Make sure the state associated with the block is available
head := bc.CurrentBlock()
- if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
+ if !bc.HasState(head.Root()) {
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
// rewound point is lower than disk layer.
@@ -419,11 +434,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
var recover bool
head := bc.CurrentBlock()
- if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer > head.NumberU64() {
+	// If the chain state was rewound to the snapshot disk layer, recovery mode should be enabled.
+ if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.NumberU64() {
log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
recover = true
}
- bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
+ bc.snaps, _ = snapshot.New(bc.db, bc.triedb, bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
}
// Start future block processor.
@@ -444,11 +460,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
bc.cacheConfig.TrieCleanRejournal = time.Minute
}
- triedb := bc.stateCache.TrieDB()
bc.wg.Add(1)
go func() {
defer bc.wg.Done()
- triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
+ bc.triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
}()
}
@@ -707,7 +722,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
// if the historical chain pruning is enabled. In that case the logic
// needs to be improved here.
if !bc.HasState(bc.genesisBlock.Root()) {
- if err := CommitGenesisState(bc.db, bc.genesisBlock.Hash()); err != nil {
+ if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
log.Crit("Failed to commit genesis state", "err", err)
}
log.Debug("Recommitted genesis state to disk")
@@ -808,7 +823,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
if block == nil {
return fmt.Errorf("non existent block [%x..]", hash[:4])
}
- if _, err := trie.NewSecure(common.Hash{}, block.Root(), bc.stateCache.TrieDB()); err != nil {
+ if _, err := trie.NewSecure(common.Hash{}, block.Root(), bc.triedb); err != nil {
return err
}
@@ -995,7 +1010,7 @@ func (bc *BlockChain) Stop() {
// - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle
// - HEAD-127: So we have a hard limit on the number of blocks reexecuted
if !bc.cacheConfig.TrieDirtyDisabled {
- triedb := bc.stateCache.TrieDB()
+ triedb := bc.triedb
for _, offset := range []uint64{0, 1, uint64(bc.cacheConfig.TriesInMemory) - 1} {
if number := bc.CurrentBlock().NumberU64(); number > offset {
@@ -1023,8 +1038,7 @@ func (bc *BlockChain) Stop() {
// Ensure all live cached entries be saved into disk, so that we can skip
// cache warmup when node restarts.
if bc.cacheConfig.TrieCleanJournal != "" {
- triedb := bc.stateCache.TrieDB()
- triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
+ bc.triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
}
log.Info("Blockchain stopped")
}
@@ -1564,27 +1578,26 @@ func (bc *BlockChain) writeBlockWithState(
if err != nil {
return NonStatTy, err
}
- triedb := bc.stateCache.TrieDB()
// If we're running an archive node, always flush
if bc.cacheConfig.TrieDirtyDisabled {
- if err := triedb.Commit(root, false, nil); err != nil {
+ if err := bc.triedb.Commit(root, false, nil); err != nil {
return NonStatTy, err
}
} else {
// Full but not archive node, do proper garbage collection
- triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
+ bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
bc.triegc.Push(root, -int64(block.NumberU64()))
triesInMemory := uint64(bc.cacheConfig.TriesInMemory)
if current := block.NumberU64(); current > triesInMemory {
// If we exceeded our memory allowance, flush matured singleton nodes to disk
var (
- nodes, imgs = triedb.Size()
+ nodes, imgs = bc.triedb.Size()
limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
)
if nodes > limit || imgs > 4*1024*1024 {
- triedb.Cap(limit - ethdb.IdealBatchSize)
+ bc.triedb.Cap(limit - ethdb.IdealBatchSize)
}
// Find the next state trie we need to commit
chosen := current - triesInMemory
@@ -1608,7 +1621,7 @@ func (bc *BlockChain) writeBlockWithState(
)
}
// Flush an entire trie and restart the counters
- triedb.Commit(header.Root, true, nil)
+ bc.triedb.Commit(header.Root, true, nil)
lastWrite = chosen
bc.gcproc = 0
}
@@ -1620,7 +1633,7 @@ func (bc *BlockChain) writeBlockWithState(
bc.triegc.Push(root, number)
break
}
- triedb.Dereference(root.(common.Hash))
+ bc.triedb.Dereference(root.(common.Hash))
}
}
}
@@ -2056,7 +2069,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool, sidecars
stats.processed++
stats.usedGas += usedGas
- dirty, _ := bc.stateCache.TrieDB().Size()
+ dirty, _ := bc.triedb.Size()
stats.report(chain, it.index, dirty)
}
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index ff503f91ec..f8371afa2d 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
)
// CurrentHeader retrieves the current head header of the canonical chain. The
@@ -325,6 +326,11 @@ func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
return bc.stateCache.TrieDB().Node(hash)
}
+// TrieDB retrieves the low level trie database used for data storage.
+func (bc *BlockChain) TrieDB() *trie.Database {
+ return bc.triedb
+}
+
// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 9e0f63fb14..5cec7c0214 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -362,10 +362,7 @@ func generateChain(
// then generate chain on top.
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
db := rawdb.NewMemoryDatabase()
- _, err := genesis.Commit(db)
- if err != nil {
- panic(err)
- }
+ genesis.MustCommit(db)
blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen, true)
return db, blocks, receipts
}
diff --git a/core/genesis.go b/core/genesis.go
index ea0b88941c..b3f6e1b7fc 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -103,8 +103,8 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
// flush is very similar with deriveHash, but the main difference is
// all the generated states will be persisted into the given database.
// Also, the genesis state specification will be flushed as well.
-func (ga *GenesisAlloc) flush(db ethdb.Database) error {
- statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database) error {
+ statedb, err := state.New(common.Hash{}, state.NewDatabaseWithNodeDB(db, triedb), nil)
if err != nil {
return err
}
@@ -116,14 +116,18 @@ func (ga *GenesisAlloc) flush(db ethdb.Database) error {
statedb.SetState(addr, key, value)
}
}
+	// Commit the current state, returning the root hash.
root, err := statedb.Commit(false)
if err != nil {
return err
}
- err = statedb.Database().TrieDB().Commit(root, true, nil)
- if err != nil {
- return err
+	// Commit the newly generated states to disk if the root is non-empty.
+ if root != types.EmptyRootHash {
+ if err := triedb.Commit(root, true, nil); err != nil {
+ return err
+ }
}
+
// Marshal the genesis state specification and persist.
blob, err := json.Marshal(ga)
if err != nil {
@@ -134,8 +138,8 @@ func (ga *GenesisAlloc) flush(db ethdb.Database) error {
}
// CommitGenesisState loads the stored genesis state with the given block
-// hash and commits them into the given database handler.
-func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
+// hash and commits it into the provided database handler.
+func CommitGenesisState(db ethdb.Database, triedb *trie.Database, hash common.Hash) error {
var alloc GenesisAlloc
blob := rawdb.ReadGenesisStateSpec(db, hash)
if len(blob) != 0 {
@@ -167,7 +171,7 @@ func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
return errors.New("not found")
}
}
- return alloc.flush(db)
+ return alloc.flush(db, triedb)
}
// GenesisAccount is an account in the state of the genesis block.
@@ -244,14 +248,15 @@ func (e *GenesisMismatchError) Error() string {
// error is a *params.ConfigCompatError and the new, unwritten config is returned.
//
// The returned chain configuration is never nil.
-func SetupGenesisBlock(db ethdb.Database, genesis *Genesis, overrideGenesis bool) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlockWithOverride(db, genesis, nil, overrideGenesis)
+func SetupGenesisBlock(db ethdb.Database, triedb *trie.Database, genesis *Genesis, overrideGenesis bool) (*params.ChainConfig, common.Hash, error) {
+ return SetupGenesisBlockWithOverride(db, triedb, genesis, nil, overrideGenesis)
}
-func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier *big.Int, forceOverrideChainConfig bool) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, genesis *Genesis, overrideArrowGlacier *big.Int, forceOverrideChainConfig bool) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
+
// Just commit the new block if there is no stored genesis block.
stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) {
@@ -261,7 +266,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
} else {
log.Info("Writing custom genesis block")
}
- block, err := genesis.Commit(db)
+ block, err := genesis.Commit(db, triedb)
if err != nil {
return genesis.Config, common.Hash{}, err
}
@@ -270,7 +275,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
// We have the genesis block in database(perhaps in ancient database)
// but the corresponding state is missing.
header := rawdb.ReadHeader(db, stored, 0)
- if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, nil), nil); err != nil {
+ if _, err := state.New(header.Root, state.NewDatabaseWithNodeDB(db, triedb), nil); err != nil {
if genesis == nil {
genesis = DefaultGenesisBlock()
}
@@ -279,7 +284,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if hash != stored {
return genesis.Config, hash, &GenesisMismatchError{stored, hash}
}
- block, err := genesis.Commit(db)
+ block, err := genesis.Commit(db, triedb)
if err != nil {
return genesis.Config, hash, err
}
@@ -410,7 +415,7 @@ func (g *Genesis) ToBlock() *types.Block {
// Commit writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block.
-func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
+func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block, error) {
block := g.ToBlock()
if block.Number().Sign() != 0 {
return nil, errors.New("can't commit genesis block with number > 0")
@@ -428,7 +433,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
// All the checks has passed, flush the states derived from the genesis
// specification as well as the specification itself into the provided
// database.
- if err := g.Alloc.flush(db); err != nil {
+ if err := g.Alloc.flush(db, triedb); err != nil {
return nil, err
}
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
@@ -445,7 +450,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
// MustCommit writes the genesis block and state to db, panicking on error.
// The block is committed as the canonical head block.
func (g *Genesis) MustCommit(db ethdb.Database) *types.Block {
- block, err := g.Commit(db)
+ block, err := g.Commit(db, trie.NewDatabase(db))
if err != nil {
panic(err)
}
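The calling convention after this commit: the caller constructs the trie.Database once and threads it through genesis setup. A minimal sketch on an in-memory database (helper name hypothetical):

    func setupInMemoryGenesis() error {
        db := rawdb.NewMemoryDatabase()
        triedb := trie.NewDatabase(db)
        _, _, err := core.SetupGenesisBlock(db, triedb, core.DefaultGenesisBlock(), false)
        return err
    }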
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 7c3fc5352b..aced782f3e 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -17,6 +17,7 @@
package core
import (
+ "encoding/json"
"math/big"
"reflect"
"testing"
@@ -28,12 +29,14 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
func TestInvalidCliqueConfig(t *testing.T) {
block := DefaultGoerliGenesisBlock()
block.ExtraData = []byte{}
- if _, err := block.Commit(nil); err == nil {
+ db := rawdb.NewMemoryDatabase()
+ if _, err := block.Commit(db, trie.NewDatabase(db)); err == nil {
t.Fatal("Expected error on invalid clique config")
}
}
@@ -60,7 +63,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "genesis without ChainConfig",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlock(db, new(Genesis), false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db), new(Genesis), false)
},
wantErr: errGenesisNoConfig,
wantConfig: params.AllEthashProtocolChanges,
@@ -68,7 +71,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "no block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlock(db, nil, false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db), nil, false)
},
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
@@ -77,7 +80,7 @@ func TestSetupGenesis(t *testing.T) {
name: "mainnet block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
DefaultGenesisBlock().MustCommit(db)
- return SetupGenesisBlock(db, nil, false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db), nil, false)
},
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
@@ -86,7 +89,7 @@ func TestSetupGenesis(t *testing.T) {
name: "custom block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
customg.MustCommit(db)
- return SetupGenesisBlock(db, nil, false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db), nil, false)
},
wantHash: customghash,
wantConfig: customg.Config,
@@ -95,7 +98,7 @@ func TestSetupGenesis(t *testing.T) {
name: "custom block in DB, genesis == ropsten",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
customg.MustCommit(db)
- return SetupGenesisBlock(db, DefaultRopstenGenesisBlock(), false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db), DefaultRopstenGenesisBlock(), false)
},
wantErr: &GenesisMismatchError{Stored: customghash, New: params.RopstenGenesisHash},
wantHash: params.RopstenGenesisHash,
@@ -105,7 +108,7 @@ func TestSetupGenesis(t *testing.T) {
name: "compatible config in DB",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
oldcustomg.MustCommit(db)
- return SetupGenesisBlock(db, &customg, false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db), &customg, false)
},
wantHash: customghash,
wantConfig: customg.Config,
@@ -124,7 +127,7 @@ func TestSetupGenesis(t *testing.T) {
bc.InsertChain(blocks, nil)
bc.CurrentBlock()
// This should return a compatibility error.
- return SetupGenesisBlock(db, &customg, false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db), &customg, false)
},
wantHash: customghash,
wantConfig: customg.Config,
@@ -219,7 +222,8 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
}
hash, _ = alloc.deriveHash()
)
- alloc.flush(db)
+ blob, _ := json.Marshal(alloc)
+ rawdb.WriteGenesisStateSpec(db, hash, blob)
var reload GenesisAlloc
err := reload.UnmarshalJSON(rawdb.ReadGenesisStateSpec(db, hash))
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index eb35804f41..d6b1053b60 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -61,6 +61,12 @@ func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
}
}
+// HasTrieNode checks if the trie node with the provided hash is present in db.
+func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(hash.Bytes())
+ return ok
+}
+
// DeleteCode deletes the specified contract code from the database.
func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(codeKey(hash)); err != nil {
diff --git a/core/state/database.go b/core/state/database.go
index 87461efcf1..02f5c4ea54 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -121,21 +121,31 @@ func NewDatabase(db ethdb.Database) Database {
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
csc, _ := lru.New[common.Hash, int](codeSizeCacheSize)
return &cachingDB{
- db: trie.NewDatabaseWithConfig(db, config),
+ triedb: trie.NewDatabaseWithConfig(db, config),
+ codeSizeCache: csc,
+ codeCache: fastcache.New(codeCacheSize),
+ }
+}
+
+// NewDatabaseWithNodeDB creates a state database with an already initialized node database.
+func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database {
+ csc, _ := lru.New[common.Hash, int](codeSizeCacheSize)
+ return &cachingDB{
+ triedb: triedb,
codeSizeCache: csc,
codeCache: fastcache.New(codeCacheSize),
}
}
type cachingDB struct {
- db *trie.Database
+ triedb *trie.Database
codeSizeCache *lru.Cache[common.Hash, int]
codeCache *fastcache.Cache
}
// OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
- tr, err := trie.NewSecure(common.Hash{}, root, db.db)
+ tr, err := trie.NewSecure(common.Hash{}, root, db.triedb)
if err != nil {
return nil, err
}
@@ -144,7 +154,7 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
// OpenStorageTrie opens the storage trie of an account.
func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
- tr, err := trie.NewSecure(addrHash, root, db.db)
+ tr, err := trie.NewSecure(addrHash, root, db.triedb)
if err != nil {
return nil, err
}
@@ -166,7 +176,7 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error
if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 {
return code, nil
}
- code := rawdb.ReadCode(db.db.DiskDB(), codeHash)
+ code := rawdb.ReadCode(db.triedb.DiskDB(), codeHash)
if len(code) > 0 {
db.codeCache.Set(codeHash.Bytes(), code)
db.codeSizeCache.Add(codeHash, len(code))
@@ -182,7 +192,7 @@ func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]b
if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 {
return code, nil
}
- code := rawdb.ReadCodeWithPrefix(db.db.DiskDB(), codeHash)
+ code := rawdb.ReadCodeWithPrefix(db.triedb.DiskDB(), codeHash)
if len(code) > 0 {
db.codeCache.Set(codeHash.Bytes(), code)
db.codeSizeCache.Add(codeHash, len(code))
@@ -202,5 +212,5 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro
// TrieDB retrieves any intermediate trie-node caching layer.
func (db *cachingDB) TrieDB() *trie.Database {
- return db.db
+ return db.triedb
}
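A hedged usage sketch of the new constructor: when several components must share a single trie.Database (and with it one clean cache and one preimage store), the node database is created once up front and handed to the state database, instead of letting cachingDB build its own. Names mirror this diff; the wiring is illustrative only.

    package main

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        diskdb := rawdb.NewMemoryDatabase()
        triedb := trie.NewDatabase(diskdb) // one shared node database
        sdb := state.NewDatabaseWithNodeDB(diskdb, triedb)
        statedb, _ := state.New(common.Hash{}, sdb, nil)
        _ = statedb
    }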
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
index d1afe9ca3e..7669ac97a2 100644
--- a/core/state/iterator_test.go
+++ b/core/state/iterator_test.go
@@ -21,16 +21,15 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb"
)
// Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) {
// Create some arbitrary test state to iterate
- db, root, _ := makeTestState()
- db.TrieDB().Commit(root, false, nil)
+ db, sdb, root, _ := makeTestState()
+ sdb.TrieDB().Commit(root, false, nil)
- state, err := New(root, db, nil)
+ state, err := New(root, sdb, nil)
if err != nil {
t.Fatalf("failed to create state trie at %x: %v", root, err)
}
@@ -43,19 +42,19 @@ func TestNodeIteratorCoverage(t *testing.T) {
}
// Cross check the iterated hashes and the database/nodepool content
for hash := range hashes {
- if _, err = db.TrieDB().Node(hash); err != nil {
- _, err = db.ContractCode(common.Hash{}, hash)
+ if _, err = sdb.TrieDB().Node(hash); err != nil {
+ _, err = sdb.ContractCode(common.Hash{}, hash)
}
if err != nil {
t.Errorf("failed to retrieve reported node %x", hash)
}
}
- for _, hash := range db.TrieDB().Nodes() {
+ for _, hash := range sdb.TrieDB().Nodes() {
if _, ok := hashes[hash]; !ok {
t.Errorf("state entry not reported %x", hash)
}
}
- it := db.TrieDB().DiskDB().(ethdb.Database).NewIterator(nil, nil)
+ it := db.NewIterator(nil, nil)
for it.Next() {
key := it.Key()
if bytes.HasPrefix(key, []byte("secure-key-")) {
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index 0f3934cb42..15bb43b842 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -43,7 +43,7 @@ type trieKV struct {
type (
// trieGeneratorFn is the interface of trie generation which can
// be implemented by different trie algorithm.
- trieGeneratorFn func(db ethdb.KeyValueWriter, owner common.Hash, in chan (trieKV), out chan (common.Hash))
+ trieGeneratorFn func(db ethdb.KeyValueWriter, scheme trie.NodeScheme, owner common.Hash, in chan (trieKV), out chan (common.Hash))
// leafCallbackFn is the callback invoked at the leaves of the trie,
// returns the subtrie root with the specified subtrie identifier.
@@ -52,12 +52,12 @@ type (
// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
- return generateTrieRoot(nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
+ return generateTrieRoot(nil, nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
}
// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
- return generateTrieRoot(nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true)
+ return generateTrieRoot(nil, nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true)
}
// GenerateTrie takes the whole snapshot tree as the input, traverses all the
@@ -71,7 +71,8 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd
}
defer acctIt.Release()
- got, err := generateTrieRoot(dst, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+ scheme := snaptree.triedb.Scheme()
+ got, err := generateTrieRoot(dst, scheme, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
// Migrate the code first, commit the contract code into the tmp db.
if codeHash != emptyCode {
code := rawdb.ReadCode(src, codeHash)
@@ -87,7 +88,7 @@ func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethd
}
defer storageIt.Release()
- hash, err := generateTrieRoot(dst, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+ hash, err := generateTrieRoot(dst, scheme, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
if err != nil {
return common.Hash{}, err
}
@@ -242,7 +243,7 @@ func runReport(stats *generateStats, stop chan bool) {
// generateTrieRoot generates the trie hash based on the snapshot iterator.
// It can be used for generating account trie, storage trie or even the
// whole state which connects the accounts and the corresponding storages.
-func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
+func generateTrieRoot(db ethdb.KeyValueWriter, scheme trie.NodeScheme, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
var (
in = make(chan trieKV) // chan to pass leaves
out = make(chan common.Hash, 1) // chan to collect result
@@ -253,7 +254,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash,
wg.Add(1)
go func() {
defer wg.Done()
- generatorFn(db, account, in, out)
+ generatorFn(db, scheme, account, in, out)
}()
// Spin up a go-routine for progress logging
if report && stats != nil {
@@ -360,8 +361,16 @@ func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash,
return stop(nil)
}
-func stackTrieGenerate(db ethdb.KeyValueWriter, owner common.Hash, in chan trieKV, out chan common.Hash) {
- t := trie.NewStackTrieWithOwner(db, owner)
+func stackTrieGenerate(db ethdb.KeyValueWriter, scheme trie.NodeScheme, owner common.Hash, in chan trieKV, out chan common.Hash) {
+
+ var nodeWriter trie.NodeWriteFunc
+ // Set up nodeWriter when a destination db is provided; otherwise leave it nil.
+ if db != nil {
+ nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ scheme.WriteTrieNode(db, owner, path, hash, blob)
+ }
+ }
+ t := trie.NewStackTrieWithOwner(nodeWriter, owner)
for leaf := range in {
t.TryUpdate(leaf.key[:], leaf.value)
}
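The callback shape above recurs throughout this patch: the stack trie no longer takes a raw ethdb.KeyValueWriter but a trie.NodeWriteFunc, and callers adapt their NodeScheme to whatever writer they hold. A hedged sketch of the pattern in isolation; writerFor is a hypothetical helper, not part of this patch, and assumes the usual common, ethdb and trie imports.

    // writerFor binds a NodeScheme to a concrete destination; every node the
    // stack trie commits is forwarded to scheme.WriteTrieNode.
    func writerFor(scheme trie.NodeScheme, dst ethdb.KeyValueWriter) trie.NodeWriteFunc {
        return func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
            scheme.WriteTrieNode(dst, owner, path, hash, blob)
        }
    }

    // Usage then mirrors stackTrieGenerate above:
    //   t := trie.NewStackTrieWithOwner(writerFor(scheme, db), owner)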
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 85b667c537..a18ecf22ea 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
@@ -428,9 +427,9 @@ func (dl *diskLayer) generateRange(owner common.Hash, root common.Hash, prefix [
// We use the snap data to build up a cache which can be used by the
// main account trie as a primary lookup when resolving hashes
- var snapNodeCache ethdb.KeyValueStore
+ var snapNodeCache ethdb.Database
if len(result.keys) > 0 {
- snapNodeCache = memorydb.New()
+ snapNodeCache = rawdb.NewMemoryDatabase()
snapTrieDb := trie.NewDatabase(snapNodeCache)
snapTrie, _ := trie.New(owner, common.Hash{}, snapTrieDb)
for i, key := range result.keys {
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index fc09cecbf3..3d59590c89 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -115,12 +115,12 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
t.Helper()
accIt := snap.AccountIterator(common.Hash{})
defer accIt.Release()
- snapRoot, err := generateTrieRoot(nil, accIt, common.Hash{}, stackTrieGenerate,
+ snapRoot, err := generateTrieRoot(nil, nil, accIt, common.Hash{}, stackTrieGenerate,
func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
storageIt, _ := snap.StorageIterator(accountHash, common.Hash{})
defer storageIt.Release()
- hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+ hash, err := generateTrieRoot(nil, nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 6ee6b06bb5..f111c96313 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -179,10 +179,10 @@ type Tree struct {
// If the memory layers in the journal do not match the disk layer (e.g. there is
// a gap) or the journal is missing, there are two repair cases:
//
-// - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
-// This case happens when the snapshot is 'ahead' of the state trie.
-// - otherwise, the entire snapshot is considered invalid and will be recreated on
-// a background thread.
+// - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
+// This case happens when the snapshot is 'ahead' of the state trie.
+// - otherwise, the entire snapshot is considered invalid and will be recreated on
+// a background thread.
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
// Create a new, empty snapshot tree
snap := &Tree{
@@ -767,14 +767,14 @@ func (t *Tree) Verify(root common.Hash) error {
}
defer acctIt.Release()
- got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+ got, err := generateTrieRoot(nil, nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
if err != nil {
return common.Hash{}, err
}
defer storageIt.Release()
- hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+ hash, err := generateTrieRoot(nil, nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/sync.go b/core/state/sync.go
index a69a10dd92..e2b414259a 100644
--- a/core/state/sync.go
+++ b/core/state/sync.go
@@ -27,7 +27,7 @@ import (
)
// NewStateSync create a new state trie download scheduler.
-func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(keys [][]byte, leaf []byte) error) *trie.Sync {
+func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(keys [][]byte, leaf []byte) error, scheme trie.NodeScheme) *trie.Sync {
// Register the storage slot callback if the external callback is specified.
var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
if onLeaf != nil {
@@ -52,6 +52,6 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.S
syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), path, parent, parentPath)
return nil
}
- syncer = trie.NewSync(root, database, onAccount, bloom)
+ syncer = trie.NewSync(root, database, onAccount, bloom, scheme)
return syncer
}
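With the extra parameter, callers are expected to derive the scheme from the trie database backing their state, exactly as the updated tests below do. A hedged sketch, assuming the usual common, rawdb, state and trie imports; the root is a placeholder.

    dstDb := rawdb.NewMemoryDatabase()
    triedb := trie.NewDatabase(dstDb)
    root := common.Hash{} // placeholder state root
    sched := state.NewStateSync(root, dstDb, trie.NewSyncBloom(1, dstDb), nil, triedb.Scheme())
    _ = sched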
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index f03e0ac840..b35830d1a9 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -40,10 +40,11 @@ type testAccount struct {
}
// makeTestState create a sample test state to test node-wise reconstruction.
-func makeTestState() (Database, common.Hash, []*testAccount) {
+func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
// Create an empty state
- db := NewDatabase(rawdb.NewMemoryDatabase())
- state, _ := New(common.Hash{}, db, nil)
+ db := rawdb.NewMemoryDatabase()
+ sdb := NewDatabase(db)
+ state, _ := New(common.Hash{}, sdb, nil)
// Fill it with some arbitrary data
var accounts []*testAccount
@@ -64,7 +65,7 @@ func makeTestState() (Database, common.Hash, []*testAccount) {
if i%5 == 0 {
for j := byte(0); j < 5; j++ {
hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
- obj.SetState(db, hash, hash)
+ obj.SetState(sdb, hash, hash)
}
}
state.updateStateObject(obj)
@@ -73,7 +74,7 @@ func makeTestState() (Database, common.Hash, []*testAccount) {
root, _ := state.Commit(false)
// Return the generated state
- return db, root, accounts
+ return db, sdb, root, accounts
}
// checkStateAccounts cross references a reconstructed state with an expected
@@ -133,8 +134,9 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error {
// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
+ db := trie.NewDatabase(rawdb.NewMemoryDatabase())
empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
- sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil)
+ sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil, db.Scheme())
if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes)
}
@@ -171,7 +173,7 @@ type stateElement struct {
func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
// Create a random state to copy
- srcDb, srcRoot, srcAccounts := makeTestState()
+ _, srcDb, srcRoot, srcAccounts := makeTestState()
if commit {
srcDb.TrieDB().Commit(srcRoot, false, nil)
}
@@ -179,7 +181,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
var (
nodeElements []stateElement
@@ -281,11 +283,11 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
// Create a random state to copy
- srcDb, srcRoot, srcAccounts := makeTestState()
+ _, srcDb, srcRoot, srcAccounts := makeTestState()
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
var (
nodeElements []stateElement
@@ -374,11 +376,11 @@ func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomS
func testIterativeRandomStateSync(t *testing.T, count int) {
// Create a random state to copy
- srcDb, srcRoot, srcAccounts := makeTestState()
+ _, srcDb, srcRoot, srcAccounts := makeTestState()
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
nodeQueue := make(map[string]stateElement)
codeQueue := make(map[common.Hash]struct{})
@@ -454,11 +456,11 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
// partial results are returned (Even those randomly), others sent only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
// Create a random state to copy
- srcDb, srcRoot, srcAccounts := makeTestState()
+ _, srcDb, srcRoot, srcAccounts := makeTestState()
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
nodeQueue := make(map[string]stateElement)
codeQueue := make(map[common.Hash]struct{})
@@ -544,7 +546,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
// the database.
func TestIncompleteStateSync(t *testing.T) {
// Create a random state to copy
- srcDb, srcRoot, srcAccounts := makeTestState()
+ db, srcDb, srcRoot, srcAccounts := makeTestState()
// isCodeLookup to save some hashing
var isCode = make(map[common.Hash]struct{})
@@ -554,15 +556,16 @@ func TestIncompleteStateSync(t *testing.T) {
}
}
isCode[common.BytesToHash(emptyCodeHash)] = struct{}{}
- checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot)
+ checkTrieConsistency(db, srcRoot)
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
var (
- addedCodes []common.Hash
- addedNodes []common.Hash
+ addedCodes []common.Hash
+ addedPaths []string
+ addedHashes []common.Hash
)
nodeQueue := make(map[string]stateElement)
codeQueue := make(map[common.Hash]struct{})
@@ -599,15 +602,16 @@ func TestIncompleteStateSync(t *testing.T) {
var nodehashes []common.Hash
if len(nodeQueue) > 0 {
results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
- for key, element := range nodeQueue {
+ for path, element := range nodeQueue {
data, err := srcDb.TrieDB().Node(element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x", element.hash)
}
- results = append(results, trie.NodeSyncResult{Path: key, Data: data})
+ results = append(results, trie.NodeSyncResult{Path: path, Data: data})
if element.hash != srcRoot {
- addedNodes = append(addedNodes, element.hash)
+ addedPaths = append(addedPaths, element.path)
+ addedHashes = append(addedHashes, element.hash)
}
nodehashes = append(nodehashes, element.hash)
}
@@ -655,12 +659,18 @@ func TestIncompleteStateSync(t *testing.T) {
}
rawdb.WriteCode(dstDb, node, val)
}
- for _, node := range addedNodes {
- val := rawdb.ReadTrieNode(dstDb, node)
- rawdb.DeleteTrieNode(dstDb, node)
+ scheme := srcDb.TrieDB().Scheme()
+ for i, path := range addedPaths {
+ owner, inner := trie.ResolvePath([]byte(path))
+ hash := addedHashes[i]
+ val := scheme.ReadTrieNode(dstDb, owner, inner, hash)
+ if val == nil {
+ t.Error("missing trie node")
+ }
+ scheme.DeleteTrieNode(dstDb, owner, inner, hash)
if err := checkStateConsistency(dstDb, srcRoot); err == nil {
- t.Errorf("trie inconsistency not caught, missing: %v", node.Hex())
+ t.Errorf("trie inconsistency not caught, missing: %v", path)
}
- rawdb.WriteTrieNode(dstDb, node, val)
+ scheme.WriteTrieNode(dstDb, owner, inner, hash, val)
}
}
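Since node results are now tracked by path rather than by hash, the test resolves each composite path back into its owner hash and inner path before touching the stored node; trie.ResolvePath is the helper this patch relies on for that. Restated in isolation, with placeholders throughout:

    owner, inner := trie.ResolvePath([]byte(path))
    blob := scheme.ReadTrieNode(db, owner, inner, hash)  // fetch the persisted node
    scheme.DeleteTrieNode(db, owner, inner, hash)        // provoke an inconsistency
    scheme.WriteTrieNode(db, owner, inner, hash, blob)   // restore it afterwards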
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 4623205baa..a0cc2fd341 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -202,6 +202,10 @@ type BlockChain interface {
// Snapshots returns the blockchain snapshot tree to paused it during sync.
Snapshots() *snapshot.Tree
+
+ // TrieDB retrieves the low level trie database used for interacting
+ // with the trie nodes.
+ TrieDB() *trie.Database
}
// New creates a new downloader to fetch hashes and blocks from remote peers.
@@ -230,7 +234,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
headerProcCh: make(chan []*types.Header, 1),
quitCh: make(chan struct{}),
stateCh: make(chan dataPack),
- SnapSyncer: snap.NewSyncer(stateDb),
+ SnapSyncer: snap.NewSyncer(stateDb, chain.TrieDB().Scheme()),
stateSyncStart: make(chan *stateSync),
syncStatsState: stateSyncStats{
processed: rawdb.ReadFastTrieProgress(stateDb),
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 010f3a11ca..018f03e38d 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -48,11 +48,11 @@ func init() {
// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
downloader *Downloader
-
- genesis *types.Block // Genesis blocks used by the tester and peers
- stateDb ethdb.Database // Database used by the tester for syncing from peers
- peerDb ethdb.Database // Database of the peers containing all data
- peers map[string]*downloadTesterPeer
+ triedb *trie.Database
+ genesis *types.Block // Genesis blocks used by the tester and peers
+ stateDb ethdb.Database // Database used by the tester for syncing from peers
+ peerDb ethdb.Database // Database of the peers containing all data
+ peers map[string]*downloadTesterPeer
ownHashes []common.Hash // Hash chain belonging to the tester
ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester
@@ -88,11 +88,16 @@ func newTester() *downloadTester {
}
tester.stateDb = rawdb.NewMemoryDatabase()
tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
+ tester.triedb = trie.NewDatabase(tester.stateDb)
tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer, tester.verifyBlobHeader)
return tester
}
+func (dl *downloadTester) TrieDB() *trie.Database {
+ return dl.triedb
+}
+
// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index 696089eaba..4e7f818135 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -296,10 +296,13 @@ type codeTask struct {
// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
+ // Hack the node scheme here. This is dead code, never used by the
+ // light client at all; it exists only to keep the tests passing.
+ scheme := trie.NewDatabase(rawdb.NewMemoryDatabase()).Scheme()
return &stateSync{
d: d,
root: root,
- sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
+ sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil, scheme),
keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState),
trieTasks: make(map[string]*trieTask),
codeTasks: make(map[common.Hash]*codeTask),
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index a78ed079ce..b798f9afa2 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -414,7 +414,8 @@ type SyncPeer interface {
// - The peer delivers a stale response after a previous timeout
// - The peer delivers a refusal to serve the requested state
type Syncer struct {
- db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
+ db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
+ scheme trie.NodeScheme // Node scheme used in node database
root common.Hash // Current state trie root being synced
tasks []*accountTask // Current account task set being synced
@@ -480,10 +481,10 @@ type Syncer struct {
// NewSyncer creates a new snapshot syncer to download the Ethereum state over the
// snap protocol.
-func NewSyncer(db ethdb.KeyValueStore) *Syncer {
+func NewSyncer(db ethdb.KeyValueStore, scheme trie.NodeScheme) *Syncer {
return &Syncer{
- db: db,
-
+ db: db,
+ scheme: scheme,
peers: make(map[string]SyncPeer),
peerJoin: new(event.Feed),
peerDrop: new(event.Feed),
@@ -574,7 +575,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.lock.Lock()
s.root = root
s.healer = &healTask{
- scheduler: state.NewStateSync(root, s.db, nil, s.onHealState),
+ scheduler: state.NewStateSync(root, s.db, nil, s.onHealState, s.scheme),
trieTasks: make(map[string]common.Hash),
codeTasks: make(map[common.Hash]struct{}),
}
@@ -719,7 +720,9 @@ func (s *Syncer) loadSyncStatus() {
s.accountBytes += common.StorageSize(len(key) + len(value))
},
}
- task.genTrie = trie.NewStackTrie(task.genBatch)
+ task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ s.scheme.WriteTrieNode(task.genBatch, owner, path, hash, val)
+ })
for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
@@ -729,7 +732,9 @@ func (s *Syncer) loadSyncStatus() {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
- subtask.genTrie = trie.NewStackTrieWithOwner(subtask.genBatch, accountHash)
+ subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ s.scheme.WriteTrieNode(subtask.genBatch, owner, path, hash, val)
+ }, accountHash)
}
}
}
@@ -783,7 +788,9 @@ func (s *Syncer) loadSyncStatus() {
Last: last,
SubTasks: make(map[common.Hash][]*storageTask),
genBatch: batch,
- genTrie: trie.NewStackTrie(batch),
+ genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ s.scheme.WriteTrieNode(batch, owner, path, hash, val)
+ }),
})
log.Debug("Created account sync task", "from", next, "last", last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@@ -1796,7 +1803,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
}
// Check if the account is a contract with an unknown storage trie
if account.Root != emptyRoot {
- if node, err := s.db.Get(account.Root[:]); err != nil || node == nil {
+ if !s.scheme.HasTrieNode(s.db, res.hashes[i], nil, account.Root) {
// If there was a previous large state retrieval in progress,
// don't restart it from scratch. This happens if a sync cycle
// is interrupted and resumed later. However, *do* update the
@@ -1968,7 +1975,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrieWithOwner(batch, account),
+ genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ s.scheme.WriteTrieNode(batch, owner, path, hash, val)
+ }, account),
})
for r.Next() {
batch := ethdb.HookedBatch{
@@ -1982,7 +1991,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrieWithOwner(batch, account),
+ genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ s.scheme.WriteTrieNode(batch, owner, path, hash, val)
+ }, account),
})
}
for _, task := range tasks {
@@ -2027,7 +2038,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
slots += len(res.hashes[i])
if i < len(res.hashes)-1 || res.subTask == nil {
- tr := trie.NewStackTrieWithOwner(batch, account)
+ tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
+ s.scheme.WriteTrieNode(batch, owner, path, hash, val)
+ }, account)
for j := 0; j < len(res.hashes[i]); j++ {
tr.Update(res.hashes[i][j][:], res.slots[i][j])
}
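The same three-line closure is repeated at every genTrie construction site in this file. A hedged refactoring sketch, with onTrieNode as a hypothetical method that is not part of this patch, binding the scheme and batch once:

    // Hypothetical helper, not part of this patch:
    func (s *Syncer) onTrieNode(batch ethdb.KeyValueWriter) trie.NodeWriteFunc {
        return func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
            s.scheme.WriteTrieNode(batch, owner, path, hash, val)
        }
    }

    // Each site would then reduce to, e.g.:
    //   task.genTrie = trie.NewStackTrie(s.onTrieNode(task.genBatch))
    //   subtask.genTrie = trie.NewStackTrieWithOwner(s.onTrieNode(subtask.genBatch), accountHash)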
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index dc2a9a4839..940357f412 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -159,6 +159,13 @@ func newTestPeer(id string, t *testing.T, term func()) *testPeer {
return peer
}
+func (t *testPeer) setStorageTries(tries map[common.Hash]*trie.Trie) {
+ t.storageTries = make(map[common.Hash]*trie.Trie)
+ for root, trie := range tries {
+ t.storageTries[root] = trie.Copy()
+ }
+}
+
func (t *testPeer) ID() string { return t.id }
func (t *testPeer) Log() log.Logger { return t.logger }
@@ -562,7 +569,8 @@ func TestSyncBloatedProof(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
source := newTestPeer("source", t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
@@ -610,15 +618,15 @@ func TestSyncBloatedProof(t *testing.T) {
}
return nil
}
- syncer := setupSyncer(source)
+ syncer := setupSyncer(nodeScheme, source)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {
t.Fatal("No error returned from incomplete/cancelled sync")
}
}
-func setupSyncer(peers ...*testPeer) *Syncer {
+func setupSyncer(scheme trie.NodeScheme, peers ...*testPeer) *Syncer {
stateDb := rawdb.NewMemoryDatabase()
- syncer := NewSyncer(stateDb)
+ syncer := NewSyncer(stateDb, scheme)
for _, peer := range peers {
syncer.Register(peer)
peer.remote = syncer
@@ -639,7 +647,7 @@ func TestSync(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -647,7 +655,7 @@ func TestSync(t *testing.T) {
source.accountValues = elems
return source
}
- syncer := setupSyncer(mkSource("source"))
+ syncer := setupSyncer(nodeScheme, mkSource("source"))
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
@@ -668,7 +676,7 @@ func TestSyncTinyTriePanic(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -676,7 +684,7 @@ func TestSyncTinyTriePanic(t *testing.T) {
source.accountValues = elems
return source
}
- syncer := setupSyncer(mkSource("source"))
+ syncer := setupSyncer(nodeScheme, mkSource("source"))
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
@@ -698,7 +706,7 @@ func TestMultiSync(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -706,7 +714,7 @@ func TestMultiSync(t *testing.T) {
source.accountValues = elems
return source
}
- syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
+ syncer := setupSyncer(nodeScheme, mkSource("sourceA"), mkSource("sourceB"))
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
@@ -728,7 +736,7 @@ func TestSyncWithStorage(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -738,7 +746,7 @@ func TestSyncWithStorage(t *testing.T) {
source.storageValues = storageElems
return source
}
- syncer := setupSyncer(mkSource("sourceA"))
+ syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
done := checkStall(t, term)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
@@ -760,13 +768,13 @@ func TestMultiSyncManyUseless(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
- source.storageTries = storageTries
+ source.setStorageTries(storageTries)
source.storageValues = storageElems
if !noAccount {
@@ -781,7 +789,7 @@ func TestMultiSyncManyUseless(t *testing.T) {
return source
}
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@@ -806,13 +814,13 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
- source.storageTries = storageTries
+ source.setStorageTries(storageTries)
source.storageValues = storageElems
if !noAccount {
@@ -828,6 +836,7 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
}
syncer := setupSyncer(
+ nodeScheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@@ -857,13 +866,13 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
- source.storageTries = storageTries
+ source.setStorageTries(storageTries)
source.storageValues = storageElems
if !noAccount {
@@ -878,7 +887,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
return source
}
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@@ -923,7 +932,7 @@ func TestSyncBoundaryAccountTrie(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
+ nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -931,7 +940,7 @@ func TestSyncBoundaryAccountTrie(t *testing.T) {
source.accountValues = elems
return source
}
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("peer-a"),
mkSource("peer-b"),
)
@@ -957,11 +966,11 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
if slow {
@@ -970,7 +979,7 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
return source
}
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("nice-a", false),
mkSource("nice-b", false),
mkSource("nice-c", false),
@@ -998,11 +1007,11 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.codeRequestHandler = codeFn
return source
@@ -1011,7 +1020,7 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
// chance that the full set of codes requested are sent only to the
// non-corrupt peer, which delivers everything in one go, and makes the
// test moot
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("capped", cappedCodeRequestHandler),
mkSource("corrupt", corruptCodeRequestHandler),
)
@@ -1035,7 +1044,7 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1048,7 +1057,7 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
// chance that the full set of codes requested are sent only to the
// non-corrupt peer, which delivers everything in one go, and makes the
// test moot
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("capped", defaultAccountRequestHandler),
mkSource("corrupt", corruptAccountRequestHandler),
)
@@ -1074,7 +1083,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1087,6 +1096,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
// so it shouldn't be more than that
var counter int
syncer := setupSyncer(
+ nodeScheme,
mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {
counter++
return cappedCodeRequestHandler(t, id, hashes, max)
@@ -1122,7 +1132,7 @@ func TestSyncBoundaryStorageTrie(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1132,7 +1142,7 @@ func TestSyncBoundaryStorageTrie(t *testing.T) {
source.storageValues = storageElems
return source
}
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("peer-a"),
mkSource("peer-b"),
)
@@ -1158,7 +1168,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -1173,7 +1183,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
return source
}
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("nice-a", false),
mkSource("slow", true),
)
@@ -1199,7 +1209,7 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1211,7 +1221,7 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
return source
}
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("nice-a", defaultStorageRequestHandler),
mkSource("nice-b", defaultStorageRequestHandler),
mkSource("nice-c", defaultStorageRequestHandler),
@@ -1237,7 +1247,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1248,7 +1258,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
source.storageRequestHandler = handler
return source
}
- syncer := setupSyncer(
+ syncer := setupSyncer(nodeScheme,
mkSource("nice-a", defaultStorageRequestHandler),
mkSource("nice-b", defaultStorageRequestHandler),
mkSource("nice-c", defaultStorageRequestHandler),
@@ -1277,7 +1287,7 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) {
})
}
)
- sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1288,7 +1298,7 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) {
source.storageRequestHandler = proofHappyStorageRequestHandler
return source
}
- syncer := setupSyncer(mkSource("sourceA"))
+ syncer := setupSyncer(nodeScheme, mkSource("sourceA"))
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
@@ -1345,12 +1355,14 @@ func getCodeByHash(hash common.Hash) []byte {
}
// makeAccountTrieNoStorage spits out a trie, along with the leafs
-func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
+func makeAccountTrieNoStorage(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
+ // Create an empty trie
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie = trie.NewEmpty(db)
entries entrySlice
)
+ // Fill the trie with n accounts
for i := uint64(1); i <= uint64(n); i++ {
value, _ := rlp.EncodeToBytes(types.StateAccount{
Nonce: i,
@@ -1360,9 +1372,11 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
})
key := key32(i)
elem := &kv{key, value}
+ // Update the account trie and keep the entries
accTrie.Update(elem.k, elem.v)
entries = append(entries, elem)
}
+ // Sort ascending by key
sort.Sort(entries)
// Commit the state changes into db and re-create the trie
// for accessing later.
@@ -1370,13 +1384,13 @@ func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {
db.Update(trie.NewWithNodeSet(nodes))
accTrie, _ = trie.New(common.Hash{}, root, db)
- return accTrie, entries
+ return db.Scheme(), accTrie, entries
}
// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hash.
-func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
+func makeBoundaryAccountTrie(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
@@ -1431,12 +1445,12 @@ func makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {
db.Update(trie.NewWithNodeSet(nodes))
accTrie, _ = trie.New(common.Hash{}, root, db)
- return accTrie, entries
+ return db.Scheme(), accTrie, entries
}
// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts
-// has a unique storage set.
-func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
+// has a unique storage set. If code is true, a random code hash is assigned to each account.
+func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (trie.NodeScheme, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie = trie.NewEmpty(db)
@@ -1485,11 +1499,11 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
storageTries[common.BytesToHash(key)] = trie
}
- return accTrie, entries, storageTries, storageEntries
+ return db.Scheme(), accTrie, entries, storageTries, storageEntries
}
// makeAccountTrieWithStorage spits out a trie, along with the leafs
-func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
+func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (trie.NodeScheme, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie = trie.NewEmpty(db)
@@ -1553,7 +1567,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie
}
storageTries[common.BytesToHash(key)] = trie
}
- return accTrie, entries, storageTries, storageEntries
+ return db.Scheme(), accTrie, entries, storageTries, storageEntries
}
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
@@ -1632,7 +1646,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
- triedb := trie.NewDatabase(db)
+ triedb := trie.NewDatabase(rawdb.NewDatabase(db))
accTrie, err := trie.New(common.Hash{}, root, triedb)
if err != nil {
t.Fatal(err)
@@ -1687,7 +1701,7 @@ func TestSyncAccountPerformance(t *testing.T) {
})
}
)
- sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1696,7 +1710,7 @@ func TestSyncAccountPerformance(t *testing.T) {
return source
}
src := mkSource("source")
- syncer := setupSyncer(src)
+ syncer := setupSyncer(nodeScheme, src)
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index 1b397d08f1..c6d98e0704 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -46,6 +46,7 @@ import (
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
)
var (
@@ -84,7 +85,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i
TrieDirtyDisabled: true, // Archive mode
}
- _, _, genesisErr := core.SetupGenesisBlockWithOverride(backend.chaindb, gspec, nil, true)
+ _, _, genesisErr := core.SetupGenesisBlockWithOverride(backend.chaindb, trie.NewDatabase(backend.chaindb), gspec, nil, true)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
t.Fatal(genesisErr.Error())
}
diff --git a/les/client.go b/les/client.go
index 46daa0eb03..6e8fcbc308 100644
--- a/les/client.go
+++ b/les/client.go
@@ -47,6 +47,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
)
type LightEthereum struct {
@@ -88,7 +89,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier, false)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb), config.Genesis, config.OverrideArrowGlacier, false)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr
}
diff --git a/les/downloader/downloader.go b/les/downloader/downloader.go
index e7dfc4158e..109406d1e1 100644
--- a/les/downloader/downloader.go
+++ b/les/downloader/downloader.go
@@ -229,7 +229,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
headerProcCh: make(chan []*types.Header, 1),
quitCh: make(chan struct{}),
stateCh: make(chan dataPack),
- SnapSyncer: snap.NewSyncer(stateDb),
+ SnapSyncer: snap.NewSyncer(stateDb, nil),
stateSyncStart: make(chan *stateSync),
syncStatsState: stateSyncStats{
processed: rawdb.ReadFastTrieProgress(stateDb),
@@ -705,9 +705,11 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
// common ancestor.
// It returns parameters to be used for peer.RequestHeadersByNumber:
-// from - starting block number
-// count - number of headers to request
-// skip - number of headers to skip
+//
+// from - starting block number
+// count - number of headers to request
+// skip - number of headers to skip
+//
// and also returns 'max', the last block which is expected to be returned by the remote peers,
// given the (from,count,skip)
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
@@ -1322,22 +1324,22 @@ func (d *Downloader) fetchReceipts(from uint64) error {
// various callbacks to handle the slight differences between processing them.
//
// The instrumentation parameters:
-// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
-// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
-// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
-// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
-// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
-// - pending: task callback for the number of requests still needing download (detect completion/non-completability)
-// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
-// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
-// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
-// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
-// - fetch: network callback to actually send a particular download request to a physical remote peer
-// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
-// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
-// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
-// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
-// - kind: textual label of the type being downloaded to display in log messages
+// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
+// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
+// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
+// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
+// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
+// - pending: task callback for the number of requests still needing download (detect completion/non-completability)
+// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
+// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
+// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
+// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
+// - fetch: network callback to actually send a particular download request to a physical remote peer
+// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
+// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
+// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
+// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
+// - kind: textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
diff --git a/les/downloader/statesync.go b/les/downloader/statesync.go
index 696089eaba..4e7f818135 100644
--- a/les/downloader/statesync.go
+++ b/les/downloader/statesync.go
@@ -296,10 +296,13 @@ type codeTask struct {
// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
+ // Hack the node scheme here. This is dead code, never used by the
+ // light client at all; it exists only to keep the tests passing.
+ scheme := trie.NewDatabase(rawdb.NewMemoryDatabase()).Scheme()
return &stateSync{
d: d,
root: root,
- sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
+ sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil, scheme),
keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState),
trieTasks: make(map[string]*trieTask),
codeTasks: make(map[common.Hash]*codeTask),
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 4881a29d90..df955e3d69 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -106,10 +106,7 @@ func (t *BlockTest) Run(snapshotter bool) error {
// import pre accounts & construct test genesis block & state root
db := rawdb.NewMemoryDatabase()
- gblock, err := t.genesis(config).Commit(db)
- if err != nil {
- return err
- }
+ gblock := t.genesis(config).MustCommit(db)
if gblock.Hash() != t.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
}
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 48dbd04610..b230e8fd98 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -25,6 +25,8 @@ import (
"io"
"sort"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
@@ -143,11 +145,14 @@ func (f *fuzzer) fuzz() int {
// This spongeDb is used to check the sequence of disk-db-writes
var (
- spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbA = trie.NewDatabase(spongeA)
- trieA = trie.NewEmpty(dbA)
- spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- trieB = trie.NewStackTrie(spongeB)
+ spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
+ dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA))
+ trieA = trie.NewEmpty(dbA)
+ spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
+ dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB))
+ trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ dbB.Scheme().WriteTrieNode(spongeB, owner, path, hash, blob)
+ })
vals kvs
useful bool
maxElements = 10000
@@ -206,5 +211,49 @@ func (f *fuzzer) fuzz() int {
if !bytes.Equal(sumA, sumB) {
panic(fmt.Sprintf("sequence differ: (trie) %x != %x (stacktrie)", sumA, sumB))
}
+ // Ensure all the nodes are persisted correctly. Disabled for now,
+ // as it requires tracking deleted nodes as well.
+ // var (
+ // nodeset = make(map[string][]byte) // path -> blob
+ // trieC = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ // if crypto.Keccak256Hash(blob) != hash {
+ // panic("invalid node blob")
+ // }
+ // if owner != (common.Hash{}) {
+ // panic("invalid node owner")
+ // }
+ // nodeset[string(path)] = common.CopyBytes(blob)
+ // })
+ // checked int
+ // )
+ // for _, kv := range vals {
+ // trieC.Update(kv.k, kv.v)
+ // }
+ // rootC, _ := trieC.Commit()
+ // if rootA != rootC {
+ // panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC))
+ // }
+ // trieA, _ = trie.New(trie.TrieID(rootA), dbA)
+ // iterA := trieA.NodeIterator(nil)
+ // for iterA.Next(true) {
+ // if iterA.Hash() == (common.Hash{}) {
+ // if _, present := nodeset[string(iterA.Path())]; present {
+ // panic("unexpected tiny node")
+ // }
+ // continue
+ // }
+ // nodeBlob, present := nodeset[string(iterA.Path())]
+ // if !present {
+ // panic("missing node")
+ // }
+ // if !bytes.Equal(nodeBlob, iterA.NodeBlob()) {
+ // panic("node blob is not matched")
+ // }
+ // checked += 1
+ // }
+ // if checked != len(nodeset) {
+ // panic("node number is not matched")
+ // }
+
return 1
}
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index b2f260fb58..4237abfc98 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -22,7 +22,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/trie"
)
@@ -143,8 +143,7 @@ func Fuzz(input []byte) int {
}
func runRandTest(rt randTest) error {
-
- triedb := trie.NewDatabase(memorydb.New())
+ triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
tr := trie.NewEmpty(triedb)
values := make(map[string]string) // tracks content of the trie
diff --git a/trie/database.go b/trie/database.go
index 6453f2bf0b..c3add0a8a2 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -68,7 +68,7 @@ var (
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
- diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes
+ diskdb ethdb.Database // Persistent storage for matured trie nodes
cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
@@ -280,14 +280,15 @@ type Config struct {
// NewDatabase creates a new trie database to store ephemeral trie content before
// it's written out to disk or garbage collected. No read cache is created, so all
// data retrievals will hit the underlying disk database.
-func NewDatabase(diskdb ethdb.KeyValueStore) *Database {
+// Using ethdb.Database, which covers both the KeyValueStore and Freezer interfaces.
+func NewDatabase(diskdb ethdb.Database) *Database {
return NewDatabaseWithConfig(diskdb, nil)
}
// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content
// before it's written out to disk or garbage collected. It also acts as a read cache
// for nodes loaded from disk.
-func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database {
+func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
var cleans *fastcache.Cache
if config != nil && config.Cache > 0 {
if config.Journal == "" {
@@ -864,3 +865,8 @@ func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, st
}
}
}
+
+// Scheme returns the node scheme used in the database. Right now, only the hash scheme is supported.
+func (db *Database) Scheme() NodeScheme {
+ return &hashScheme{}
+}
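
The accessor gives callers a single place to ask how nodes are keyed on disk. A minimal sketch of wiring it into the sync scheduler, mirroring the NewSync call sites updated later in this patch (the root hash is a placeholder):

    package main

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        diskdb := rawdb.NewMemoryDatabase()
        triedb := trie.NewDatabase(diskdb)
        // The scheduler takes the scheme descriptor instead of writing
        // hash->blob entries itself; root is a placeholder value here.
        root := common.HexToHash("0x01")
        sched := trie.NewSync(root, diskdb, nil, trie.NewSyncBloom(1, diskdb), triedb.Scheme())
        _ = sched
    }
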
diff --git a/trie/database_test.go b/trie/database_test.go
index 81c469500f..54d7529476 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -20,13 +20,13 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/core/rawdb"
)
// Tests that the trie database returns a missing trie node error if attempting
// to retrieve the meta root.
func TestDatabaseMetarootFetch(t *testing.T) {
- db := NewDatabase(memorydb.New())
+ db := NewDatabase(rawdb.NewMemoryDatabase())
if _, err := db.Node(common.Hash{}); err == nil {
t.Fatalf("metaroot retrieval succeeded")
}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 77a0fd3d67..32d2bfae39 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -326,7 +326,7 @@ func TestIteratorContinueAfterErrorDisk(t *testing.T) { testIteratorContinueA
func TestIteratorContinueAfterErrorMemonly(t *testing.T) { testIteratorContinueAfterError(t, true) }
func testIteratorContinueAfterError(t *testing.T, memonly bool) {
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
tr := NewEmpty(triedb)
@@ -418,7 +418,7 @@ func TestIteratorContinueAfterSeekErrorMemonly(t *testing.T) {
func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
// Commit test trie to db, then remove the node containing "bars".
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
ctr := NewEmpty(triedb)
@@ -531,7 +531,7 @@ func (l *loggingDb) Close() error {
func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
// Create an empty trie
logDb := &loggingDb{0, memorydb.New()}
- triedb := NewDatabase(logDb)
+ triedb := NewDatabase(rawdb.NewDatabase(logDb))
trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
// Fill it with some arbitrary data
diff --git a/trie/schema.go b/trie/schema.go
new file mode 100644
index 0000000000..72b67aa7d9
--- /dev/null
+++ b/trie/schema.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+const (
+ HashScheme = "hashScheme" // Identifier of hash based node scheme
+
+ // Path-based scheme will be introduced in the following PRs.
+ // PathScheme = "pathScheme" // Identifier of path based node scheme
+)
+
+// NodeScheme describes the scheme for interacting with trie nodes on disk.
+type NodeScheme interface {
+ // Name returns the identifier of the node scheme.
+ Name() string
+
+ // HasTrieNode checks the trie node presence with the provided node info and
+ // the associated node hash.
+ HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) bool
+
+ // ReadTrieNode retrieves the trie node from database with the provided node
+ // info and the associated node hash.
+ ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) []byte
+
+ // WriteTrieNode writes the trie node into database with the provided node
+ // info and associated node hash.
+ WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte)
+
+ // DeleteTrieNode deletes the trie node from database with the provided node
+ // info and associated node hash.
+ DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash)
+
+ // IsTrieNode reports whether the given database key is the key of a trie
+ // node according to the scheme.
+ IsTrieNode(key []byte) (bool, []byte)
+}
+
+type hashScheme struct{}
+
+// Name returns the identifier of the hash-based scheme.
+func (scheme *hashScheme) Name() string {
+ return HashScheme
+}
+
+// HasTrieNode checks the trie node presence with the provided node info and
+// the associated node hash.
+func (scheme *hashScheme) HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) bool {
+ return rawdb.HasTrieNode(db, hash)
+}
+
+// ReadTrieNode retrieves the trie node from database with the provided node info
+// and associated node hash.
+func (scheme *hashScheme) ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) []byte {
+ return rawdb.ReadTrieNode(db, hash)
+}
+
+// WriteTrieNode writes the trie node into database with the provided node info
+// and associated node hash.
+func (scheme *hashScheme) WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte) {
+ rawdb.WriteTrieNode(db, hash, node)
+}
+
+// DeleteTrieNode deletes the trie node from database with the provided node info
+// and associated node hash.
+func (scheme *hashScheme) DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash) {
+ rawdb.DeleteTrieNode(db, hash)
+}
+
+// IsTrieNode reports whether the given database key is the key of a trie
+// node according to the scheme.
+func (scheme *hashScheme) IsTrieNode(key []byte) (bool, []byte) {
+ if len(key) == common.HashLength {
+ return true, key
+ }
+ return false, nil
+}
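
A short round trip through the hash scheme, as a sketch of how the interface is meant to be used; under this scheme the owner and path arguments are ignored and nodes are keyed purely by hash:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/crypto"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        var (
            db     = rawdb.NewMemoryDatabase()
            scheme = trie.NewDatabase(db).Scheme()
            blob   = []byte{0xc0} // stand-in for an RLP-encoded trie node
            hash   = crypto.Keccak256Hash(blob)
        )
        scheme.WriteTrieNode(db, common.Hash{}, nil, hash, blob)
        fmt.Println(scheme.HasTrieNode(db, common.Hash{}, nil, hash)) // true
        ok, rest := scheme.IsTrieNode(hash.Bytes())
        fmt.Println(ok, common.BytesToHash(rest) == hash) // true true
    }
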
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index c18d399543..5030c5b3a6 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -23,19 +23,19 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
)
func newEmptySecure() *SecureTrie {
- trie, _ := NewSecure(common.Hash{}, common.Hash{}, NewDatabase(memorydb.New()))
+ trie, _ := NewSecure(common.Hash{}, common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
return trie
}
// makeTestSecureTrie creates a large enough secure trie for testing.
func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
- triedb := NewDatabase(memorydb.New())
+ triedb := NewDatabase(rawdb.NewMemoryDatabase())
trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
// Fill it with some arbitrary data
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index 3d742d7fca..753da13c31 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -26,7 +26,6 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -39,10 +38,14 @@ var stPool = sync.Pool{
},
}
-func stackTrieFromPool(db ethdb.KeyValueWriter, owner common.Hash) *StackTrie {
+// NodeWriteFunc is used to provide all the information of a dirty node for
+// committing so that callers can flush nodes into the database with the desired scheme.
+type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte)
+
+func stackTrieFromPool(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
st := stPool.Get().(*StackTrie)
- st.db = db
st.owner = owner
+ st.writeFn = writeFn
return st
}
@@ -55,42 +58,42 @@ func returnToPool(st *StackTrie) {
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
- owner common.Hash // the owner of the trie
- nodeType uint8 // node type (as in branch, ext, leaf)
- val []byte // value contained by this node if it's a leaf
- key []byte // key chunk covered by this (full|ext) node
- keyOffset int // offset of the key chunk inside a full key
- children [16]*StackTrie // list of children (for fullnodes and exts)
- db ethdb.KeyValueWriter // Pointer to the commit db, can be nil
+ owner common.Hash // the owner of the trie
+ nodeType uint8 // node type (as in branch, ext, leaf)
+ val []byte // value contained by this node if it's a leaf
+ key []byte // key chunk covered by this (full|ext) node
+ keyOffset int // offset of the key chunk inside a full key
+ children [16]*StackTrie // list of children (for fullnodes and exts)
+ writeFn NodeWriteFunc // function for committing nodes, can be nil
}
// NewStackTrie allocates and initializes an empty trie.
-func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie {
+func NewStackTrie(writeFn NodeWriteFunc) *StackTrie {
return &StackTrie{
nodeType: emptyNode,
- db: db,
+ writeFn: writeFn, // function for committing nodes, can be nil
}
}
// NewStackTrieWithOwner allocates and initializes an empty trie, but with
// the additional owner field.
-func NewStackTrieWithOwner(db ethdb.KeyValueWriter, owner common.Hash) *StackTrie {
+func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
return &StackTrie{
owner: owner,
nodeType: emptyNode,
- db: db,
+ writeFn: writeFn, // function for committing nodes, can be nil
}
}
// NewFromBinary initialises a serialized stacktrie with the given db.
-func NewFromBinary(data []byte, db ethdb.KeyValueWriter) (*StackTrie, error) {
+func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) {
var st StackTrie
if err := st.UnmarshalBinary(data); err != nil {
return nil, err
}
// If a database is used, we need to recursively add it to every child
- if db != nil {
- st.setDb(db)
+ if writeFn != nil {
+ st.setWriteFunc(writeFn)
}
return &st, nil
}
@@ -167,17 +170,17 @@ func (st *StackTrie) unmarshalBinary(r io.Reader) error {
return nil
}
-func (st *StackTrie) setDb(db ethdb.KeyValueWriter) {
- st.db = db
+func (st *StackTrie) setWriteFunc(writeFn NodeWriteFunc) {
+ st.writeFn = writeFn
for _, child := range st.children {
if child != nil {
- child.setDb(db)
+ child.setWriteFunc(writeFn)
}
}
}
-func newLeaf(owner common.Hash, ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
- st := stackTrieFromPool(db, owner)
+func newLeaf(owner common.Hash, ko int, key, val []byte, writeFn NodeWriteFunc) *StackTrie {
+ st := stackTrieFromPool(writeFn, owner)
st.nodeType = leafNode
st.keyOffset = ko
st.key = append(st.key, key[ko:]...)
@@ -185,8 +188,8 @@ func newLeaf(owner common.Hash, ko int, key, val []byte, db ethdb.KeyValueWriter
return st
}
-func newExt(owner common.Hash, ko int, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie {
- st := stackTrieFromPool(db, owner)
+func newExt(owner common.Hash, ko int, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie {
+ st := stackTrieFromPool(writeFn, owner)
st.nodeType = extNode
st.keyOffset = ko
st.key = append(st.key, key[ko:]...)
@@ -209,7 +212,7 @@ func (st *StackTrie) TryUpdate(key, value []byte) error {
if len(value) == 0 {
panic("deletion not supported")
}
- st.insert(k[:len(k)-1], value)
+ st.insert(k[:len(k)-1], value, nil)
return nil
}
@@ -221,7 +224,7 @@ func (st *StackTrie) Update(key, value []byte) {
func (st *StackTrie) Reset() {
st.owner = common.Hash{}
- st.db = nil
+ st.writeFn = nil
st.key = st.key[:0]
st.val = nil
for i := range st.children {
@@ -242,8 +245,8 @@ func (st *StackTrie) getDiffIndex(key []byte) int {
}
// Helper function that inserts a (key, value) pair into
-// the trie.
-func (st *StackTrie) insert(key, value []byte) {
+// the trie. The path prefix is threaded through for committing nodes.
+func (st *StackTrie) insert(key, value []byte, prefix []byte) {
switch st.nodeType {
case branchNode: /* Branch */
idx := int(key[st.keyOffset])
@@ -251,17 +254,17 @@ func (st *StackTrie) insert(key, value []byte) {
for i := idx - 1; i >= 0; i-- {
if st.children[i] != nil {
if st.children[i].nodeType != hashedNode {
- st.children[i].hash()
+ st.children[i].hash(append(prefix, byte(i)))
}
break
}
}
// Add new child
if st.children[idx] == nil {
- st.children[idx] = stackTrieFromPool(st.db, st.owner)
+ st.children[idx] = stackTrieFromPool(st.writeFn, st.owner)
st.children[idx].keyOffset = st.keyOffset + 1
}
- st.children[idx].insert(key, value)
+ st.children[idx].insert(key, value, append(prefix, key[st.keyOffset]))
case extNode: /* Ext */
// Compare both key chunks and see where they differ
diffidx := st.getDiffIndex(key)
@@ -274,7 +277,7 @@ func (st *StackTrie) insert(key, value []byte) {
if diffidx == len(st.key) {
// Ext key and key segment are identical, recurse into
// the child node.
- st.children[0].insert(key, value)
+ st.children[0].insert(key, value, append(prefix, key[:diffidx]...))
return
}
// Save the original part. Depending if the break is
@@ -283,14 +286,19 @@ func (st *StackTrie) insert(key, value []byte) {
// node directly.
var n *StackTrie
if diffidx < len(st.key)-1 {
- n = newExt(st.owner, diffidx+1, st.key, st.children[0], st.db)
+ // Break on the non-last byte, insert an intermediate
+ // extension. The path prefix of the newly-inserted
+ // extension should also contain the different byte.
+ n = newExt(st.owner, diffidx+1, st.key, st.children[0], st.writeFn)
+ n.hash(append(prefix, st.key[:diffidx+1]...))
} else {
- // Break on the last byte, no need to insert
- // an extension node: reuse the current node
+ // Break on the last byte, no need to insert
+ // an extension node: reuse the current node.
+ // The path prefix of the original part should
+ // still be the same.
n = st.children[0]
+ n.hash(append(prefix, st.key...))
}
- // Convert to hash
- n.hash()
+
var p *StackTrie
if diffidx == 0 {
// the break is on the first byte, so
@@ -303,13 +311,13 @@ func (st *StackTrie) insert(key, value []byte) {
// the common prefix is at least one byte
// long, insert a new intermediate branch
// node.
- st.children[0] = stackTrieFromPool(st.db, st.owner)
+ st.children[0] = stackTrieFromPool(st.writeFn, st.owner)
st.children[0].nodeType = branchNode
st.children[0].keyOffset = st.keyOffset + diffidx
p = st.children[0]
}
// Create a leaf for the inserted part
- o := newLeaf(st.owner, st.keyOffset+diffidx+1, key, value, st.db)
+ o := newLeaf(st.owner, st.keyOffset+diffidx+1, key, value, st.writeFn)
// Insert both child leaves where they belong:
origIdx := st.key[diffidx]
@@ -345,7 +353,7 @@ func (st *StackTrie) insert(key, value []byte) {
// Convert current node into an ext,
// and insert a child branch node.
st.nodeType = extNode
- st.children[0] = NewStackTrieWithOwner(st.db, st.owner)
+ st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner)
st.children[0].nodeType = branchNode
st.children[0].keyOffset = st.keyOffset + diffidx
p = st.children[0]
@@ -356,11 +364,11 @@ func (st *StackTrie) insert(key, value []byte) {
// The child leaf will be hashed directly in order to
// free up some memory.
origIdx := st.key[diffidx]
- p.children[origIdx] = newLeaf(st.owner, diffidx+1, st.key, st.val, st.db)
- p.children[origIdx].hash()
+ p.children[origIdx] = newLeaf(st.owner, diffidx+1, st.key, st.val, st.writeFn)
+ p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...))
newIdx := key[diffidx+st.keyOffset]
- p.children[newIdx] = newLeaf(st.owner, p.keyOffset+1, key, value, st.db)
+ p.children[newIdx] = newLeaf(st.owner, p.keyOffset+1, key, value, st.writeFn)
// Finally, cut off the key part that has been passed
// over to the children.
@@ -390,7 +398,7 @@ func (st *StackTrie) insert(key, value []byte) {
// This method will also:
// set 'st.type' to hashedNode
// clear 'st.key'
-func (st *StackTrie) hash() {
+func (st *StackTrie) hash(path []byte) {
/* Shortcut if node is already hashed */
if st.nodeType == hashedNode {
return
@@ -408,7 +416,7 @@ func (st *StackTrie) hash() {
nodes[i] = nilValueNode
continue
}
- child.hash()
+ child.hash(append(path, byte(i)))
if len(child.val) < 32 {
nodes[i] = rawNode(child.val)
} else {
@@ -425,7 +433,7 @@ func (st *StackTrie) hash() {
panic(err)
}
case extNode:
- st.children[0].hash()
+ st.children[0].hash(append(path, st.key...))
h = newHasher(false)
defer returnHasherToPool(h)
h.tmp.Reset()
@@ -468,6 +476,7 @@ func (st *StackTrie) hash() {
st.key = st.key[:0]
st.nodeType = hashedNode
if len(h.tmp) < 32 {
+ // If the rlp-encoded value was < 32 bytes, then val points directly to the rlp-encoded value.
st.val = common.CopyBytes(h.tmp)
return
}
@@ -477,16 +486,15 @@ func (st *StackTrie) hash() {
h.sha.Reset()
h.sha.Write(h.tmp)
h.sha.Read(st.val)
- if st.db != nil {
- // TODO! Is it safe to Put the slice here?
- // Do all db implementations copy the value provided?
- st.db.Put(st.val, h.tmp)
+
+ if st.writeFn != nil {
+ st.writeFn(st.owner, path, common.BytesToHash(st.val), h.tmp)
}
}
// Hash returns the hash of the current node
func (st *StackTrie) Hash() (h common.Hash) {
- st.hash()
+ st.hash(nil)
if len(st.val) != 32 {
// If the node's RLP isn't 32 bytes long, the node will not
// be hashed, and instead contain the rlp-encoding of the
@@ -510,10 +518,10 @@ func (st *StackTrie) Hash() (h common.Hash) {
// The associated database is expected, otherwise the whole commit
// functionality should be disabled.
func (st *StackTrie) Commit() (common.Hash, error) {
- if st.db == nil {
+ if st.writeFn == nil {
return common.Hash{}, ErrCommitDisabled
}
- st.hash()
+ st.hash(nil)
if len(st.val) != 32 {
// If the node's RLP isn't 32 bytes long, the node will not
// be hashed (and committed), and instead contain the rlp-encoding of the
@@ -522,9 +530,10 @@ func (st *StackTrie) Commit() (common.Hash, error) {
h := newHasher(false)
defer returnHasherToPool(h)
h.sha.Reset()
+ // hash st.val -> ret
h.sha.Write(st.val)
h.sha.Read(ret)
- st.db.Put(ret, st.val)
+ st.writeFn(st.owner, nil, common.BytesToHash(ret), st.val)
return common.BytesToHash(ret), nil
}
return common.BytesToHash(st.val), nil
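
Since the stack trie no longer knows about databases at all, tests and tools can point it at any sink. A sketch that simply records which (path, hash) pairs get committed; the keys and values are illustrative:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        committed := make(map[string]common.Hash) // hex-encoded path -> node hash
        st := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
            committed[fmt.Sprintf("%x", path)] = hash
        })
        // Stack tries require sorted insertion order and 32-byte keys.
        st.Update(common.HexToHash("0x01").Bytes(), []byte("value-1"))
        st.Update(common.HexToHash("0x02").Bytes(), []byte("value-2"))
        root, err := st.Commit()
        fmt.Println(root, err, len(committed))
    }
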
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index 15e5cd3d16..dd4c75f5f8 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -22,8 +22,8 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
)
func TestStackTrieInsertAndHash(t *testing.T) {
@@ -188,7 +188,8 @@ func TestStackTrieInsertAndHash(t *testing.T) {
func TestSizeBug(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(memorydb.New()))
+
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -203,7 +204,7 @@ func TestSizeBug(t *testing.T) {
func TestEmptyBug(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -229,7 +230,7 @@ func TestEmptyBug(t *testing.T) {
func TestValLength56(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -254,7 +255,7 @@ func TestValLength56(t *testing.T) {
// which causes a lot of node-within-node. This case was found via fuzzing.
func TestUpdateSmallNodes(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
kvs := []struct {
K string
@@ -283,7 +284,7 @@ func TestUpdateSmallNodes(t *testing.T) {
func TestUpdateVariableKeys(t *testing.T) {
t.SkipNow()
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(memorydb.New()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
kvs := []struct {
K string
@@ -354,7 +355,7 @@ func TestStacktrieNotModifyValues(t *testing.T) {
func TestStacktrieSerialization(t *testing.T) {
var (
st = NewStackTrie(nil)
- nt = NewEmpty(NewDatabase(memorydb.New()))
+ nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
keyB = big.NewInt(1)
keyDelta = big.NewInt(1)
vals [][]byte
diff --git a/trie/sync.go b/trie/sync.go
index 579da76130..1ea443e3f7 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -137,6 +137,7 @@ func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
+ scheme NodeScheme // Node scheme descriptor used in database.
database ethdb.KeyValueReader // Persistent database to check for existing entries
membatch *syncMemBatch // Memory buffer to avoid frequent database writes
nodeReqs map[string]*nodeRequest // Pending requests pertaining to a trie node path
@@ -146,9 +147,26 @@ type Sync struct {
bloom *SyncBloom // Bloom filter for fast state existence checks
}
+// LeafCallback is a callback type invoked when a trie operation reaches a leaf
+// node.
+//
+// The keys form a path tuple identifying a particular trie node either in a single
+// trie (account) or a layered trie (account -> storage). Each key in the tuple
+// is in the raw format (32 bytes).
+//
+// The path is a composite hexary path identifying the trie node. All the key
+// bytes are converted to hexary nibbles and composited with the parent path
+// if the trie node is in a layered trie.
+//
+// It's used by state sync and commit to allow handling external references
+// between account and storage tries. It is also used in state healing
+// for extracting the raw states (leaf nodes) with their corresponding paths.
+type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
+
// NewSync creates a new trie data download scheduler.
-func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync {
+func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom, scheme NodeScheme) *Sync {
ts := &Sync{
+ scheme: scheme,
database: database,
membatch: newSyncMemBatch(),
nodeReqs: make(map[string]*nodeRequest),
@@ -343,8 +361,9 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error {
func (s *Sync) Commit(dbw ethdb.Batch) error {
// Dump the membatch into a database dbw
for path, value := range s.membatch.nodes {
+ owner, inner := ResolvePath([]byte(path))
+ s.scheme.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value)
hash := s.membatch.hashes[path]
- rawdb.WriteTrieNode(dbw, hash, value)
if s.bloom != nil {
s.bloom.Add(hash[:])
}
@@ -461,9 +480,11 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
// Bloom filter says this might be a duplicate, double check.
// If database says yes, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
- if blob := rawdb.ReadTrieNode(s.database, chash); len(blob) > 0 {
+ owner, inner := ResolvePath(child.path)
+ if s.scheme.HasTrieNode(s.database, owner, inner, chash) {
continue
}
+
// False positive, bump fault meter
bloomFaultMeter.Mark(1)
}
@@ -522,3 +543,14 @@ func (s *Sync) commitCodeRequest(req *codeRequest) error {
}
return nil
}
+
+// ResolvePath resolves the provided composite node path by splitting off the
+// account trie part, if it is present.
+func ResolvePath(path []byte) (common.Hash, []byte) {
+ var owner common.Hash
+ if len(path) >= 2*common.HashLength {
+ owner = common.BytesToHash(hexToKeybytes(path[:2*common.HashLength]))
+ path = path[2*common.HashLength:]
+ }
+ return owner, path
+}
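
ResolvePath operates on hex-nibble paths: if the path carries at least 64 nibbles, the leading 64 encode the owning account hash and the remainder addresses into its storage trie. A sketch, expanding the nibbles by hand to match the trie's hex encoding:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        account := common.HexToHash("0xaa") // illustrative account hash
        // Expand the 32-byte hash into 64 hex nibbles, as stored in node paths.
        nibbles := make([]byte, 0, 2*common.HashLength)
        for _, b := range account.Bytes() {
            nibbles = append(nibbles, b>>4, b&0x0f)
        }
        storagePart := []byte{0x1, 0x2}
        owner, inner := trie.ResolvePath(append(nibbles, storagePart...))
        fmt.Println(owner == account)                // true: account part split off
        fmt.Println(bytes.Equal(inner, storagePart)) // true: storage path remains
    }
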
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 027f36c6de..095892e16e 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -22,6 +22,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
@@ -29,7 +30,7 @@ import (
// makeTestTrie create a sample test trie to test node-wise reconstruction.
func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
- triedb := NewDatabase(memorydb.New())
+ triedb := NewDatabase(rawdb.NewMemoryDatabase())
trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
// Fill it with some arbitrary data
@@ -103,13 +104,13 @@ type trieElement struct {
// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
- dbA := NewDatabase(memorydb.New())
- dbB := NewDatabase(memorydb.New())
+ dbA := NewDatabase(rawdb.NewMemoryDatabase())
+ dbB := NewDatabase(rawdb.NewMemoryDatabase())
emptyA := NewEmpty(dbA)
emptyB, _ := New(common.Hash{}, emptyRoot, dbB)
for i, trie := range []*Trie{emptyA, emptyB} {
- sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()))
+ sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()), []*Database{dbA, dbB}[i].Scheme())
if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
}
@@ -128,9 +129,9 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
- sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+ sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
@@ -194,9 +195,9 @@ func TestIterativeDelayedSync(t *testing.T) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
- sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+ sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
@@ -255,9 +256,9 @@ func testIterativeRandomSync(t *testing.T, count int) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
- sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+ sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
@@ -313,9 +314,9 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
- sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+ sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
@@ -376,9 +377,9 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
- sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+ sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
@@ -439,9 +440,9 @@ func TestIncompleteSync(t *testing.T) {
srcDb, srcTrie, _ := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
- sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+ sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
@@ -519,9 +520,9 @@ func TestSyncOrdering(t *testing.T) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler, tracking the requests
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
- sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+ sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
diff --git a/trie/trie.go b/trie/trie.go
index e6d40be256..a1cc31c5cb 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -37,22 +37,6 @@ var (
emptyState = crypto.Keccak256Hash(nil)
)
-// LeafCallback is a callback type invoked when a trie operation reaches a leaf
-// node.
-//
-// The keys is a path tuple identifying a particular trie node either in a single
-// trie (account) or a layered trie (account -> storage). Each key in the tuple
-// is in the raw format(32 bytes).
-//
-// The path is a composite hexary path identifying the trie node. All the key
-// bytes are converted to the hexary nibbles and composited with the parent path
-// if the trie node is in a layered trie.
-//
-// It's used by state sync and commit to allow handling external references
-// between account and storage tries. And also it's used in the state healing
-// for extracting the raw states(leaf nodes) with corresponding paths.
-type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
-
// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
// top of a Database. Whenever the trie performs a commit operation, the generated nodes will be
// gathered and returned in a set. Once a trie is committed, it's not usable anymore. Callers have to
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 4758328c91..c9533060c1 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -36,7 +36,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
- "github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
@@ -48,7 +47,7 @@ func init() {
// Used for testing
func newEmpty() *Trie {
- trie := NewEmpty(NewDatabase(memorydb.New()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
return trie
}
@@ -72,7 +71,7 @@ func TestNull(t *testing.T) {
}
func TestMissingRoot(t *testing.T) {
- trie, err := New(common.Hash{}, common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(memorydb.New()))
+ trie, err := New(common.Hash{}, common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(rawdb.NewMemoryDatabase()))
if trie != nil {
t.Error("New returned non-nil trie for invalid root")
}
@@ -85,7 +84,7 @@ func TestMissingNodeDisk(t *testing.T) { testMissingNode(t, false) }
func TestMissingNodeMemonly(t *testing.T) { testMissingNode(t, true) }
func testMissingNode(t *testing.T, memonly bool) {
- diskdb := memorydb.New()
+ diskdb := rawdb.NewMemoryDatabase()
triedb := NewDatabase(diskdb)
trie := NewEmpty(triedb)
@@ -422,7 +421,7 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
func runRandTest(rt randTest) bool {
var (
- triedb = NewDatabase(memorydb.New())
+ triedb = NewDatabase(rawdb.NewMemoryDatabase())
tr = NewEmpty(triedb)
values = make(map[string]string) // tracks content of the trie
)
@@ -730,7 +729,7 @@ func TestCommitSequence(t *testing.T) {
addresses, accounts := makeAccounts(tc.count)
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := NewDatabase(s)
+ db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
// Another sponge is used to check the callback-sequence
callbackSponge := sha3.NewLegacyKeccak256()
@@ -773,7 +772,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
prng := rand.New(rand.NewSource(int64(i)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := NewDatabase(s)
+ db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
// Another sponge is used to check the callback-sequence
callbackSponge := sha3.NewLegacyKeccak256()
@@ -813,11 +812,14 @@ func TestCommitSequenceStackTrie(t *testing.T) {
prng := rand.New(rand.NewSource(int64(count)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
- db := NewDatabase(s)
+ db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
- stTrie := NewStackTrie(stackTrieSponge)
+ writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ db.Scheme().WriteTrieNode(stackTrieSponge, owner, path, hash, blob)
+ }
+ stTrie := NewStackTrie(writeFn)
// Fill the trie with elements; start from 0, otherwise nodes will be nil the first time.
for i := 0; i < count; i++ {
// For the stack trie, we need to do inserts in proper order
@@ -870,11 +872,14 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do.
func TestCommitSequenceSmallRoot(t *testing.T) {
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
- db := NewDatabase(s)
+ db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
- stTrie := NewStackTrie(stackTrieSponge)
+ writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ db.Scheme().WriteTrieNode(stackTrieSponge, owner, path, hash, blob)
+ }
+ stTrie := NewStackTrie(writeFn)
// Add a single small-element to the trie(s)
key := make([]byte, 5)
key[0] = 1
@@ -1069,7 +1074,7 @@ func tempDB() (string, *Database) {
if err != nil {
panic(fmt.Sprintf("can't create temporary database: %v", err))
}
- return dir, NewDatabase(diskdb)
+ return dir, NewDatabase(rawdb.NewDatabase(diskdb))
}
func getString(trie *Trie, k string) []byte {
From d3f93f95d615d4e12f55dea732cbc4357ec78c01 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Wed, 25 Sep 2024 22:37:04 +0700
Subject: [PATCH 13/41] cmd, core, eth, trie: track deleted nodes (#576)
* trie: track deleted nodes
* core: track deleted nodes
---
cmd/ronin/dbcmd.go | 38 ++++---
cmd/ronin/snapshot.go | 8 +-
core/blockchain.go | 13 ++-
core/state/database.go | 8 +-
core/state/iterator.go | 2 +-
core/state/metrics.go | 14 +--
core/state/pruner/pruner.go | 4 +-
core/state/snapshot/generate.go | 21 ++--
core/state/snapshot/generate_test.go | 4 +-
core/state/state_object.go | 4 +-
core/state/statedb.go | 49 ++++++---
core/state/sync_test.go | 6 +-
core/state/trie_prefetcher.go | 8 +-
eth/api.go | 4 +-
eth/downloader/downloader_test.go | 2 +-
eth/protocols/snap/handler.go | 10 +-
eth/protocols/snap/sync_test.go | 20 ++--
les/downloader/downloader_test.go | 2 +-
les/handler_test.go | 8 +-
les/server_handler.go | 4 +-
les/server_requests.go | 2 +-
light/odr.go | 12 ++-
light/odr_test.go | 2 +-
light/postprocess.go | 12 +--
light/trie.go | 21 ++--
tests/fuzzers/trie/trie-fuzzer.go | 3 +-
trie/committer.go | 34 +++++-
trie/database.go | 32 +++++-
trie/iterator.go | 7 +-
trie/iterator_test.go | 16 +--
trie/nodeset.go | 153 ++++++++++++++++++++++++---
trie/proof.go | 10 +-
trie/secure_trie.go | 4 +-
trie/secure_trie_test.go | 4 +-
trie/sync_test.go | 8 +-
trie/trie.go | 64 ++++++-----
trie/trie_id.go | 55 ++++++++++
trie/trie_reader.go | 106 +++++++++++++++++++
trie/trie_test.go | 86 +++++++++++----
trie/utils.go | 51 ++++++---
trie/utils_test.go | 123 ++++++++++++++++++++-
41 files changed, 805 insertions(+), 229 deletions(-)
create mode 100644 trie/trie_id.go
create mode 100644 trie/trie_reader.go
diff --git a/cmd/ronin/dbcmd.go b/cmd/ronin/dbcmd.go
index c4793b25e3..dc8ce61742 100644
--- a/cmd/ronin/dbcmd.go
+++ b/cmd/ronin/dbcmd.go
@@ -180,7 +180,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
Action: dbDumpTrie,
Name: "dumptrie",
Usage: "Show the storage key/values of a given storage trie",
- ArgsUsage: "<hex encoded storage trie root> <hex encoded start (optional)> <hex encoded max elems (optional)>",
+ ArgsUsage: "<hex encoded state root> <hex encoded account hash> <hex encoded storage trie root> <hex encoded start (optional)> <hex encoded max elems (optional)>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.DBEngineFlag,
@@ -468,7 +468,7 @@ func dbPut(ctx *cli.Context) error {
// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
- if ctx.NArg() < 1 {
+ if ctx.NArg() < 3 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
stack, _ := makeConfigNode(ctx)
@@ -477,29 +477,39 @@ func dbDumpTrie(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var (
- root []byte
- start []byte
- max = int64(-1)
- err error
+ state []byte
+ storage []byte
+ account []byte
+ start []byte
+ max = int64(-1)
+ err error
)
- if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
- log.Info("Could not decode the root", "error", err)
+ if state, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
+ log.Info("Could not decode the state", "error", err)
return err
}
- stRoot := common.BytesToHash(root)
- if ctx.NArg() >= 2 {
- if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
+ if account, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
+ log.Info("Could not decode the account hash", "error", err)
+ return err
+ }
+ if storage, err = hexutil.Decode(ctx.Args().Get(2)); err != nil {
+ log.Info("Could not decode the storage trie root", "error", err)
+ return err
+ }
+ if ctx.NArg() > 3 {
+ if start, err = hexutil.Decode(ctx.Args().Get(3)); err != nil {
log.Info("Could not decode the seek position", "error", err)
return err
}
}
- if ctx.NArg() >= 3 {
- if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
+ if ctx.NArg() > 4 {
+ if max, err = strconv.ParseInt(ctx.Args().Get(4), 10, 64); err != nil {
log.Info("Could not decode the max count", "error", err)
return err
}
}
- theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
+ id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
+ theTrie, err := trie.New(id, trie.NewDatabase(db))
if err != nil {
return err
}
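
The command now builds a trie identifier up front instead of passing an owner and root separately. A sketch of the two constructors introduced by this patch, with placeholder hashes (errors are ignored; the roots do not exist in a fresh database):

    package main

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        var (
            stateRoot   = common.HexToHash("0x01") // placeholder head state root
            accountHash = common.HexToHash("0x02") // placeholder hashed account key
            storageRoot = common.HexToHash("0x03") // placeholder storage trie root
            triedb      = trie.NewDatabase(rawdb.NewMemoryDatabase())
        )
        // Account trie: identified by the state root alone.
        _, _ = trie.New(trie.StateTrieID(stateRoot), triedb)
        // Storage trie: additionally scoped by the state root and owning account.
        _, _ = trie.New(trie.StorageTrieID(stateRoot, accountHash, storageRoot), triedb)
    }
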
diff --git a/cmd/ronin/snapshot.go b/cmd/ronin/snapshot.go
index 78b398a212..86bde30e0e 100644
--- a/cmd/ronin/snapshot.go
+++ b/cmd/ronin/snapshot.go
@@ -283,7 +283,7 @@ func traverseState(ctx *cli.Context) error {
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
}
triedb := trie.NewDatabase(chaindb)
- t, err := trie.NewSecure(common.Hash{}, root, triedb)
+ t, err := trie.NewSecure(trie.StateTrieID(root), triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "err", err)
return err
@@ -304,7 +304,7 @@ func traverseState(ctx *cli.Context) error {
return err
}
if acc.Root != emptyRoot {
- storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.Key), acc.Root, triedb)
+ storageTrie, err := trie.NewSecure(trie.StorageTrieID(root, common.BytesToHash(accIter.Key), acc.Root), triedb)
if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return err
@@ -373,7 +373,7 @@ func traverseRawState(ctx *cli.Context) error {
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
}
triedb := trie.NewDatabase(chaindb)
- t, err := trie.NewSecure(common.Hash{}, root, triedb)
+ t, err := trie.NewSecure(trie.StateTrieID(root), triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "err", err)
return err
@@ -410,7 +410,7 @@ func traverseRawState(ctx *cli.Context) error {
return errors.New("invalid account")
}
if acc.Root != emptyRoot {
- storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.LeafKey()), acc.Root, triedb)
+ storageTrie, err := trie.NewSecure(trie.StorageTrieID(root, common.BytesToHash(accIter.LeafKey()), acc.Root), triedb)
if err != nil {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return errors.New("missing storage trie")
diff --git a/core/blockchain.go b/core/blockchain.go
index 55600c60ca..cbc69ac6f0 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -68,6 +68,8 @@ var (
snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
snapshotCommitTimer = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
+ triedbCommitTimer = metrics.NewRegisteredTimer("chain/triedb/commits", nil)
+
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
@@ -823,10 +825,10 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
if block == nil {
return fmt.Errorf("non existent block [%x..]", hash[:4])
}
- if _, err := trie.NewSecure(common.Hash{}, block.Root(), bc.triedb); err != nil {
- return err
+ root := block.Root()
+ if !bc.HasState(root) {
+ return fmt.Errorf("non existent state [%x..]", root[:4])
}
-
// If all checks out, manually set the head block.
if !bc.chainmu.TryLock() {
return errChainStopped
@@ -838,7 +840,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
// Destroy any existing state snapshot and regenerate it in the background,
// also resuming the normal maintenance of any previously paused snapshot.
if bc.snaps != nil {
- bc.snaps.Rebuild(block.Root())
+ bc.snaps.Rebuild(root)
}
log.Info("Committed new head block", "number", block.Number(), "hash", hash)
return nil
@@ -2034,8 +2036,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool, sidecars
accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
+ triedbCommitTimer.Update(statedb.TrieDBCommits) // Triedb commits are complete, we can mark them
- blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
+ blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
blockInsertTimer.UpdateSince(start)
blockTxsGauge.Update(int64(len(block.Transactions())))
blockGasUsedGauge.Update(int64(block.GasUsed()))
diff --git a/core/state/database.go b/core/state/database.go
index 02f5c4ea54..d2837c83f9 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -43,7 +43,7 @@ type Database interface {
OpenTrie(root common.Hash) (Trie, error)
// OpenStorageTrie opens the storage trie of an account.
- OpenStorageTrie(addrHash, root common.Hash) (Trie, error)
+ OpenStorageTrie(stateRoot, addrHash, root common.Hash) (Trie, error)
// CopyTrie returns an independent copy of the given trie.
CopyTrie(Trie) Trie
@@ -145,7 +145,7 @@ type cachingDB struct {
// OpenTrie opens the main account trie at a specific root hash.
func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
- tr, err := trie.NewSecure(common.Hash{}, root, db.triedb)
+ tr, err := trie.NewSecure(trie.StateTrieID(root), db.triedb)
if err != nil {
return nil, err
}
@@ -153,8 +153,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
}
// OpenStorageTrie opens the storage trie of an account.
-func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
- tr, err := trie.NewSecure(addrHash, root, db.triedb)
+func (db *cachingDB) OpenStorageTrie(stateRoot, addrHash, root common.Hash) (Trie, error) {
+ tr, err := trie.NewSecure(trie.StorageTrieID(stateRoot, addrHash, root), db.triedb)
if err != nil {
return nil, err
}
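
Callers now thread the state root through when opening a storage trie, which is what lets the trie reader distinguish the same account's storage under different states. A sketch of the new call shape; the account hash is a placeholder and the empty roots open empty tries:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/state"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        sdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true})
        tr, err := sdb.OpenStorageTrie(common.Hash{}, common.HexToHash("0xaa"), common.Hash{})
        fmt.Println(tr != nil, err)
    }
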
diff --git a/core/state/iterator.go b/core/state/iterator.go
index 611df52431..ba7efd4653 100644
--- a/core/state/iterator.go
+++ b/core/state/iterator.go
@@ -109,7 +109,7 @@ func (it *NodeIterator) step() error {
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
return err
}
- dataTrie, err := it.state.db.OpenStorageTrie(common.BytesToHash(it.stateIt.LeafKey()), account.Root)
+ dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root)
if err != nil {
return err
}
diff --git a/core/state/metrics.go b/core/state/metrics.go
index 35d2df92dd..e702ef3a81 100644
--- a/core/state/metrics.go
+++ b/core/state/metrics.go
@@ -19,10 +19,12 @@ package state
import "github.com/ethereum/go-ethereum/metrics"
var (
- accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
- storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
- accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
- storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
- accountTrieCommittedMeter = metrics.NewRegisteredMeter("state/commit/accountnodes", nil)
- storageTriesCommittedMeter = metrics.NewRegisteredMeter("state/commit/storagenodes", nil)
+ accountUpdatedMeter = metrics.NewRegisteredMeter("state/update/account", nil)
+ storageUpdatedMeter = metrics.NewRegisteredMeter("state/update/storage", nil)
+ accountDeletedMeter = metrics.NewRegisteredMeter("state/delete/account", nil)
+ storageDeletedMeter = metrics.NewRegisteredMeter("state/delete/storage", nil)
+ accountTrieUpdatedMeter = metrics.NewRegisteredMeter("state/update/accountnodes", nil)
+ storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
+ accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
+ storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
)
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 559507d7ff..7b01a74a2b 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -410,7 +410,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if genesis == nil {
return errors.New("missing genesis block")
}
- t, err := trie.NewSecure(common.Hash{}, genesis.Root(), trie.NewDatabase(db))
+ t, err := trie.NewSecure(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db))
if err != nil {
return err
}
@@ -430,7 +430,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
return err
}
if acc.Root != emptyRoot {
- storageTrie, err := trie.NewSecure(common.BytesToHash(accIter.LeafKey()), acc.Root, trie.NewDatabase(db))
+ storageTrie, err := trie.NewSecure(trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root), trie.NewDatabase(db))
if err != nil {
return err
}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index a18ecf22ea..341a37a180 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -247,7 +247,7 @@ func (result *proofResult) forEach(callback func(key []byte, val []byte) error)
//
// The proof result will be returned if the range proving is finished, otherwise
// the error will be returned to abort the entire procedure.
-func (dl *diskLayer) proveRange(stats *generatorStats, owner common.Hash, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
+func (dl *diskLayer) proveRange(stats *generatorStats, trieID *trie.ID, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
var (
keys [][]byte
vals [][]byte
@@ -304,8 +304,9 @@ func (dl *diskLayer) proveRange(stats *generatorStats, owner common.Hash, root c
}(time.Now())
// The snap state is exhausted, pass the entire key/val set for verification
+ root := trieID.Root
if origin == nil && !diskMore {
- stackTr := trie.NewStackTrieWithOwner(nil, owner)
+ stackTr := trie.NewStackTrie(nil)
for i, key := range keys {
stackTr.TryUpdate(key, vals[i])
}
@@ -319,7 +320,7 @@ func (dl *diskLayer) proveRange(stats *generatorStats, owner common.Hash, root c
return &proofResult{keys: keys, vals: vals}, nil
}
// Snap state is chunked, generate edge proofs for verification.
- tr, err := trie.New(owner, root, dl.triedb)
+ tr, err := trie.New(trieID, dl.triedb)
if err != nil {
stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
return nil, errMissingTrie
@@ -380,9 +381,9 @@ type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
// generateRange generates the state segment with particular prefix. Generation can
// either verify the correctness of existing state through rangeproof and skip
// generation, or iterate trie to regenerate state on demand.
-func (dl *diskLayer) generateRange(owner common.Hash, root common.Hash, prefix []byte, kind string, origin []byte, max int, stats *generatorStats, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
+func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string, origin []byte, max int, stats *generatorStats, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
// Use range prover to check the validity of the flat state in the range
- result, err := dl.proveRange(stats, owner, root, prefix, kind, origin, max, valueConvertFn)
+ result, err := dl.proveRange(stats, trieID, prefix, kind, origin, max, valueConvertFn)
if err != nil {
return false, nil, err
}
@@ -431,7 +432,7 @@ func (dl *diskLayer) generateRange(owner common.Hash, root common.Hash, prefix [
if len(result.keys) > 0 {
snapNodeCache = rawdb.NewMemoryDatabase()
snapTrieDb := trie.NewDatabase(snapNodeCache)
- snapTrie, _ := trie.New(owner, common.Hash{}, snapTrieDb)
+ snapTrie := trie.NewEmpty(snapTrieDb)
for i, key := range result.keys {
snapTrie.Update(key, result.vals[i])
}
@@ -443,7 +444,7 @@ func (dl *diskLayer) generateRange(owner common.Hash, root common.Hash, prefix [
}
tr := result.tr
if tr == nil {
- tr, err = trie.New(owner, root, dl.triedb)
+ tr, err = trie.New(trieID, dl.triedb)
if err != nil {
stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
return false, nil, errMissingTrie
@@ -526,7 +527,7 @@ func (dl *diskLayer) generateRange(owner common.Hash, root common.Hash, prefix [
} else {
snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
}
- logger.Debug("Regenerated state range", "root", root, "last", hexutil.Encode(last),
+ logger.Debug("Regenerated state range", "root", trieID.Root, "last", hexutil.Encode(last),
"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)
// If there are either more trie items, or there are more snap items
@@ -700,7 +701,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
}
var storeOrigin = common.CopyBytes(storeMarker)
for {
- exhausted, last, err := dl.generateRange(common.Hash{}, acc.Root, append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...), "storage", storeOrigin, storageCheckRange, stats, onStorage, nil)
+ exhausted, last, err := dl.generateRange(trie.StorageTrieID(dl.Root(), accountHash, acc.Root), append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...), "storage", storeOrigin, storageCheckRange, stats, onStorage, nil)
if err != nil {
return err
}
@@ -719,7 +720,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
// Global loop for regenerating the entire state trie + all layered storage tries.
for {
- exhausted, last, err := dl.generateRange(common.Hash{}, dl.root, rawdb.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, FullAccountRLP)
+ exhausted, last, err := dl.generateRange(trie.StateTrieID(dl.root), rawdb.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, FullAccountRLP)
// The procedure is aborted, either by external signal or internal error
if err != nil {
if abort == nil { // aborted by internal error, wait the signal
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 3d59590c89..ce1e358a3f 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -144,7 +144,7 @@ type testHelper struct {
func newHelper() *testHelper {
diskdb := rawdb.NewMemoryDatabase()
triedb := trie.NewDatabase(diskdb)
- accTrie, _ := trie.NewSecure(common.Hash{}, common.Hash{}, triedb)
+ accTrie, _ := trie.NewSecure(trie.StateTrieID(common.Hash{}), triedb)
return &testHelper{
diskdb: diskdb,
triedb: triedb,
@@ -177,7 +177,7 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string)
}
func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) []byte {
- stTrie, _ := trie.NewSecure(owner, common.Hash{}, t.triedb)
+ stTrie, _ := trie.NewSecure(trie.StorageTrieID(stateRoot, owner, common.Hash{}), t.triedb)
for i, k := range keys {
stTrie.Update([]byte(k), []byte(vals[i]))
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 91d3b71c46..bb9e3d6781 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -162,9 +162,9 @@ func (s *stateObject) getTrie(db Database) Trie {
}
if s.trie == nil {
var err error
- s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root)
+ s.trie, err = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
if err != nil {
- s.trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{})
+ s.trie, _ = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, common.Hash{})
s.setError(fmt.Errorf("can't create storage trie: %v", err))
}
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index e78968e2e5..898e65597c 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -124,6 +124,7 @@ type StateDB struct {
SnapshotAccountReads time.Duration
SnapshotStorageReads time.Duration
SnapshotCommits time.Duration
+ TrieDBCommits time.Duration
AccountUpdated int
StorageUpdated int
@@ -973,9 +974,11 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
// Commit objects to the trie, measuring the elapsed time
var (
- accountTrieNodes int
- storageTrieNodes int
- nodes = trie.NewMergedNodeSet()
+ accountTrieNodesUpdated int
+ accountTrieNodesDeleted int
+ storageTrieNodesUpdated int
+ storageTrieNodesDeleted int
+ nodes = trie.NewMergedNodeSet()
)
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
for addr := range s.stateObjectsDirty {
@@ -996,7 +999,9 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if err := nodes.Merge(nodeSet); err != nil {
return common.Hash{}, err
}
- storageTrieNodes += nodeSet.Len()
+ updated, deleted := nodeSet.Size()
+ storageTrieNodesUpdated += updated
+ storageTrieNodesDeleted += deleted
}
}
}
@@ -1029,7 +1034,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if err := nodes.Merge(nodeSet); err != nil {
return common.Hash{}, err
}
- accountTrieNodes = nodeSet.Len()
+ accountTrieNodesUpdated, accountTrieNodesDeleted = nodeSet.Size()
}
if metrics.EnabledExpensive {
s.AccountCommits += time.Since(start)
@@ -1038,16 +1043,16 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
storageUpdatedMeter.Mark(int64(s.StorageUpdated))
accountDeletedMeter.Mark(int64(s.AccountDeleted))
storageDeletedMeter.Mark(int64(s.StorageDeleted))
- accountTrieCommittedMeter.Mark(int64(accountTrieNodes))
- storageTriesCommittedMeter.Mark(int64(storageTrieNodes))
+ accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
+ accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
+ storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
+ storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
s.AccountUpdated, s.AccountDeleted = 0, 0
s.StorageUpdated, s.StorageDeleted = 0, 0
}
// If snapshotting is enabled, update the snapshot tree with this new version
if s.snap != nil {
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
- }
+ start := time.Now()
// Only update if there's a state transition (skip empty Clique blocks)
if parent := s.snap.Root(); parent != root {
if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
@@ -1062,14 +1067,30 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
}
s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
+ if metrics.EnabledExpensive {
+ s.SnapshotCommits += time.Since(start)
+ }
}
// Update Trie MergeNodeSets.
- if err := s.db.TrieDB().Update(nodes); err != nil {
- return common.Hash{}, err
+ if root == (common.Hash{}) {
+ root = emptyRoot
+ }
+ origin := s.originalRoot
+ if origin == (common.Hash{}) {
+ origin = emptyRoot
+ }
+ if root != origin {
+ start := time.Now()
+ if err := s.db.TrieDB().Update(nodes); err != nil {
+ return common.Hash{}, err
+ }
+ s.originalRoot = root
+ if metrics.EnabledExpensive {
+ s.TrieDBCommits += time.Since(start)
+ }
}
- s.originalRoot = root
- return root, err
+ return root, nil
}
// ResetAccessList sets access list to empty
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index b35830d1a9..58329481d6 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -106,7 +106,7 @@ func checkTrieConsistency(db ethdb.Database, root common.Hash) error {
if v, _ := db.Get(root[:]); v == nil {
return nil // Consider a non existent state consistent.
}
- trie, err := trie.New(common.Hash{}, root, trie.NewDatabase(db))
+ trie, err := trie.New(trie.StateTrieID(root), trie.NewDatabase(db))
if err != nil {
return err
}
@@ -177,7 +177,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
if commit {
srcDb.TrieDB().Commit(srcRoot, false, nil)
}
- srcTrie, _ := trie.New(common.Hash{}, srcRoot, srcDb.TrieDB())
+ srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), srcDb.TrieDB())
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
@@ -225,7 +225,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
if err := rlp.DecodeBytes(srcTrie.Get(node.syncPath[0]), &acc); err != nil {
t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err)
}
- stTrie, err := trie.New(common.BytesToHash(node.syncPath[0]), acc.Root, srcDb.TrieDB())
+ stTrie, err := trie.New(trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root), srcDb.TrieDB())
if err != nil {
t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
}
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index a81872cd32..5c85e5adc5 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -150,7 +150,7 @@ func (p *triePrefetcher) prefetch(owner common.Hash, root common.Hash, keys [][]
id := p.trieID(owner, root)
fetcher := p.fetchers[id]
if fetcher == nil {
- fetcher = newSubfetcher(p.db, owner, root)
+ fetcher = newSubfetcher(p.db, p.root, owner, root)
p.fetchers[id] = fetcher
}
fetcher.schedule(keys)
@@ -206,6 +206,7 @@ func (p *triePrefetcher) trieID(owner common.Hash, root common.Hash) string {
// the trie being worked on is retrieved from the prefetcher.
type subfetcher struct {
db Database // Database to load trie nodes through
+ state common.Hash // Root hash of the state to prefetch
owner common.Hash // Owner of the trie, usually account hash
root common.Hash // Root hash of the trie to prefetch
trie Trie // Trie being populated with nodes
@@ -225,9 +226,10 @@ type subfetcher struct {
// newSubfetcher creates a goroutine to prefetch state items belonging to a
// particular root hash.
-func newSubfetcher(db Database, owner common.Hash, root common.Hash) *subfetcher {
+func newSubfetcher(db Database, state common.Hash, owner common.Hash, root common.Hash) *subfetcher {
sf := &subfetcher{
db: db,
+ state: state,
owner: owner,
root: root,
wake: make(chan struct{}, 1),
@@ -298,7 +300,7 @@ func (sf *subfetcher) loop() {
}
sf.trie = trie
} else {
- trie, err := sf.db.OpenStorageTrie(sf.owner, sf.root)
+ trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root)
if err != nil {
log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
return
diff --git a/eth/api.go b/eth/api.go
index a646529e8b..29558cdedf 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -553,11 +553,11 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc
}
triedb := api.eth.BlockChain().StateCache().TrieDB()
- oldTrie, err := trie.NewSecure(common.Hash{}, startBlock.Root(), triedb)
+ oldTrie, err := trie.NewSecure(trie.StateTrieID(startBlock.Root()), triedb)
if err != nil {
return nil, err
}
- newTrie, err := trie.NewSecure(common.Hash{}, endBlock.Root(), triedb)
+ newTrie, err := trie.NewSecure(trie.StateTrieID(endBlock.Root()), triedb)
if err != nil {
return nil, err
}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 018f03e38d..49804de0fa 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -234,7 +234,7 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
// For now only check that the state trie is correct
if block := dl.GetBlockByHash(hash); block != nil {
- _, err := trie.NewSecure(common.Hash{}, block.Root(), trie.NewDatabase(dl.stateDb))
+ _, err := trie.NewSecure(trie.StateTrieID(block.Root()), trie.NewDatabase(dl.stateDb))
return err
}
return fmt.Errorf("non existent block: %x", hash[:4])
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index f2bd4eae9e..235340fa96 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -165,7 +165,7 @@ func handleMessage(backend Backend, peer *Peer) error {
req.Bytes = softResponseLimit
}
// Retrieve the requested state and bail out if non existent
- tr, err := trie.New(common.Hash{}, req.Root, backend.Chain().StateCache().TrieDB())
+ tr, err := trie.New(trie.StateTrieID(req.Root), backend.Chain().StateCache().TrieDB())
if err != nil {
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
}
@@ -315,7 +315,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if origin != (common.Hash{}) || abort {
// Request started at a non-zero hash or was capped prematurely, add
// the endpoint Merkle proofs
- accTrie, err := trie.New(common.Hash{}, req.Root, backend.Chain().StateCache().TrieDB())
+ accTrie, err := trie.New(trie.StateTrieID(req.Root), backend.Chain().StateCache().TrieDB())
if err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
@@ -323,7 +323,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
- stTrie, err := trie.New(account, acc.Root, backend.Chain().StateCache().TrieDB())
+ stTrie, err := trie.New(trie.StorageTrieID(req.Root, account, acc.Root), backend.Chain().StateCache().TrieDB())
if err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
@@ -430,7 +430,7 @@ func handleMessage(backend Backend, peer *Peer) error {
// Make sure we have the state associated with the request
triedb := backend.Chain().StateCache().TrieDB()
- accTrie, err := trie.NewSecure(common.Hash{}, req.Root, triedb)
+ accTrie, err := trie.NewSecure(trie.StateTrieID(req.Root), triedb)
if err != nil {
// We don't have the requested state available, bail out
return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})
@@ -472,7 +472,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if err != nil || account == nil {
break
}
- stTrie, err := trie.NewSecure(common.BytesToHash(pathset[0]), common.BytesToHash(account.Root), triedb)
+ stTrie, err := trie.NewSecure(trie.StorageTrieID(req.Root, common.BytesToHash(pathset[0]), common.BytesToHash(account.Root)), triedb)
loads++ // always account database reads, even for failures
if err != nil {
break
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 940357f412..59fb52f9ff 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -1383,7 +1383,7 @@ func makeAccountTrieNoStorage(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
root, nodes, _ := accTrie.Commit(false)
db.Update(trie.NewWithNodeSet(nodes))
- accTrie, _ = trie.New(common.Hash{}, root, db)
+ accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
}
@@ -1444,7 +1444,7 @@ func makeBoundaryAccountTrie(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
root, nodes, _ := accTrie.Commit(false)
db.Update(trie.NewWithNodeSet(nodes))
- accTrie, _ = trie.New(common.Hash{}, root, db)
+ accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
}
@@ -1493,10 +1493,10 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
db.Update(nodes)
// Re-create tries with new root
- accTrie, _ = trie.New(common.Hash{}, root, db)
+ accTrie, _ = trie.New(trie.StateTrieID(root), db)
for i := uint64(1); i <= uint64(accounts); i++ {
key := key32(i)
- trie, _ := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
+ trie, _ := trie.New(trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)]), db)
storageTries[common.BytesToHash(key)] = trie
}
return db.Scheme(), accTrie, entries, storageTries, storageEntries
@@ -1555,13 +1555,13 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (trie.
db.Update(nodes)
// Re-create tries with new root
- accTrie, err := trie.New(common.Hash{}, root, db)
+ accTrie, err := trie.New(trie.StateTrieID(root), db)
if err != nil {
panic(err)
}
for i := uint64(1); i <= uint64(accounts); i++ {
key := key32(i)
- trie, err := trie.New(common.BytesToHash(key), storageRoots[common.BytesToHash(key)], db)
+ trie, err := trie.New(trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)]), db)
if err != nil {
panic(err)
}
@@ -1574,7 +1574,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (trie.
// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
// that tries are unique.
func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
- trie, _ := trie.New(owner, common.Hash{}, db)
+ trie, _ := trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
var entries entrySlice
for i := uint64(1); i <= n; i++ {
// store 'x' at slot 'x'
@@ -1600,7 +1600,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
var (
entries entrySlice
boundaries []common.Hash
- trie, _ = trie.New(owner, common.Hash{}, db)
+ trie, _ = trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
)
// Initialize boundaries
var next common.Hash
@@ -1647,7 +1647,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
triedb := trie.NewDatabase(rawdb.NewDatabase(db))
- accTrie, err := trie.New(common.Hash{}, root, triedb)
+ accTrie, err := trie.New(trie.StateTrieID(root), triedb)
if err != nil {
t.Fatal(err)
}
@@ -1665,7 +1665,7 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
}
accounts++
if acc.Root != emptyRoot {
- storeTrie, err := trie.NewSecure(common.BytesToHash(accIt.Key), acc.Root, triedb)
+ storeTrie, err := trie.NewSecure(trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root), triedb)
if err != nil {
t.Fatal(err)
}
diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go
index 70f76956ff..963d4d9035 100644
--- a/les/downloader/downloader_test.go
+++ b/les/downloader/downloader_test.go
@@ -229,7 +229,7 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
// For now only check that the state trie is correct
if block := dl.GetBlockByHash(hash); block != nil {
- _, err := trie.NewSecure(common.Hash{}, block.Root(), trie.NewDatabase(dl.stateDb))
+ _, err := trie.NewSecure(trie.StateTrieID(block.Root()), trie.NewDatabase(dl.stateDb))
return err
}
return fmt.Errorf("non existent block: %x", hash[:4])
diff --git a/les/handler_test.go b/les/handler_test.go
index 0ccc71973f..4fa19e9915 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -406,7 +406,7 @@ func testGetProofs(t *testing.T, protocol int) {
accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
header := bc.GetHeaderByNumber(i)
- trie, _ := trie.New(common.Hash{}, header.Root, trie.NewDatabase(server.db))
+ trie, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db))
for _, acc := range accounts {
req := ProofReq{
@@ -457,7 +457,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
var expected []rlp.RawValue
if wantOK {
proofsV2 := light.NewNodeSet()
- t, _ := trie.New(common.Hash{}, header.Root, trie.NewDatabase(server.db))
+ t, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db))
t.Prove(account, 0, proofsV2)
expected = proofsV2.NodeList()
}
@@ -513,7 +513,7 @@ func testGetCHTProofs(t *testing.T, protocol int) {
AuxData: [][]byte{rlp},
}
root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
- trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
+ trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
trie.Prove(key, 0, &proofsV2.Proofs)
// Assemble the requests for the different protocols
requestsV2 := []HelperTrieReq{{
@@ -578,7 +578,7 @@ func testGetBloombitsProofs(t *testing.T, protocol int) {
var proofs HelperTrieResps
root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
- trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
+ trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
trie.Prove(key, 0, &proofs.Proofs)
// Send the proof request and verify the response
diff --git a/les/server_handler.go b/les/server_handler.go
index 9cda4368ef..9029034a5c 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -360,7 +360,7 @@ func (h *serverHandler) AddTxsSync() bool {
// getAccount retrieves an account from the state based on root.
func getAccount(triedb *trie.Database, root, hash common.Hash) (types.StateAccount, error) {
- trie, err := trie.New(common.Hash{}, root, triedb)
+ trie, err := trie.New(trie.StateTrieID(root), triedb)
if err != nil {
return types.StateAccount{}, err
}
@@ -392,7 +392,7 @@ func (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie {
if root == (common.Hash{}) {
return nil
}
- trie, _ := trie.New(common.Hash{}, root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
+ trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
return trie
}
diff --git a/les/server_requests.go b/les/server_requests.go
index b0b675b659..5b35115791 100644
--- a/les/server_requests.go
+++ b/les/server_requests.go
@@ -429,7 +429,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {
p.bumpInvalid()
continue
}
- trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)
+ trie, err = statedb.OpenStorageTrie(root, common.BytesToHash(request.AccKey), account.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err)
continue
diff --git a/light/odr.go b/light/odr.go
index 493f6fd7fc..f998dbe584 100644
--- a/light/odr.go
+++ b/light/odr.go
@@ -54,9 +54,11 @@ type OdrRequest interface {
// TrieID identifies a state or account storage trie
type TrieID struct {
- BlockHash, Root common.Hash
- BlockNumber uint64
- AccKey []byte
+ BlockHash common.Hash
+ BlockNumber uint64
+ StateRoot common.Hash
+ Root common.Hash
+ AccKey []byte
}
// StateTrieID returns a TrieID for a state trie belonging to a certain block
@@ -65,8 +67,9 @@ func StateTrieID(header *types.Header) *TrieID {
return &TrieID{
BlockHash: header.Hash(),
BlockNumber: header.Number.Uint64(),
- AccKey: nil,
+ StateRoot: header.Root,
Root: header.Root,
+ AccKey: nil,
}
}
@@ -77,6 +80,7 @@ func StorageTrieID(state *TrieID, addrHash, root common.Hash) *TrieID {
return &TrieID{
BlockHash: state.BlockHash,
BlockNumber: state.BlockNumber,
+ StateRoot: state.StateRoot,
AccKey: addrHash[:],
Root: root,
}
diff --git a/light/odr_test.go b/light/odr_test.go
index 9b9ab7e1c3..1cadad5ec2 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -82,7 +82,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
req.Receipts = rawdb.ReadRawReceipts(odr.sdb, req.Hash, *number)
}
case *TrieRequest:
- t, _ := trie.New(common.BytesToHash(req.Id.AccKey), req.Id.Root, trie.NewDatabase(odr.sdb))
+ t, _ := trie.New(trie.StorageTrieID(req.Id.StateRoot, common.BytesToHash(req.Id.AccKey), req.Id.Root), trie.NewDatabase(odr.sdb))
nodes := NewNodeSet()
t.Prove(req.Key, 0, nodes)
req.Proof = nodes
diff --git a/light/postprocess.go b/light/postprocess.go
index 7d45839391..d6ba1089a7 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -187,12 +187,12 @@ func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSecti
root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
}
var err error
- c.trie, err = trie.New(common.Hash{}, root, c.triedb)
+ c.trie, err = trie.New(trie.StateTrieID(root), c.triedb)
if err != nil && c.odr != nil {
err = c.fetchMissingNodes(ctx, section, root)
if err == nil {
- c.trie, err = trie.New(common.Hash{}, root, c.triedb)
+ c.trie, err = trie.New(trie.StateTrieID(root), c.triedb)
}
}
c.section = section
@@ -228,7 +228,7 @@ func (c *ChtIndexerBackend) Commit() error {
}
}
// Re-create trie with nelwy generated root and updated database.
- c.trie, err = trie.New(common.Hash{}, root, c.triedb)
+ c.trie, err = trie.New(trie.StateTrieID(root), c.triedb)
if err != nil {
return err
}
@@ -414,11 +414,11 @@ func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, las
root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
}
var err error
- b.trie, err = trie.New(common.Hash{}, root, b.triedb)
+ b.trie, err = trie.New(trie.StateTrieID(root), b.triedb)
if err != nil && b.odr != nil {
err = b.fetchMissingNodes(ctx, section, root)
if err == nil {
- b.trie, err = trie.New(common.Hash{}, root, b.triedb)
+ b.trie, err = trie.New(trie.StateTrieID(root), b.triedb)
}
}
b.section = section
@@ -476,7 +476,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
}
// Re-create trie with nelwy generated root and updated database.
- b.trie, err = trie.New(common.Hash{}, root, b.triedb)
+ b.trie, err = trie.New(trie.StateTrieID(root), b.triedb)
if err != nil {
return err
}
diff --git a/light/trie.go b/light/trie.go
index a2ef8ebff3..e60ad49c97 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -54,7 +54,7 @@ func (db *odrDatabase) OpenTrie(root common.Hash) (state.Trie, error) {
return &odrTrie{db: db, id: db.id}, nil
}
-func (db *odrDatabase) OpenStorageTrie(addrHash, root common.Hash) (state.Trie, error) {
+func (db *odrDatabase) OpenStorageTrie(stateRoot, addrHash, root common.Hash) (state.Trie, error) {
return &odrTrie{db: db, id: StorageTrieID(db.id, addrHash, root)}, nil
}
@@ -63,8 +63,7 @@ func (db *odrDatabase) CopyTrie(t state.Trie) state.Trie {
case *odrTrie:
cpy := &odrTrie{db: t.db, id: t.id}
if t.trie != nil {
- cpytrie := *t.trie
- cpy.trie = &cpytrie
+ cpy.trie = t.trie.Copy()
}
return cpy
default:
@@ -169,11 +168,13 @@ func (t *odrTrie) do(key []byte, fn func() error) error {
for {
var err error
if t.trie == nil {
- var owner common.Hash
+ var id *trie.ID
if len(t.id.AccKey) > 0 {
- owner = common.BytesToHash(t.id.AccKey)
+ id = trie.StorageTrieID(t.id.StateRoot, common.BytesToHash(t.id.AccKey), t.id.Root)
+ } else {
+ id = trie.StateTrieID(t.id.StateRoot)
}
- t.trie, err = trie.New(owner, t.id.Root, trie.NewDatabase(t.db.backend.Database()))
+ t.trie, err = trie.New(id, trie.NewDatabase(t.db.backend.Database()))
}
if err == nil {
err = fn()
@@ -199,11 +200,13 @@ func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator {
// Open the actual non-ODR trie if that hasn't happened yet.
if t.trie == nil {
it.do(func() error {
- var owner common.Hash
+ var id *trie.ID
if len(t.id.AccKey) > 0 {
- owner = common.BytesToHash(t.id.AccKey)
+ id = trie.StorageTrieID(t.id.StateRoot, common.BytesToHash(t.id.AccKey), t.id.Root)
+ } else {
+ id = trie.StateTrieID(t.id.StateRoot)
}
- t, err := trie.New(owner, t.id.Root, trie.NewDatabase(t.db.backend.Database()))
+ t, err := trie.New(id, trie.NewDatabase(t.db.backend.Database()))
if err == nil {
it.t.trie = t
}
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 4237abfc98..4be8ebb9e8 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -21,7 +21,6 @@ import (
"encoding/binary"
"fmt"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/trie"
)
@@ -174,7 +173,7 @@ func runRandTest(rt randTest) error {
return err
}
}
- newtr, err := trie.New(common.Hash{}, hash, triedb)
+ newtr, err := trie.New(trie.TrieID(hash), triedb)
if err != nil {
return err
}
diff --git a/trie/committer.go b/trie/committer.go
index 495da8a1fc..cf43e12fed 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -43,8 +43,9 @@ type committer struct {
tmp sliceBuffer
sha crypto.KeccakState
- owner common.Hash
+ owner common.Hash // TODO: same as nodes.owner, consider removing
nodes *NodeSet
+ tracer *tracer
collectLeaf bool
}
@@ -59,9 +60,10 @@ var committerPool = sync.Pool{
}
// newCommitter creates a new committer or picks one from the pool.
-func newCommitter(owner common.Hash, collectLeaf bool) *committer {
+func newCommitter(owner common.Hash, tracer *tracer, collectLeaf bool) *committer {
return &committer{
nodes: NewNodeSet(owner),
+ tracer: tracer,
collectLeaf: collectLeaf,
}
}
@@ -72,6 +74,20 @@ func (c *committer) Commit(n node) (hashNode, *NodeSet, error) {
if err != nil {
return nil, nil, err
}
+ // Some nodes can be deleted from trie which can't be captured by committer
+ // itself. Iterate all deleted nodes tracked by tracer and marked them as
+ // deleted only if they are present in database previously.
+ for _, path := range c.tracer.deleteList() {
+ // There are a few possibilities for this scenario(the node is deleted
+ // but not present in database previously), for example the node was
+ // embedded in the parent and now deleted from the trie. In this case
+ // it's noop from database's perspective.
+ val := c.tracer.getPrev(path)
+ if len(val) == 0 {
+ continue
+ }
+ c.nodes.markDeleted(path, val)
+ }
return h.(hashNode), c.nodes, nil
}
@@ -103,6 +119,12 @@ func (c *committer) commit(path []byte, n node) (node, error) {
if hn, ok := hashedNode.(hashNode); ok {
return hn, nil
}
+ // The short node now is embedded in its parent. Mark the node as
+ // deleted if it's present in database previously. It's equivalent
+ // as deletion from database's perspective.
+ if prev := c.tracer.getPrev(path); len(prev) != 0 {
+ c.nodes.markDeleted(path, prev)
+ }
return collapsed, nil
case *fullNode:
hashedKids, err := c.commitChildren(path, cn)
@@ -116,6 +138,12 @@ func (c *committer) commit(path []byte, n node) (node, error) {
if hn, ok := hashedNode.(hashNode); ok {
return hn, nil
}
+ // The short node now is embedded in its parent. Mark the node as
+ // deleted if it's present in database previously. It's equivalent
+ // as deletion from database's perspective.
+ if prev := c.tracer.getPrev(path); len(prev) != 0 {
+ c.nodes.markDeleted(path, prev)
+ }
return collapsed, nil
case hashNode:
return cn, nil
@@ -183,7 +211,7 @@ func (c *committer) store(path []byte, n node) node {
)
// Collect the dirty node to nodeset for return.
- c.nodes.add(string(path), mnode)
+ c.nodes.markUpdated(path, mnode, c.tracer.getPrev(path))
// Collect the corresponding leaf node if it's required. We don't check
// full node since it's impossible to store value in fullNode. The key
// length of leaves should be exactly same.
diff --git a/trie/database.go b/trie/database.go
index c3add0a8a2..28fadba104 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -785,8 +785,8 @@ func (db *Database) Update(nodes *MergedNodeSet) error {
// can be linked with their parent correctly. The order of writing between
// different tries(account trie, storage tries) is not required.
for owner, subset := range nodes.sets {
- for _, path := range subset.paths {
- n, ok := subset.nodes[path]
+ for _, path := range subset.updates.order {
+ n, ok := subset.updates.nodes[path]
if !ok {
return fmt.Errorf("missing node %x %v", owner, path)
}
@@ -826,6 +826,34 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
}
+// GetReader retrieves a node reader belonging to the given state root.
+func (db *Database) GetReader(root common.Hash) Reader {
+ return newHashReader(db)
+}
+
+// hashReader is reader of hashDatabase which implements the Reader interface.
+type hashReader struct {
+ db *Database
+}
+
+// newHashReader initializes the hash reader.
+func newHashReader(db *Database) *hashReader {
+ return &hashReader{db: db}
+}
+
+// Node retrieves the trie node with the given node hash.
+// No error will be returned if the node is not found.
+func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) (node, error) {
+ return reader.db.node(hash), nil
+}
+
+// NodeBlob retrieves the RLP-encoded trie node blob with the given node hash.
+// No error will be returned if the node is not found.
+func (reader *hashReader) NodeBlob(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
+ blob, _ := reader.db.Node(hash)
+ return blob, nil
+}
+
// saveCache saves clean state cache to given directory path
// using specified CPU cores.
func (db *Database) saveCache(dir string, threads int) error {
diff --git a/trie/iterator.go b/trie/iterator.go
index d37cccd603..39a9ebcefc 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -361,7 +361,12 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
}
}
}
- return it.trie.resolveHash(hash, path)
+ // Retrieve the specified node from the underlying node reader.
+ // it.trie.resolveAndTrack is not used since in that function the
+ // loaded blob will be tracked, while it's not required here since
+ // all loaded nodes won't be linked to trie at all and track nodes
+ // may lead to out-of-memory issue.
+ return it.trie.reader.node(path, common.BytesToHash(hash))
}
func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 32d2bfae39..d0e9b7f128 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -65,7 +65,7 @@ func TestIterator(t *testing.T) {
t.Fatalf("Failed to commit trie %v", err)
}
db.Update(NewWithNodeSet(nodes))
- trie, _ = New(common.Hash{}, root, db)
+ trie, _ = New(TrieID(root), db)
found := make(map[string]string)
it := NewIterator(trie.NodeIterator(nil))
for it.Next() {
@@ -226,7 +226,7 @@ func TestDifferenceIterator(t *testing.T) {
}
rootA, nodesA, _ := triea.Commit(false)
dba.Update(NewWithNodeSet(nodesA))
- triea, _ = New(common.Hash{}, rootA, dba)
+ triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
trieb := NewEmpty(dbb)
@@ -235,7 +235,7 @@ func TestDifferenceIterator(t *testing.T) {
}
rootB, nodesB, _ := trieb.Commit(false)
dbb.Update(NewWithNodeSet(nodesB))
- trieb, _ = New(common.Hash{}, rootB, dbb)
+ trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
@@ -268,7 +268,7 @@ func TestUnionIterator(t *testing.T) {
}
rootA, nodesA, _ := triea.Commit(false)
dba.Update(NewWithNodeSet(nodesA))
- triea, _ = New(common.Hash{}, rootA, dba)
+ triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
trieb := NewEmpty(dbb)
@@ -277,7 +277,7 @@ func TestUnionIterator(t *testing.T) {
}
rootB, nodesB, _ := trieb.Commit(false)
dbb.Update(NewWithNodeSet(nodesB))
- trieb, _ = New(common.Hash{}, rootB, dbb)
+ trieb, _ = New(TrieID(rootB), dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
it := NewIterator(di)
@@ -355,7 +355,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
}
for i := 0; i < 20; i++ {
// Create trie that will load all nodes from DB.
- tr, _ := New(common.Hash{}, tr.Hash(), triedb)
+ tr, _ := New(TrieID(tr.Hash()), triedb)
// Remove a random node from the database. It can't be the root node
// because that one is already loaded.
@@ -444,7 +444,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
}
// Create a new iterator that seeks to "bars". Seeking can't proceed because
// the node is missing.
- tr, _ := New(common.Hash{}, root, triedb)
+ tr, _ := New(TrieID(root), triedb)
it := tr.NodeIterator([]byte("bars"))
missing, ok := it.Error().(*MissingNodeError)
if !ok {
@@ -532,7 +532,7 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
// Create an empty trie
logDb := &loggingDb{0, memorydb.New()}
triedb := NewDatabase(rawdb.NewDatabase(logDb))
- trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
+ trie, _ := NewSecure(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data
for i := 0; i < 10000; i++ {
diff --git a/trie/nodeset.go b/trie/nodeset.go
index 4825ecaebf..a94535069e 100644
--- a/trie/nodeset.go
+++ b/trie/nodeset.go
@@ -18,52 +18,171 @@ package trie
import (
"fmt"
+ "reflect"
+ "strings"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
)
// memoryNode is all the information we know about a single cached trie node
// in the memory.
type memoryNode struct {
- hash common.Hash // Node hash, computed by hashing rlp value
- size uint16 // Byte size of the useful cached data
- node node // Cached collapsed trie node, or raw rlp data
+ hash common.Hash // Node hash, computed by hashing rlp value, empty for deleted nodes
+ size uint16 // Byte size of the useful cached data, 0 for deleted nodes
+ node node // Cached collapsed trie node, or raw rlp data, nil for deleted nodes
+}
+
+// memoryNodeSize is the raw size of a memoryNode data structure without any
+// node data included. It's an approximate size, but should be a lot better
+// than not counting them.
+// nolint:unused
+var memoryNodeSize = int(reflect.TypeOf(memoryNode{}).Size())
+
+// memorySize returns the total memory size used by this node.
+// nolint:unused
+func (n *memoryNode) memorySize(key int) int {
+ return int(n.size) + memoryNodeSize + key
+}
+
+// rlp returns the raw rlp encoded blob of the cached trie node, either directly
+// from the cache, or by regenerating it from the collapsed node.
+// nolint:unused
+func (n *memoryNode) rlp() []byte {
+ if node, ok := n.node.(rawNode); ok {
+ return node
+ }
+ enc, err := rlp.EncodeToBytes(n.node)
+ if err != nil {
+ log.Error("Failed to encode trie node", "err", err)
+ }
+ return enc
+}
+
+// obj returns the decoded and expanded trie node, either directly from the cache,
+// or by regenerating it from the rlp encoded blob.
+// nolint:unused
+func (n *memoryNode) obj() node {
+ if node, ok := n.node.(rawNode); ok {
+ return mustDecodeNode(n.hash[:], node)
+ }
+ return expandNode(n.hash[:], n.node)
+}
+
+// nodeWithPrev wraps the memoryNode with the previous node value.
+type nodeWithPrev struct {
+ *memoryNode
+ prev []byte // RLP-encoded previous value, nil means it's non-existent
+}
+
+// unwrap returns the internal memoryNode object.
+// nolint:unused
+func (n *nodeWithPrev) unwrap() *memoryNode {
+ return n.memoryNode
+}
+
+// memorySize returns the total memory size used by this node. It overloads
+// the function in memoryNode by counting the size of previous value as well.
+// nolint: unused
+func (n *nodeWithPrev) memorySize(key int) int {
+ return n.memoryNode.memorySize(key) + len(n.prev)
+}
+
+// nodesWithOrder represents a collection of dirty nodes which includes
+// newly-inserted and updated nodes. The modification order of all nodes
+// is represented by order list.
+type nodesWithOrder struct {
+ order []string // the path list of dirty nodes, sort by insertion order
+ nodes map[string]*nodeWithPrev // the map of dirty nodes, keyed by node path
}
// NodeSet contains all dirty nodes collected during the commit operation
// Each node is keyed by path. It's not the thread-safe to use.
type NodeSet struct {
- owner common.Hash // the identifier of the trie
- paths []string // the path of dirty nodes, sort by insertion order
- nodes map[string]*memoryNode // the map of dirty nodes, keyed by node path
- leaves []*leaf // the list of dirty leaves
+ owner common.Hash // the identifier of the trie
+ updates *nodesWithOrder // the set of updated nodes(newly inserted, updated)
+ deletes map[string][]byte // the map of deleted nodes, keyed by node
+ leaves []*leaf // the list of dirty leaves
}
// NewNodeSet initializes an empty node set to be used for tracking dirty nodes
// from a specific account or storage trie. The owner is zero for the account
// trie and the owning account address hash for storage tries.
-
func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{
owner: owner,
- nodes: make(map[string]*memoryNode),
+ updates: &nodesWithOrder{
+ nodes: make(map[string]*nodeWithPrev),
+ },
+ deletes: make(map[string][]byte),
+ }
+}
+
+// NewNodeSetWithDeletion initializes the nodeset with provided deletion set.
+func NewNodeSetWithDeletion(owner common.Hash, paths [][]byte, prev [][]byte) *NodeSet {
+ set := NewNodeSet(owner)
+ for i, path := range paths {
+ set.markDeleted(path, prev[i])
+ }
+ return set
+}
+
+// markUpdated marks the node as dirty(newly-inserted or updated) with provided
+// node path, node object along with its previous value.
+func (set *NodeSet) markUpdated(path []byte, node *memoryNode, prev []byte) {
+ set.updates.order = append(set.updates.order, string(path))
+ set.updates.nodes[string(path)] = &nodeWithPrev{
+ memoryNode: node,
+ prev: prev,
}
}
-// add caches node with provided path and node object.
-func (set *NodeSet) add(path string, node *memoryNode) {
- set.paths = append(set.paths, path)
- set.nodes[path] = node
+// markDeleted marks the node as deleted with provided path and previous value.
+func (set *NodeSet) markDeleted(path []byte, prev []byte) {
+ set.deletes[string(path)] = prev
}
-// addLeaf caches the provided leaf node.
+// addLeaf collects the provided leaf node into set.
func (set *NodeSet) addLeaf(leaf *leaf) {
set.leaves = append(set.leaves, leaf)
}
-// Len returns the number of dirty nodes contained in the set.
-func (set *NodeSet) Len() int {
- return len(set.nodes)
+// Size returns the number of updated and deleted nodes contained in the set.
+func (set *NodeSet) Size() (int, int) {
+ return len(set.updates.order), len(set.deletes)
+}
+
+// Hashes returns the hashes of all updated nodes.
+func (set *NodeSet) Hashes() []common.Hash {
+ var ret []common.Hash
+ for _, node := range set.updates.nodes {
+ ret = append(ret, node.hash)
+ }
+ return ret
+}
+
+// Summary returns a string-representation of the NodeSet.
+func (set *NodeSet) Summary() string {
+ var out = new(strings.Builder)
+ fmt.Fprintf(out, "nodeset owner: %v\n", set.owner)
+ if set.updates != nil {
+ for _, key := range set.updates.order {
+ updated := set.updates.nodes[key]
+ if updated.prev != nil {
+ fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", key, updated.hash, updated.prev)
+ } else {
+ fmt.Fprintf(out, " [+]: %x -> %v\n", key, updated.hash)
+ }
+ }
+ }
+ for k, n := range set.deletes {
+ fmt.Fprintf(out, " [-]: %x -> %x\n", k, n)
+ }
+ for _, n := range set.leaves {
+ fmt.Fprintf(out, "[leaf]: %v\n", n)
+ }
+ return out.String()
}
// MergedNodeSet represents a merged dirty node set for a group of tries.
diff --git a/trie/proof.go b/trie/proof.go
index db113eecbd..c589971976 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -22,7 +22,6 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -61,8 +60,13 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
key = key[1:]
nodes = append(nodes, n)
case hashNode:
+ // Retrieve the specified node from the underlying node reader.
+ // trie.resolveAndTrack is not used since in that function the
+ // loaded blob will be tracked, while it's not required here since
+ // all loaded nodes won't be linked to trie at all and track nodes
+ // may lead to out-of-memory issue
var err error
- tn, err = t.resolveHash(n, prefix)
+ tn, err = t.reader.node(prefix, common.BytesToHash(n))
if err != nil {
log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
return err
@@ -558,7 +562,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
}
// Rebuild the trie with the leaf stream, the shape of trie
// should be same with the original one.
- tr := &Trie{root: root, db: NewDatabase(rawdb.NewMemoryDatabase())}
+ tr := &Trie{root: root, reader: newEmptyReader()}
if empty {
tr.root = nil
}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 2cf1d325f8..ce69b839bb 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -54,11 +54,11 @@ type SecureTrie struct {
// Loaded nodes are kept around until their 'cache generation' expires.
// A new cache generation is created by each call to Commit.
// cachelimit sets the number of past cache generations to keep.
-func NewSecure(owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) {
+func NewSecure(id *ID, db *Database) (*SecureTrie, error) {
if db == nil {
panic("trie.NewSecure called without a database")
}
- trie, err := New(owner, root, db)
+ trie, err := New(id, db)
if err != nil {
return nil, err
}
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index 5030c5b3a6..835608a0e3 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -28,7 +28,7 @@ import (
)
func newEmptySecure() *SecureTrie {
- trie, _ := NewSecure(common.Hash{}, common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()))
+ trie, _ := NewSecure(TrieID(common.Hash{}), NewDatabase(rawdb.NewMemoryDatabase()))
return trie
}
@@ -36,7 +36,7 @@ func newEmptySecure() *SecureTrie {
func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
triedb := NewDatabase(rawdb.NewMemoryDatabase())
- trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
+ trie, _ := NewSecure(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data
content := make(map[string][]byte)
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 095892e16e..c964608aa1 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -31,7 +31,7 @@ import (
func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
triedb := NewDatabase(rawdb.NewMemoryDatabase())
- trie, _ := NewSecure(common.Hash{}, common.Hash{}, triedb)
+ trie, _ := NewSecure(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data
content := make(map[string][]byte)
@@ -68,7 +68,7 @@ func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
// content map.
func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
// Check root availability and trie contents
- trie, err := NewSecure(common.Hash{}, common.BytesToHash(root), db)
+ trie, err := NewSecure(TrieID(common.BytesToHash(root)), db)
if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err)
}
@@ -85,7 +85,7 @@ func checkTrieContents(t *testing.T, db *Database, root []byte, content map[stri
// checkTrieConsistency checks that all nodes in a trie are indeed present.
func checkTrieConsistency(db *Database, root common.Hash) error {
// Create and iterate a trie rooted in a subnode
- trie, err := NewSecure(common.Hash{}, root, db)
+ trie, err := NewSecure(TrieID(root), db)
if err != nil {
return nil // Consider a non existent state consistent
}
@@ -107,7 +107,7 @@ func TestEmptySync(t *testing.T) {
dbA := NewDatabase(rawdb.NewMemoryDatabase())
dbB := NewDatabase(rawdb.NewMemoryDatabase())
emptyA := NewEmpty(dbA)
- emptyB, _ := New(common.Hash{}, emptyRoot, dbB)
+ emptyB, _ := New(TrieID(emptyRoot), dbB)
for i, trie := range []*Trie{emptyA, emptyB} {
sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()), []*Database{dbA, dbB}[i].Scheme())
diff --git a/trie/trie.go b/trie/trie.go
index a1cc31c5cb..b1c3d71363 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -51,7 +51,7 @@ type Trie struct {
// db is the handler trie can retrieve nodes from. It's
// only for reading purpose and not available for writing.
- db *Database
+ reader *trieReader
// tracer is the tool to track the trie changes.
// It will be reset after each commit operation.
@@ -63,21 +63,24 @@ func (t *Trie) newFlag() nodeFlag {
return nodeFlag{dirty: true}
}
-// New creates a trie with an existing root node from db and an assigned
-// owner for storage proximity.
-//
-// If root is the zero hash or the sha3 hash of an empty string, the
-// trie is initially empty and does not require a database. Otherwise,
-// New will panic if db is nil and returns a MissingNodeError if root does
-// not exist in the database. Accessing the trie loads nodes from db on demand.
-func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
+// New creates the trie instance with provided trie id and the read-only
+// database. The state specified by trie id must be available, otherwise
+// an error will be returned. The trie root specified by trie id can be
+// zero hash or the sha3 hash of an empty string, then trie is initially
+// empty, otherwise, the root node must be present in database or returns
+// a MissingNodeError if not.
+func New(id *ID, db NodeReader) (*Trie, error) {
+ reader, err := newTrieReader(id.StateRoot, id.Owner, db)
+ if err != nil {
+ return nil, err
+ }
trie := &Trie{
- owner: owner,
- db: db,
+ owner: id.Owner,
+ reader: reader,
//tracer: newTracer(),
}
- if root != (common.Hash{}) && root != emptyRoot {
- rootnode, err := trie.resolveHash(root[:], nil)
+ if id.Root != (common.Hash{}) && id.Root != emptyRoot {
+ rootnode, err := trie.resolveAndTrack(id.Root[:], nil)
if err != nil {
return nil, err
}
@@ -88,7 +91,7 @@ func New(owner common.Hash, root common.Hash, db *Database) (*Trie, error) {
// NewEmpty is a shortcut to create empty tree. It's mostly used in tests.
func NewEmpty(db *Database) *Trie {
- tr, _ := New(common.Hash{}, common.Hash{}, db)
+ tr, _ := New(TrieID(common.Hash{}), db)
return tr
}
@@ -144,7 +147,7 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
}
return value, n, didResolve, err
case hashNode:
- child, err := t.resolveHash(n, key[:pos])
+ child, err := t.resolveAndTrack(n, key[:pos])
if err != nil {
return nil, n, true, err
}
@@ -190,7 +193,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
if hash == nil {
return nil, origNode, 0, errors.New("non-consensus node")
}
- blob, err := t.db.Node(common.BytesToHash(hash))
+ blob, err := t.reader.nodeBlob(path, common.BytesToHash(hash))
return blob, origNode, 1, err
}
@@ -221,7 +224,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
return item, n, resolved, err
case hashNode:
- child, err := t.resolveHash(n, path[:pos])
+ child, err := t.resolveAndTrack(n, path[:pos])
if err != nil {
return nil, n, 1, err
}
@@ -343,7 +346,7 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
// We've hit a part of the trie that isn't loaded yet. Load
// the node and insert into it. This leaves all child nodes on
// the path to the value in the trie.
- rn, err := t.resolveHash(n, prefix)
+ rn, err := t.resolveAndTrack(n, prefix)
if err != nil {
return false, nil, err
}
@@ -523,7 +526,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// We've hit a part of the trie that isn't loaded yet. Load
// the node and delete from it. This leaves all child nodes on
// the path to the value in the trie.
- rn, err := t.resolveHash(n, prefix)
+ rn, err := t.resolveAndTrack(n, prefix)
if err != nil {
return false, nil, err
}
@@ -547,19 +550,22 @@ func concat(s1 []byte, s2 ...byte) []byte {
func (t *Trie) resolve(n node, prefix []byte) (node, error) {
if n, ok := n.(hashNode); ok {
- return t.resolveHash(n, prefix)
+ return t.resolveAndTrack(n, prefix)
}
return n, nil
}
-// resolveHash loads node from the underlying database with the provided
-// node hash and path prefix.
-func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
- hash := common.BytesToHash(n)
- if node := t.db.node(hash); node != nil {
- return node, nil
+// resolveAndTrack loads node from the underlying store with the given node hash
+// and path prefix and also tracks the loaded node blob in tracer treated as the
+// node's original value. The rlp-encoded blob is preferred to be loaded from
+// database because it's easy to decode node while complex to encode node to blob.
+func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
+ blob, err := t.reader.nodeBlob(prefix, common.BytesToHash(n))
+ if err != nil {
+ return nil, err
}
- return nil, &MissingNodeError{Owner: t.owner, NodeHash: hash, Path: prefix}
+ t.tracer.onRead(prefix, blob)
+ return mustDecodeNode(n, blob), nil
}
// Hash returns the root hash of the trie. It does not write to the
@@ -595,7 +601,7 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
t.root = hashedNode
return rootHash, nil, nil
}
- h := newCommitter(t.owner, collectLeaf)
+ h := newCommitter(t.owner, t.tracer, collectLeaf)
newRoot, nodes, err := h.Commit(t.root)
if err != nil {
return common.Hash{}, nil, err
@@ -632,7 +638,7 @@ func (t *Trie) Copy() *Trie {
root: t.root,
owner: t.owner,
unhashed: t.unhashed,
- db: t.db,
+ reader: t.reader,
tracer: t.tracer.copy(),
}
}
diff --git a/trie/trie_id.go b/trie/trie_id.go
new file mode 100644
index 0000000000..8ab490ca3b
--- /dev/null
+++ b/trie/trie_id.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package trie
+
+import "github.com/ethereum/go-ethereum/common"
+
+// ID is the identifier for uniquely identifying a trie.
+type ID struct {
+ StateRoot common.Hash // The root of the corresponding state(block.root)
+ Owner common.Hash // The contract address hash which the trie belongs to
+ Root common.Hash // The root hash of trie
+}
+
+// StateTrieID constructs an identifier for state trie with the provided state root.
+func StateTrieID(root common.Hash) *ID {
+ return &ID{
+ StateRoot: root,
+ Owner: common.Hash{},
+ Root: root,
+ }
+}
+
+// StorageTrieID constructs an identifier for storage trie which belongs to a certain
+// state and contract specified by the stateRoot and owner.
+func StorageTrieID(stateRoot common.Hash, owner common.Hash, root common.Hash) *ID {
+ return &ID{
+ StateRoot: stateRoot,
+ Owner: owner,
+ Root: root,
+ }
+}
+
+// TrieID constructs an identifier for a standard trie(not a second-layer trie)
+// with provided root. It's mostly used in tests and some other tries like CHT trie.
+func TrieID(root common.Hash) *ID {
+ return &ID{
+ StateRoot: root,
+ Owner: common.Hash{},
+ Root: root,
+ }
+}
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
new file mode 100644
index 0000000000..14186159b7
--- /dev/null
+++ b/trie/trie_reader.go
@@ -0,0 +1,106 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// Reader wraps the Node and NodeBlob method of a backing trie store.
+type Reader interface {
+ // Node retrieves the trie node with the provided trie identifier, hexary
+ // node path and the corresponding node hash.
+ // No error will be returned if the node is not found.
+ Node(owner common.Hash, path []byte, hash common.Hash) (node, error)
+
+ // NodeBlob retrieves the RLP-encoded trie node blob with the provided trie
+ // identifier, hexary node path and the corresponding node hash.
+ // No error will be returned if the node is not found.
+ NodeBlob(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
+}
+
+// NodeReader wraps all the necessary functions for accessing trie node.
+type NodeReader interface {
+ // GetReader returns a reader for accessing all trie nodes with provided
+ // state root. Nil is returned in case the state is not available.
+ GetReader(root common.Hash) Reader
+}
+
+// trieReader is a wrapper of the underlying node reader. It's not safe
+// for concurrent usage.
+type trieReader struct {
+ owner common.Hash
+ reader Reader
+ banned map[string]struct{} // Marker to prevent node from being accessed, for tests
+}
+
+// newTrieReader initializes the trie reader with the given node reader.
+func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) {
+ reader := db.GetReader(stateRoot)
+ if reader == nil {
+ return nil, fmt.Errorf("state not found #%x", stateRoot)
+ }
+ return &trieReader{owner: owner, reader: reader}, nil
+}
+
+// newEmptyReader initializes the pure in-memory reader. All read operations
+// should be forbidden and returns the MissingNodeError.
+func newEmptyReader() *trieReader {
+ return &trieReader{}
+}
+
+// node retrieves the trie node with the provided trie node information.
+// An MissingNodeError will be returned in case the node is not found or
+// any error is encountered.
+func (r *trieReader) node(path []byte, hash common.Hash) (node, error) {
+ // Perform the logics in tests for preventing trie node access.
+ if r.banned != nil {
+ if _, ok := r.banned[string(path)]; ok {
+ return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
+ }
+ }
+ if r.reader == nil {
+ return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
+ }
+ node, err := r.reader.Node(r.owner, path, hash)
+ if err != nil || node == nil {
+ return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
+ }
+ return node, nil
+}
+
+// node retrieves the rlp-encoded trie node with the provided trie node
+// information. An MissingNodeError will be returned in case the node is
+// not found or any error is encountered.
+func (r *trieReader) nodeBlob(path []byte, hash common.Hash) ([]byte, error) {
+ // Perform the logics in tests for preventing trie node access.
+ if r.banned != nil {
+ if _, ok := r.banned[string(path)]; ok {
+ return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
+ }
+ }
+ if r.reader == nil {
+ return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
+ }
+ blob, err := r.reader.NodeBlob(r.owner, path, hash)
+ if err != nil || len(blob) == 0 {
+ return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
+ }
+ return blob, nil
+}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index c9533060c1..957b7926ac 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -71,7 +71,7 @@ func TestNull(t *testing.T) {
}
func TestMissingRoot(t *testing.T) {
- trie, err := New(common.Hash{}, common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(rawdb.NewMemoryDatabase()))
+ trie, err := New(TrieID(common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")), NewDatabase(rawdb.NewMemoryDatabase()))
if trie != nil {
t.Error("New returned non-nil trie for invalid root")
}
@@ -96,27 +96,27 @@ func testMissingNode(t *testing.T, memonly bool) {
triedb.Commit(root, true, nil)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
_, err := trie.TryGet([]byte("120000"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120099"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
err = trie.TryUpdate([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
err = trie.TryDelete([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
@@ -129,27 +129,27 @@ func testMissingNode(t *testing.T, memonly bool) {
diskdb.Delete(hash[:])
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120000"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120099"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(common.Hash{}, root, triedb)
+ trie, _ = New(TrieID(root), triedb)
err = trie.TryDelete([]byte("123456"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
@@ -205,7 +205,7 @@ func TestGet(t *testing.T) {
}
root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
- trie, _ = New(common.Hash{}, root, db)
+ trie, _ = New(TrieID(root), db)
}
}
@@ -282,7 +282,7 @@ func TestReplication(t *testing.T) {
triedb.Update(NewWithNodeSet(nodes))
// create a new trie on top of the database and check that lookups work.
- trie2, err := New(common.Hash{}, exp, triedb)
+ trie2, err := New(TrieID(exp), triedb)
if err != nil {
t.Fatalf("can't recreate trie at %x: %v", exp, err)
}
@@ -302,7 +302,7 @@ func TestReplication(t *testing.T) {
if nodes != nil {
triedb.Update(NewWithNodeSet(nodes))
}
- trie2, err = New(common.Hash{}, hash, triedb)
+ trie2, err = New(TrieID(hash), triedb)
if err != nil {
t.Fatalf("can't recreate trie at %x: %v", exp, err)
}
@@ -386,6 +386,7 @@ const (
opCommit
opHash
opItercheckhash
+ opProve
opMax // boundary value, not an actual op
)
@@ -411,7 +412,7 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
step.key = genKey()
step.value = make([]byte, 8)
binary.BigEndian.PutUint64(step.value, uint64(i))
- case opGet, opDelete:
+ case opGet, opDelete, opProve:
step.key = genKey()
}
steps = append(steps, step)
@@ -421,9 +422,10 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
func runRandTest(rt randTest) bool {
var (
- triedb = NewDatabase(rawdb.NewMemoryDatabase())
- tr = NewEmpty(triedb)
- values = make(map[string]string) // tracks content of the trie
+ triedb = NewDatabase(rawdb.NewMemoryDatabase())
+ tr = NewEmpty(triedb)
+ origTrie = NewEmpty(triedb)
+ values = make(map[string]string) // tracks content of the trie
)
tr.tracer = newTracer()
@@ -431,6 +433,7 @@ func runRandTest(rt randTest) bool {
fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
step.op, step.key, step.value, i)
switch step.op {
+
case opUpdate:
tr.Update(step.key, step.value)
values[string(step.key)] = string(step.value)
@@ -443,23 +446,62 @@ func runRandTest(rt randTest) bool {
if string(v) != want {
rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
}
+ case opProve:
+ hash := tr.Hash()
+ if hash == emptyRoot {
+ continue
+ }
+ proofDb := rawdb.NewMemoryDatabase()
+ err := tr.Prove(step.key, 0, proofDb)
+ if err != nil {
+ rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err)
+ }
+ _, err = VerifyProof(hash, step.key, proofDb)
+ if err != nil {
+ rt[i].err = fmt.Errorf("failed for verifying key %#x, %v", step.key, err)
+ }
case opHash:
tr.Hash()
case opCommit:
- hash, nodes, err := tr.Commit(false)
+ root, nodes, err := tr.Commit(true)
if err != nil {
rt[i].err = err
return false
}
+			// Validate the returned nodeset
+ if nodes != nil {
+ for path, node := range nodes.updates.nodes {
+ blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path)))
+ got := node.prev
+ if !bytes.Equal(blob, got) {
+ rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, got, blob)
+ panic(rt[i].err)
+ }
+ }
+ for path, prev := range nodes.deletes {
+ blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path)))
+ if !bytes.Equal(blob, prev) {
+ rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, prev, blob)
+ return false
+ }
+ }
+ }
if nodes != nil {
triedb.Update(NewWithNodeSet(nodes))
}
- newtr, err := New(common.Hash{}, hash, triedb)
+ newtr, err := New(TrieID(root), triedb)
if err != nil {
rt[i].err = err
return false
}
tr = newtr
+
+ // Enable node tracing. Resolve the root node again explicitly
+ // since it's not captured at the beginning.
+ tr.tracer = newTracer()
+ tr.resolveAndTrack(root.Bytes(), nil)
+ origTrie = tr.Copy()
+
case opItercheckhash:
checktr := NewEmpty(triedb)
it := NewIterator(tr.NodeIterator(nil))
@@ -604,7 +646,7 @@ func TestTinyTrie(t *testing.T) {
if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
t.Errorf("3: got %x, exp %x", root, exp)
}
- checktr := NewEmpty(trie.db)
+ checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
it := NewIterator(trie.NodeIterator(nil))
for it.Next() {
checktr.Update(it.Key, it.Value)
@@ -1061,7 +1103,7 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
_, nodes, _ := trie.Commit(false)
triedb.Update(NewWithNodeSet(nodes))
b.StartTimer()
- trie.db.Dereference(h)
+ triedb.Dereference(h)
b.StopTimer()
}
diff --git a/trie/utils.go b/trie/utils.go
index 503c033fb2..d1cd3bdd23 100644
--- a/trie/utils.go
+++ b/trie/utils.go
@@ -52,43 +52,43 @@ func newTracer() *tracer {
// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally.
// Don't change the value outside of function since it's not deep-copied.
-func (t *tracer) onRead(key []byte, val []byte) {
+func (t *tracer) onRead(path []byte, val []byte) {
// Tracer isn't used right now, remove this check later.
if t == nil {
return
}
- t.origin[string(key)] = val
+ t.origin[string(path)] = val
}
// onInsert tracks the newly inserted trie node. If it's already
// in the delete set(resurrected node), then just wipe it from
// the deletion set as it's untouched.
-func (t *tracer) onInsert(key []byte) {
+func (t *tracer) onInsert(path []byte) {
 	// Tracer isn't used right now, remove this check later.
if t == nil {
return
}
- // If the key is in the delete set, then it's a resurrected node, then wipe it.
- if _, present := t.delete[string(key)]; present {
- delete(t.delete, string(key))
+ // If the path is in the delete set, then it's a resurrected node, then wipe it.
+ if _, present := t.delete[string(path)]; present {
+ delete(t.delete, string(path))
return
}
- t.insert[string(key)] = struct{}{}
+ t.insert[string(path)] = struct{}{}
}
// onDelete tracks the newly deleted trie node. If it's already
// in the addition set, then just wipe it from the addition set
// as it's untouched.
-func (t *tracer) onDelete(key []byte) {
+func (t *tracer) onDelete(path []byte) {
 	// Tracer isn't used right now, remove this check later.
if t == nil {
return
}
- if _, present := t.insert[string(key)]; present {
- delete(t.insert, string(key))
+ if _, present := t.insert[string(path)]; present {
+ delete(t.insert, string(path))
return
}
- t.delete[string(key)] = struct{}{}
+ t.delete[string(path)] = struct{}{}
}
// insertList returns the tracked inserted trie nodes in list format.
@@ -98,8 +98,8 @@ func (t *tracer) insertList() [][]byte {
return nil
}
var ret [][]byte
- for key := range t.insert {
- ret = append(ret, []byte(key))
+ for path := range t.insert {
+ ret = append(ret, []byte(path))
}
return ret
}
@@ -111,19 +111,36 @@ func (t *tracer) deleteList() [][]byte {
return nil
}
var ret [][]byte
- for key := range t.delete {
- ret = append(ret, []byte(key))
+ for path := range t.delete {
+ ret = append(ret, []byte(path))
}
return ret
}
+// prevList returns the tracked node blobs in list format.
+func (t *tracer) prevList() ([][]byte, [][]byte) {
+ // Tracer isn't used right now, remove this check later.
+ if t == nil {
+ return nil, nil
+ }
+ var (
+ paths [][]byte
+ blobs [][]byte
+ )
+ for path, blob := range t.origin {
+ paths = append(paths, []byte(path))
+ blobs = append(blobs, blob)
+ }
+ return paths, blobs
+}
+
// getPrev returns the cached original value of the specified node.
-func (t *tracer) getPrev(key []byte) []byte {
+func (t *tracer) getPrev(path []byte) []byte {
// Don't panic on uninitialized tracer, it's possible in testing.
if t == nil {
return nil
}
- return t.origin[string(key)]
+ return t.origin[string(path)]
}
// reset clears the content tracked by tracer.
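
The resurrection rule described in the comments above (a delete followed by an
insert of the same path cancels out) can be seen in a minimal standalone mirror
of the bookkeeping; this is a sketch for illustration, not the package's API:

package main

import "fmt"

// tracer is a toy mirror of the insert/delete bookkeeping above.
type tracer struct {
	insert map[string]struct{}
	delete map[string]struct{}
}

func newTracer() *tracer {
	return &tracer{insert: map[string]struct{}{}, delete: map[string]struct{}{}}
}

func (t *tracer) onInsert(path []byte) {
	if _, present := t.delete[string(path)]; present {
		delete(t.delete, string(path)) // resurrected node: net change is zero
		return
	}
	t.insert[string(path)] = struct{}{}
}

func (t *tracer) onDelete(path []byte) {
	if _, present := t.insert[string(path)]; present {
		delete(t.insert, string(path)) // inserted then deleted: untouched overall
		return
	}
	t.delete[string(path)] = struct{}{}
}

func main() {
	t := newTracer()
	t.onDelete([]byte{0x01})
	t.onInsert([]byte{0x01}) // resurrection: both sets end up empty
	fmt.Println(len(t.insert), len(t.delete)) // 0 0
}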
diff --git a/trie/utils_test.go b/trie/utils_test.go
index ffae9ffad7..d9e2295442 100644
--- a/trie/utils_test.go
+++ b/trie/utils_test.go
@@ -17,6 +17,7 @@
package trie
import (
+ "bytes"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -70,7 +71,7 @@ func TestTrieTracer(t *testing.T) {
// Commit the changes
root, nodes, _ := trie.Commit(false)
db.Update(NewWithNodeSet(nodes))
- trie, _ = New(common.Hash{}, root, db)
+ trie, _ = New(TrieID(root), db)
trie.tracer = newTracer()
// Delete all the elements, check deletion set
@@ -122,3 +123,123 @@ func TestTrieTracerNoop(t *testing.T) {
t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList()))
}
}
+func TestTrieTracePrevValue(t *testing.T) {
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(db)
+ trie.tracer = newTracer()
+
+ paths, blobs := trie.tracer.prevList()
+ if len(paths) != 0 || len(blobs) != 0 {
+ t.Fatalf("Nothing should be tracked")
+ }
+ // Insert a batch of entries, all the nodes should be marked as inserted
+ vals := []struct{ k, v string }{
+ {"do", "verb"},
+ {"ether", "wookiedoo"},
+ {"horse", "stallion"},
+ {"shaman", "horse"},
+ {"doge", "coin"},
+ {"dog", "puppy"},
+ {"somethingveryoddindeedthis is", "myothernodedata"},
+ }
+ for _, val := range vals {
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ paths, blobs = trie.tracer.prevList()
+ if len(paths) != 0 || len(blobs) != 0 {
+ t.Fatalf("Nothing should be tracked")
+ }
+
+ // Commit the changes and re-create with new root
+ root, nodes, _ := trie.Commit(false)
+ if err := db.Update(NewWithNodeSet(nodes)); err != nil {
+ t.Fatal(err)
+ }
+ trie, _ = New(TrieID(root), db)
+ trie.tracer = newTracer()
+ trie.resolveAndTrack(root.Bytes(), nil)
+
+ // Load all nodes in trie
+ for _, val := range vals {
+ trie.TryGet([]byte(val.k))
+ }
+
+ // Ensure all nodes are tracked by tracer with correct prev-values
+ iter := trie.NodeIterator(nil)
+ seen := make(map[string][]byte)
+ for iter.Next(true) {
+ // Embedded nodes are ignored since they are not present in
+ // database.
+ if iter.Hash() == (common.Hash{}) {
+ continue
+ }
+ blob, err := trie.reader.nodeBlob(iter.Path(), iter.Hash())
+ if err != nil {
+ t.Fatal(err)
+ }
+ seen[string(iter.Path())] = common.CopyBytes(blob)
+ }
+
+ paths, blobs = trie.tracer.prevList()
+ if len(paths) != len(seen) || len(blobs) != len(seen) {
+ t.Fatalf("Unexpected tracked values")
+ }
+ for i, path := range paths {
+ blob := blobs[i]
+ prev, ok := seen[string(path)]
+ if !ok {
+ t.Fatalf("Missing node %v", path)
+ }
+ if !bytes.Equal(blob, prev) {
+ t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob)
+ }
+ }
+
+ // Re-open the trie and iterate the trie, ensure nothing will be tracked.
+ // Iterator will not link any loaded nodes to trie.
+ trie, _ = New(TrieID(root), db)
+ trie.tracer = newTracer()
+
+ iter = trie.NodeIterator(nil)
+ for iter.Next(true) {
+ }
+ paths, blobs = trie.tracer.prevList()
+ if len(paths) != 0 || len(blobs) != 0 {
+ t.Fatalf("Nothing should be tracked")
+ }
+
+ // Re-open the trie and generate proof for entries, ensure nothing will
+ // be tracked. Prover will not link any loaded nodes to trie.
+ trie, _ = New(TrieID(root), db)
+ trie.tracer = newTracer()
+ for _, val := range vals {
+ trie.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase())
+ }
+ paths, blobs = trie.tracer.prevList()
+ if len(paths) != 0 || len(blobs) != 0 {
+ t.Fatalf("Nothing should be tracked")
+ }
+
+ // Delete entries from trie, ensure all previous values are correct.
+ trie, _ = New(TrieID(root), db)
+ trie.tracer = newTracer()
+ trie.resolveAndTrack(root.Bytes(), nil)
+
+ for _, val := range vals {
+ trie.TryDelete([]byte(val.k))
+ }
+ paths, blobs = trie.tracer.prevList()
+ if len(paths) != len(seen) || len(blobs) != len(seen) {
+ t.Fatalf("Unexpected tracked values")
+ }
+ for i, path := range paths {
+ blob := blobs[i]
+ prev, ok := seen[string(path)]
+ if !ok {
+ t.Fatalf("Missing node %v", path)
+ }
+ if !bytes.Equal(blob, prev) {
+ t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob)
+ }
+ }
+}
From 1b936fadf7b205b659706705db174fd59796ecdf Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Thu, 26 Sep 2024 10:22:26 +0700
Subject: [PATCH 14/41] all: prep for path-based trie storage (#582)
* all: prep for path-based trie storage
* all: use rawdb.HasLegacyTrieNode() to check for node existence instead of checking the blob length
---
cmd/ronin/snapshot.go | 6 +-
core/rawdb/accessors_state.go | 26 ---
core/rawdb/accessors_trie.go | 263 +++++++++++++++++++++++++
core/rawdb/schema.go | 14 ++
core/state/pruner/pruner.go | 4 +-
core/state/snapshot/conversion.go | 12 +-
core/state/snapshot/generate_test.go | 4 +-
core/state/snapshot/snapshot.go | 4 +-
core/state/sync.go | 2 +-
core/state/sync_test.go | 6 +-
eth/protocols/eth/handler_test.go | 2 +-
eth/protocols/snap/sync.go | 18 +-
eth/protocols/snap/sync_test.go | 10 +-
les/downloader/downloader.go | 2 +-
tests/fuzzers/stacktrie/trie_fuzzer.go | 2 +-
trie/database.go | 10 +-
trie/schema.go | 96 ---------
trie/sync.go | 12 +-
trie/trie_test.go | 4 +-
19 files changed, 325 insertions(+), 172 deletions(-)
create mode 100644 core/rawdb/accessors_trie.go
delete mode 100644 trie/schema.go
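
A brief usage sketch of the presence-check change this patch makes:
HasLegacyTrieNode asks the key-value store directly instead of fetching the
blob and testing its length. It assumes only the rawdb helpers introduced
below plus a memory database:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	node := []byte{0x01, 0x02}
	hash := crypto.Keccak256Hash(node)

	fmt.Println(rawdb.HasLegacyTrieNode(db, hash)) // false: nothing stored yet
	rawdb.WriteLegacyTrieNode(db, hash, node)
	// Presence is answered by db.Has, without copying the blob out, unlike
	// the old len(rawdb.ReadTrieNode(db, hash)) != 0 pattern.
	fmt.Println(rawdb.HasLegacyTrieNode(db, hash)) // true
}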
diff --git a/cmd/ronin/snapshot.go b/cmd/ronin/snapshot.go
index 86bde30e0e..ce9b343b90 100644
--- a/cmd/ronin/snapshot.go
+++ b/cmd/ronin/snapshot.go
@@ -394,8 +394,7 @@ func traverseRawState(ctx *cli.Context) error {
if node != (common.Hash{}) {
 		// Check the presence of the non-empty hash node (embedded nodes
 		// don't have their own hash).
- blob := rawdb.ReadTrieNode(chaindb, node)
- if len(blob) == 0 {
+ if !rawdb.HasLegacyTrieNode(chaindb, node) {
log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account")
}
@@ -423,8 +422,7 @@ func traverseRawState(ctx *cli.Context) error {
 			// Check the presence of the non-empty hash node (embedded nodes
 			// don't have their own hash).
if node != (common.Hash{}) {
- blob := rawdb.ReadTrieNode(chaindb, node)
- if len(blob) == 0 {
+ if !rawdb.HasLegacyTrieNode(chaindb, node) {
log.Error("Missing trie node(storage)", "hash", node)
return errors.New("missing storage")
}
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index d6b1053b60..1438aad0ff 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -61,35 +61,9 @@ func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
}
}
-// HasTrieNode checks if the trie node with the provided hash is present in db.
-func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
- ok, _ := db.Has(hash.Bytes())
- return ok
-}
-
// DeleteCode deletes the specified contract code from the database.
func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(codeKey(hash)); err != nil {
log.Crit("Failed to delete contract code", "err", err)
}
}
-
-// ReadTrieNode retrieves the trie node of the provided hash.
-func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
- data, _ := db.Get(hash.Bytes())
- return data
-}
-
-// WriteTrieNode writes the provided trie node database.
-func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
- if err := db.Put(hash.Bytes(), node); err != nil {
- log.Crit("Failed to store trie node", "err", err)
- }
-}
-
-// DeleteTrieNode deletes the specified trie node from the database.
-func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
- if err := db.Delete(hash.Bytes()); err != nil {
- log.Crit("Failed to delete trie node", "err", err)
- }
-}
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
new file mode 100644
index 0000000000..e240213025
--- /dev/null
+++ b/core/rawdb/accessors_trie.go
@@ -0,0 +1,263 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "golang.org/x/crypto/sha3"
+)
+
+// HashScheme is the legacy hash-based state scheme with which trie nodes are
+// stored in the disk with node hash as the database key. The advantage of this
+// scheme is that different versions of trie nodes can be stored in disk, which
+// is very beneficial for constructing archive nodes. The drawback is it will
+// store different trie nodes on the same path to different locations on the disk
+// with no data locality, and it's unfriendly for designing state pruning.
+//
+// Now this scheme is still kept for backward compatibility, and it will be used
+// for archive nodes and some other tries (e.g. the light trie).
+const HashScheme = "hashScheme"
+
+// PathScheme is the new path-based state scheme with which trie nodes are stored
+// in the disk with node path as the database key. This scheme will only store one
+// version of state data in the disk, which means that the state pruning operation
+// is native. At the same time, this scheme will put adjacent trie nodes in the same
+// area of the disk with good data locality property. But this scheme needs to rely
+// on extra state diffs to survive deep reorg.
+const PathScheme = "pathScheme"
+
+// nodeHasher used to derive the hash of trie node.
+type nodeHasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) }
+func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) }
+
+func (h *nodeHasher) hashData(data []byte) (n common.Hash) {
+ h.sha.Reset()
+ h.sha.Write(data)
+ h.sha.Read(n[:])
+ return n
+}
+
+// ReadAccountTrieNode retrieves the account trie node and the associated node
+// hash with the specified node path.
+func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) {
+ data, err := db.Get(accountTrieNodeKey(path))
+ if err != nil {
+ return nil, common.Hash{}
+ }
+ hasher := newNodeHasher()
+ defer returnHasherToPool(hasher)
+ return data, hasher.hashData(data)
+}
+
+// HasAccountTrieNode checks the account trie node presence with the specified
+// node path and the associated node hash.
+func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) bool {
+ data, err := db.Get(accountTrieNodeKey(path))
+ if err != nil {
+ return false
+ }
+ hasher := newNodeHasher()
+ defer returnHasherToPool(hasher)
+ return hasher.hashData(data) == hash
+}
+
+// WriteAccountTrieNode writes the provided account trie node into database.
+func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
+ if err := db.Put(accountTrieNodeKey(path), node); err != nil {
+ log.Crit("Failed to store account trie node", "err", err)
+ }
+}
+
+// DeleteAccountTrieNode deletes the specified account trie node from the database.
+func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) {
+ if err := db.Delete(accountTrieNodeKey(path)); err != nil {
+ log.Crit("Failed to delete account trie node", "err", err)
+ }
+}
+
+// ReadStorageTrieNode retrieves the storage trie node and the associated node
+// hash with the specified node path.
+func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) {
+ data, err := db.Get(storageTrieNodeKey(accountHash, path))
+ if err != nil {
+ return nil, common.Hash{}
+ }
+ hasher := newNodeHasher()
+ defer returnHasherToPool(hasher)
+ return data, hasher.hashData(data)
+}
+
+// HasStorageTrieNode checks the storage trie node presence with the provided
+// node path and the associated node hash.
+func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash) bool {
+ data, err := db.Get(storageTrieNodeKey(accountHash, path))
+ if err != nil {
+ return false
+ }
+ hasher := newNodeHasher()
+ defer returnHasherToPool(hasher)
+ return hasher.hashData(data) == hash
+}
+
+// WriteStorageTrieNode writes the provided storage trie node into database.
+func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) {
+ if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil {
+ log.Crit("Failed to store storage trie node", "err", err)
+ }
+}
+
+// DeleteStorageTrieNode deletes the specified storage trie node from the database.
+func DeleteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte) {
+ if err := db.Delete(storageTrieNodeKey(accountHash, path)); err != nil {
+ log.Crit("Failed to delete storage trie node", "err", err)
+ }
+}
+
+// ReadLegacyTrieNode retrieves the legacy trie node with the given
+// associated node hash.
+func ReadLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+ data, err := db.Get(hash.Bytes())
+ if err != nil {
+ return nil
+ }
+ return data
+}
+
+// HasLegacyTrieNode checks if the trie node with the provided hash is present in db.
+func HasLegacyTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
+ ok, _ := db.Has(hash.Bytes())
+ return ok
+}
+
+// WriteLegacyTrieNode writes the provided legacy trie node to database.
+func WriteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
+ if err := db.Put(hash.Bytes(), node); err != nil {
+ log.Crit("Failed to store legacy trie node", "err", err)
+ }
+}
+
+// DeleteLegacyTrieNode deletes the specified legacy trie node from database.
+func DeleteLegacyTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
+ if err := db.Delete(hash.Bytes()); err != nil {
+ log.Crit("Failed to delete legacy trie node", "err", err)
+ }
+}
+
+// HasTrieNode checks the trie node presence with the provided node info and
+// the associated node hash.
+func HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) bool {
+ switch scheme {
+ case HashScheme:
+ return HasLegacyTrieNode(db, hash)
+ case PathScheme:
+ if owner == (common.Hash{}) {
+ return HasAccountTrieNode(db, path, hash)
+ }
+ return HasStorageTrieNode(db, owner, path, hash)
+ default:
+ panic(fmt.Sprintf("Unknown scheme %v", scheme))
+ }
+}
+
+// ReadTrieNode retrieves the trie node from database with the provided node info
+// and associated node hash.
+// hashScheme-based lookup requires the following:
+// - hash
+//
+// pathScheme-based lookup requires the following:
+// - owner
+// - path
+func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash, scheme string) []byte {
+ switch scheme {
+ case HashScheme:
+ return ReadLegacyTrieNode(db, hash)
+ case PathScheme:
+ var (
+ blob []byte
+ nHash common.Hash
+ )
+ if owner == (common.Hash{}) {
+ blob, nHash = ReadAccountTrieNode(db, path)
+ } else {
+ blob, nHash = ReadStorageTrieNode(db, owner, path)
+ }
+ if nHash != hash {
+ return nil
+ }
+ return blob
+ default:
+ panic(fmt.Sprintf("Unknown scheme %v", scheme))
+ }
+}
+
+// WriteTrieNode writes the trie node into database with the provided node info
+// and associated node hash.
+// hashScheme-based lookup requires the following:
+// - hash
+//
+// pathScheme-based lookup requires the following:
+// - owner
+// - path
+func WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte, scheme string) {
+ switch scheme {
+ case HashScheme:
+ WriteLegacyTrieNode(db, hash, node)
+ case PathScheme:
+ if owner == (common.Hash{}) {
+ WriteAccountTrieNode(db, path, node)
+ } else {
+ WriteStorageTrieNode(db, owner, path, node)
+ }
+ default:
+ panic(fmt.Sprintf("Unknown scheme %v", scheme))
+ }
+}
+
+// DeleteTrieNode deletes the trie node from database with the provided node info
+// and associated node hash.
+// hashScheme-based lookup requires the following:
+// - hash
+//
+// pathScheme-based lookup requires the following:
+// - owner
+// - path
+func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, scheme string) {
+ switch scheme {
+ case HashScheme:
+ DeleteLegacyTrieNode(db, hash)
+ case PathScheme:
+ if owner == (common.Hash{}) {
+ DeleteAccountTrieNode(db, path)
+ } else {
+ DeleteStorageTrieNode(db, owner, path)
+ }
+ default:
+ panic(fmt.Sprintf("Unknown scheme %v", scheme))
+ }
+}
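
A hedged usage sketch of the scheme-dispatched accessors above; the owner,
path and node bytes are arbitrary illustrations. With an empty owner,
PathScheme keys the node by its path under the account-trie prefix, while
HashScheme keys the same node by its hash, so the two lookups are independent:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	var (
		owner = common.Hash{} // empty owner selects the account trie
		path  = []byte{0x1, 0x2, 0x3}
		node  = []byte{0xde, 0xad, 0xbe, 0xef}
		hash  = crypto.Keccak256Hash(node)
	)
	// Under PathScheme the database key is derived from (owner, path);
	// the hash is only used to verify what was read back.
	rawdb.WriteTrieNode(db, owner, path, hash, node, rawdb.PathScheme)
	fmt.Println(rawdb.HasTrieNode(db, owner, path, hash, rawdb.PathScheme)) // true

	// Under HashScheme the very same node would be keyed by its hash, so
	// the path-scheme write above is invisible to a hash-scheme lookup.
	fmt.Println(rawdb.HasTrieNode(db, owner, path, hash, rawdb.HashScheme)) // false
}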
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index d92a0de0f9..ae9cbcff96 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -103,6 +103,10 @@ var (
internalTxsPrefix = []byte("itxs") // internalTxsPrefix + block hash -> internal transactions
dirtyAccountsKey = []byte("dacc") // dirtyAccountsPrefix + block hash -> dirty accounts
+ // Path-based trie node scheme.
+ trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
+ trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
+
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
genesisPrefix = []byte("ethereum-genesis-") // genesis state prefix for the db
@@ -233,6 +237,16 @@ func genesisStateSpecKey(hash common.Hash) []byte {
return append(genesisPrefix, hash.Bytes()...)
}
+// accountTrieNodeKey = trieNodeAccountPrefix + nodePath.
+func accountTrieNodeKey(path []byte) []byte {
+ return append(trieNodeAccountPrefix, path...)
+}
+
+// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
+func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
+ return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...)
+}
+
func snapshotConsortiumKey(hash common.Hash) []byte {
return append(snapshotConsortiumPrefix, hash.Bytes()...)
}
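
For reference, the byte layout produced by the two key builders added above,
mirrored locally here because the builders are unexported ('A' is 0x41 and
'O' is 0x4f):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// Local mirrors of the unexported key builders above, for illustration only.
func accountTrieNodeKey(path []byte) []byte {
	return append([]byte("A"), path...)
}

func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
	return append(append([]byte("O"), accountHash.Bytes()...), path...)
}

func main() {
	path := []byte{0x1, 0x2}
	fmt.Printf("%x\n", accountTrieNodeKey(path))                    // 410102
	fmt.Printf("%x\n", storageTrieNodeKey(common.Hash{0xaa}, path)) // 4faa00...000102 (prefix + 32-byte owner + path)
}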
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 7b01a74a2b..c46432ee68 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -265,7 +265,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
- if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 {
+ if !rawdb.HasLegacyTrieNode(p.db, root) {
// The special case is for clique based networks(rinkeby, goerli
// and some other private networks), it's possible that two
// consecutive blocks will have same root. In this case snapshot
@@ -279,7 +279,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// as the pruning target.
var found bool
for i := len(layers) - 2; i >= 2; i-- {
- if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 {
+		if rawdb.HasLegacyTrieNode(p.db, layers[i].Root()) {
root = layers[i].Root()
found = true
log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index 15bb43b842..abf541aef6 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -43,7 +43,7 @@ type trieKV struct {
type (
// trieGeneratorFn is the interface of trie generation which can
// be implemented by different trie algorithm.
- trieGeneratorFn func(db ethdb.KeyValueWriter, scheme trie.NodeScheme, owner common.Hash, in chan (trieKV), out chan (common.Hash))
+ trieGeneratorFn func(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan (trieKV), out chan (common.Hash))
// leafCallbackFn is the callback invoked at the leaves of the trie,
// returns the subtrie root with the specified subtrie identifier.
@@ -52,12 +52,12 @@ type (
// GenerateAccountTrieRoot takes an account iterator and reproduces the root hash.
func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) {
- return generateTrieRoot(nil, nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
+ return generateTrieRoot(nil, "", it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true)
}
// GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash.
func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) {
- return generateTrieRoot(nil, nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true)
+ return generateTrieRoot(nil, "", it, account, stackTrieGenerate, nil, newGenerateStats(), true)
}
// GenerateTrie takes the whole snapshot tree as the input, traverses all the
@@ -243,7 +243,7 @@ func runReport(stats *generateStats, stop chan bool) {
// generateTrieRoot generates the trie hash based on the snapshot iterator.
// It can be used for generating account trie, storage trie or even the
// whole state which connects the accounts and the corresponding storages.
-func generateTrieRoot(db ethdb.KeyValueWriter, scheme trie.NodeScheme, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
+func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) {
var (
in = make(chan trieKV) // chan to pass leaves
out = make(chan common.Hash, 1) // chan to collect result
@@ -361,13 +361,13 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme trie.NodeScheme, it Iterat
return stop(nil)
}
-func stackTrieGenerate(db ethdb.KeyValueWriter, scheme trie.NodeScheme, owner common.Hash, in chan trieKV, out chan common.Hash) {
+func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
var nodeWriter trie.NodeWriteFunc
 	// Implement nodeWriter if db exists, otherwise leave it as nil.
if db != nil {
nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- scheme.WriteTrieNode(db, owner, path, hash, blob)
+ rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
}
}
t := trie.NewStackTrieWithOwner(nodeWriter, owner)
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index ce1e358a3f..f4d2eeb15b 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -115,12 +115,12 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
t.Helper()
accIt := snap.AccountIterator(common.Hash{})
defer accIt.Release()
- snapRoot, err := generateTrieRoot(nil, nil, accIt, common.Hash{}, stackTrieGenerate,
+ snapRoot, err := generateTrieRoot(nil, "", accIt, common.Hash{}, stackTrieGenerate,
func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
storageIt, _ := snap.StorageIterator(accountHash, common.Hash{})
defer storageIt.Release()
- hash, err := generateTrieRoot(nil, nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+ hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false)
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index f111c96313..1fcb40a354 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -767,14 +767,14 @@ func (t *Tree) Verify(root common.Hash) error {
}
defer acctIt.Release()
- got, err := generateTrieRoot(nil, nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
+ got, err := generateTrieRoot(nil, "", acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
if err != nil {
return common.Hash{}, err
}
defer storageIt.Release()
- hash, err := generateTrieRoot(nil, nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
+ hash, err := generateTrieRoot(nil, "", storageIt, accountHash, stackTrieGenerate, nil, stat, false)
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/sync.go b/core/state/sync.go
index e2b414259a..104f499fb0 100644
--- a/core/state/sync.go
+++ b/core/state/sync.go
@@ -27,7 +27,7 @@ import (
)
// NewStateSync create a new state trie download scheduler.
-func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(keys [][]byte, leaf []byte) error, scheme trie.NodeScheme) *trie.Sync {
+func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync {
// Register the storage slot callback if the external callback is specified.
var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
if onLeaf != nil {
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 58329481d6..ffea17cee2 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -663,14 +663,14 @@ func TestIncompleteStateSync(t *testing.T) {
for i, path := range addedPaths {
owner, inner := trie.ResolvePath([]byte(path))
hash := addedHashes[i]
- val := scheme.ReadTrieNode(dstDb, owner, inner, hash)
+ val := rawdb.ReadTrieNode(dstDb, owner, inner, hash, scheme)
if val == nil {
t.Error("missing trie node")
}
- scheme.DeleteTrieNode(dstDb, owner, inner, hash)
+ rawdb.DeleteTrieNode(dstDb, owner, inner, hash, scheme)
if err := checkStateConsistency(dstDb, srcRoot); err == nil {
t.Errorf("trie inconsistency not caught, missing: %v", path)
}
- scheme.WriteTrieNode(dstDb, owner, inner, hash, val)
+ rawdb.WriteTrieNode(dstDb, owner, inner, hash, val, scheme)
}
}
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index c805f76c11..a038316347 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -502,7 +502,7 @@ func testGetNodeData(t *testing.T, protocol uint, drop bool) {
// Reconstruct state tree from the received data.
reconstructDB := rawdb.NewMemoryDatabase()
for i := 0; i < len(data); i++ {
- rawdb.WriteTrieNode(reconstructDB, hashes[i], data[i])
+ rawdb.WriteLegacyTrieNode(reconstructDB, hashes[i], data[i])
}
// Sanity check whether all state matches.
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index b798f9afa2..d04358ebf3 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -415,7 +415,7 @@ type SyncPeer interface {
// - The peer delivers a refusal to serve the requested state
type Syncer struct {
db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
- scheme trie.NodeScheme // Node scheme used in node database
+ scheme string // Node scheme used in node database
root common.Hash // Current state trie root being synced
tasks []*accountTask // Current account task set being synced
@@ -481,7 +481,7 @@ type Syncer struct {
// NewSyncer creates a new snapshot syncer to download the Ethereum state over the
// snap protocol.
-func NewSyncer(db ethdb.KeyValueStore, scheme trie.NodeScheme) *Syncer {
+func NewSyncer(db ethdb.KeyValueStore, scheme string) *Syncer {
return &Syncer{
db: db,
scheme: scheme,
@@ -721,7 +721,7 @@ func (s *Syncer) loadSyncStatus() {
},
}
task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- s.scheme.WriteTrieNode(task.genBatch, owner, path, hash, val)
+ rawdb.WriteTrieNode(task.genBatch, owner, path, hash, val, s.scheme)
})
for accountHash, subtasks := range task.SubTasks {
@@ -733,7 +733,7 @@ func (s *Syncer) loadSyncStatus() {
},
}
subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- s.scheme.WriteTrieNode(subtask.genBatch, owner, path, hash, val)
+ rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme)
}, accountHash)
}
}
@@ -789,7 +789,7 @@ func (s *Syncer) loadSyncStatus() {
SubTasks: make(map[common.Hash][]*storageTask),
genBatch: batch,
genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- s.scheme.WriteTrieNode(batch, owner, path, hash, val)
+ rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}),
})
log.Debug("Created account sync task", "from", next, "last", last)
@@ -1803,7 +1803,7 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
}
// Check if the account is a contract with an unknown storage trie
if account.Root != emptyRoot {
- if !s.scheme.HasTrieNode(s.db, res.hashes[i], nil, account.Root) {
+ if !rawdb.HasTrieNode(s.db, res.hashes[i], nil, account.Root, s.scheme) {
// If there was a previous large state retrieval in progress,
// don't restart it from scratch. This happens if a sync cycle
// is interrupted and resumed later. However, *do* update the
@@ -1976,7 +1976,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
root: acc.Root,
genBatch: batch,
genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- s.scheme.WriteTrieNode(batch, owner, path, hash, val)
+ rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}, account),
})
for r.Next() {
@@ -1992,7 +1992,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
root: acc.Root,
genBatch: batch,
genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- s.scheme.WriteTrieNode(batch, owner, path, hash, val)
+ rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}, account),
})
}
@@ -2039,7 +2039,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
if i < len(res.hashes)-1 || res.subTask == nil {
tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- s.scheme.WriteTrieNode(batch, owner, path, hash, val)
+ rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
}, account)
for j := 0; j < len(res.hashes[i]); j++ {
tr.Update(res.hashes[i][j][:], res.slots[i][j])
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 59fb52f9ff..ab3f691c84 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -624,7 +624,7 @@ func TestSyncBloatedProof(t *testing.T) {
}
}
-func setupSyncer(scheme trie.NodeScheme, peers ...*testPeer) *Syncer {
+func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
stateDb := rawdb.NewMemoryDatabase()
syncer := NewSyncer(stateDb, scheme)
for _, peer := range peers {
@@ -1355,7 +1355,7 @@ func getCodeByHash(hash common.Hash) []byte {
}
 // makeAccountTrieNoStorage spits out a trie, along with the leaves
-func makeAccountTrieNoStorage(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
+func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
 	// Create empty Trie
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
@@ -1390,7 +1390,7 @@ func makeAccountTrieNoStorage(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hash.
-func makeBoundaryAccountTrie(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
+func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
@@ -1450,7 +1450,7 @@ func makeBoundaryAccountTrie(n int) (trie.NodeScheme, *trie.Trie, entrySlice) {
 // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
 // has a unique storage set. Code is true when you pass a random code hash to the account
-func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (trie.NodeScheme, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
+func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie = trie.NewEmpty(db)
@@ -1503,7 +1503,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
}
 // makeAccountTrieWithStorage spits out a trie, along with the leaves
-func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (trie.NodeScheme, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
+func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
db = trie.NewDatabase(rawdb.NewMemoryDatabase())
accTrie = trie.NewEmpty(db)
diff --git a/les/downloader/downloader.go b/les/downloader/downloader.go
index 109406d1e1..ab637529ef 100644
--- a/les/downloader/downloader.go
+++ b/les/downloader/downloader.go
@@ -229,7 +229,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
headerProcCh: make(chan []*types.Header, 1),
quitCh: make(chan struct{}),
stateCh: make(chan dataPack),
- SnapSyncer: snap.NewSyncer(stateDb, nil),
+ SnapSyncer: snap.NewSyncer(stateDb, ""),
stateSyncStart: make(chan *stateSync),
syncStatsState: stateSyncStats{
processed: rawdb.ReadFastTrieProgress(stateDb),
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index b230e8fd98..074e7b1c30 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -151,7 +151,7 @@ func (f *fuzzer) fuzz() int {
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB))
trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- dbB.Scheme().WriteTrieNode(spongeB, owner, path, hash, blob)
+ rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme())
})
vals kvs
useful bool
diff --git a/trie/database.go b/trie/database.go
index 28fadba104..e0225eb18a 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -415,7 +415,7 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
memcacheDirtyMissMeter.Mark(1)
// Content unavailable in memory, attempt to retrieve from disk
- enc := rawdb.ReadTrieNode(db.diskdb, hash)
+ enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash)
if len(enc) != 0 {
if db.cleans != nil {
db.cleans.Set(hash[:], enc)
@@ -580,7 +580,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
for size > limit && oldest != (common.Hash{}) {
// Fetch the oldest referenced node and push into the batch
node := db.dirties[oldest]
- rawdb.WriteTrieNode(batch, oldest, node.rlp())
+ rawdb.WriteLegacyTrieNode(batch, oldest, node.rlp())
// If we exceeded the ideal batch size, commit and reset
if batch.ValueSize() >= ethdb.IdealBatchSize {
@@ -710,7 +710,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
return err
}
// If we've reached an optimal batch size, commit and start over
- rawdb.WriteTrieNode(batch, hash, node.rlp())
+ rawdb.WriteLegacyTrieNode(batch, hash, node.rlp())
if callback != nil {
callback(hash)
}
@@ -895,6 +895,6 @@ func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, st
}
// Scheme returns the node scheme used in the database. Right now, we only support hash scheme.
-func (db *Database) Scheme() NodeScheme {
- return &hashScheme{}
+func (db *Database) Scheme() string {
+ return rawdb.HashScheme
}
diff --git a/trie/schema.go b/trie/schema.go
deleted file mode 100644
index 72b67aa7d9..0000000000
--- a/trie/schema.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/ethdb"
-)
-
-const (
- HashScheme = "hashScheme" // Identifier of hash based node scheme
-
- // Path-based scheme will be introduced in the following PRs.
- // PathScheme = "pathScheme" // Identifier of path based node scheme
-)
-
-// NodeShceme desribes the scheme for interacting nodes in disk.
-type NodeScheme interface {
- // Name returns the identifier of node scheme.
- Name() string
-
- // HasTrieNode checks the trie node presence with the provided node info and
- // the associated node hash.
- HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) bool
-
- // ReadTrieNode retrieves the trie node from database with the provided node
- // info and the associated node hash.
- ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) []byte
-
- // WriteTrieNode writes the trie node into database with the provided node
- // info and associated node hash.
- WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte)
-
- // DeleteTrieNode deletes the trie node from database with the provided node
- // info and associated node hash.
- DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash)
-
- // IsTrieNode returns an indicator if the given database key is the key of
- // trie node according to the scheme.
- IsTrieNode(key []byte) (bool, []byte)
-}
-
-type hashScheme struct{}
-
-// Name returns the identifier of hash based scheme.
-func (scheme *hashScheme) Name() string {
- return HashScheme
-}
-
-// HasTrieNode checks the trie node presence with the provided node info and
-// the associated node hash.
-func (scheme *hashScheme) HasTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) bool {
- return rawdb.HasTrieNode(db, hash)
-}
-
-// ReadTrieNode retrieves the trie node from database with the provided node info
-// and associated node hash.
-func (scheme *hashScheme) ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash common.Hash) []byte {
- return rawdb.ReadTrieNode(db, hash)
-}
-
-// WriteTrieNode writes the trie node into database with the provided node info
-// and associated node hash.
-func (scheme *hashScheme) WriteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash, node []byte) {
- rawdb.WriteTrieNode(db, hash, node)
-}
-
-// DeleteTrieNode deletes the trie node from database with the provided node info
-// and associated node hash.
-func (scheme *hashScheme) DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, hash common.Hash) {
- rawdb.DeleteTrieNode(db, hash)
-}
-
-// IsTrieNode returns an indicator if the given database key is the key of trie
-// node according to the scheme.
-func (scheme *hashScheme) IsTrieNode(key []byte) (bool, []byte) {
- if len(key) == common.HashLength {
- return true, key
- }
- return false, nil
-}
diff --git a/trie/sync.go b/trie/sync.go
index 1ea443e3f7..d68ea85673 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -137,7 +137,7 @@ func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
- scheme NodeScheme // Node scheme descriptor used in database.
+ scheme string // Node scheme descriptor used in database.
database ethdb.KeyValueReader // Persistent database to check for existing entries
membatch *syncMemBatch // Memory buffer to avoid frequent database writes
nodeReqs map[string]*nodeRequest // Pending requests pertaining to a trie node path
@@ -164,7 +164,7 @@ type Sync struct {
type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
// NewSync creates a new trie data download scheduler.
-func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom, scheme NodeScheme) *Sync {
+func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom, scheme string) *Sync {
ts := &Sync{
scheme: scheme,
database: database,
@@ -194,8 +194,8 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, par
// Bloom filter says this might be a duplicate, double check.
// If database says yes, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
- blob := rawdb.ReadTrieNode(s.database, root)
- if len(blob) > 0 {
+ owner, inner := ResolvePath(path)
+ if rawdb.HasTrieNode(s.database, owner, inner, root, s.scheme) {
return
}
// False positive, bump fault meter
@@ -362,7 +362,7 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
// Dump the membatch into a database dbw
for path, value := range s.membatch.nodes {
owner, inner := ResolvePath([]byte(path))
- s.scheme.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value)
+ rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme)
hash := s.membatch.hashes[path]
if s.bloom != nil {
s.bloom.Add(hash[:])
@@ -481,7 +481,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
// If database says yes, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
owner, inner := ResolvePath(child.path)
- if s.scheme.HasTrieNode(s.database, owner, inner, chash) {
+ if rawdb.HasTrieNode(s.database, owner, inner, chash, s.scheme) {
continue
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 957b7926ac..02efa61043 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -859,7 +859,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- db.Scheme().WriteTrieNode(stackTrieSponge, owner, path, hash, blob)
+ rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
}
stTrie := NewStackTrie(writeFn)
// Fill the trie with elements, should start 0, otherwise nodes will be nil in the first time.
@@ -919,7 +919,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- db.Scheme().WriteTrieNode(stackTrieSponge, owner, path, hash, blob)
+ rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
}
stTrie := NewStackTrie(writeFn)
// Add a single small-element to the trie(s)
From e844c329a2aa68905a2eb788067e41ae459060e6 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Thu, 26 Sep 2024 12:14:37 +0700
Subject: [PATCH 15/41] trie: implement NodeBlob api for trie iterator (#584)
* trie: implement NodeBlob API for trie iterator
This functionality is needed in the new path-based storage scheme,
but can be implemented in a separate PR.
When an account is deleted, all of its storage slots should be
nuked out from the disk as well. In the hash-based storage scheme
they are still left on disk, but in the new scheme they will be
iterated and marked as deleted.
But why is the NodeBlob API needed in this scenario? Because when
a node is marked as deleted, its previous value must also be
recorded in order to construct the reverse diff.
* fuzzers/stacktrie: enable test
---------
Co-authored-by: Gary Rong
---
tests/fuzzers/stacktrie/trie_fuzzer.go | 83 +++++++++++++-------------
trie/iterator.go | 38 ++++++++++++
trie/iterator_test.go | 51 ++++++++++++++++
trie/utils_test.go | 5 +-
4 files changed, 132 insertions(+), 45 deletions(-)
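
A sketch of consuming the new NodeBlob API from outside the package, following
the same commit-then-iterate pattern as the tests in this patch; it assumes the
exported trie helpers (NewEmpty, NewWithNodeSet) behave as in the hunks above.
Embedded nodes report an empty hash and carry no standalone blob, so they are
skipped:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
	tr := trie.NewEmpty(triedb)
	tr.Update([]byte("dog"), []byte("puppy"))
	tr.Update([]byte("doge"), []byte("coin"))

	// Commit and push the resulting nodeset into the trie database,
	// mirroring the pattern used by the tests in this series.
	_, nodes, _ := tr.Commit(false)
	if nodes != nil {
		triedb.Update(trie.NewWithNodeSet(nodes))
	}

	blobs := make(map[string][]byte) // path -> rlp-encoded node
	it := tr.NodeIterator(nil)
	for it.Next(true) {
		if it.Hash() == (common.Hash{}) {
			continue // embedded node: no standalone blob exists for it
		}
		blobs[string(it.Path())] = common.CopyBytes(it.NodeBlob())
	}
	fmt.Println(len(blobs))
}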
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 074e7b1c30..a0ba68e211 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
@@ -213,47 +214,47 @@ func (f *fuzzer) fuzz() int {
}
// Ensure all the nodes are persisted correctly
// Need tracked deleted nodes.
- // var (
- // nodeset = make(map[string][]byte) // path -> blob
- // trieC = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- // if crypto.Keccak256Hash(blob) != hash {
- // panic("invalid node blob")
- // }
- // if owner != (common.Hash{}) {
- // panic("invalid node owner")
- // }
- // nodeset[string(path)] = common.CopyBytes(blob)
- // })
- // checked int
- // )
- // for _, kv := range vals {
- // trieC.Update(kv.k, kv.v)
- // }
- // rootC, _ := trieC.Commit()
- // if rootA != rootC {
- // panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC))
- // }
- // trieA, _ = trie.New(trie.TrieID(rootA), dbA)
- // iterA := trieA.NodeIterator(nil)
- // for iterA.Next(true) {
- // if iterA.Hash() == (common.Hash{}) {
- // if _, present := nodeset[string(iterA.Path())]; present {
- // panic("unexpected tiny node")
- // }
- // continue
- // }
- // nodeBlob, present := nodeset[string(iterA.Path())]
- // if !present {
- // panic("missing node")
- // }
- // if !bytes.Equal(nodeBlob, iterA.NodeBlob()) {
- // panic("node blob is not matched")
- // }
- // checked += 1
- // }
- // if checked != len(nodeset) {
- // panic("node number is not matched")
- // }
+ var (
+ nodeset = make(map[string][]byte) // path -> blob
+ trieC = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ if crypto.Keccak256Hash(blob) != hash {
+ panic("invalid node blob")
+ }
+ if owner != (common.Hash{}) {
+ panic("invalid node owner")
+ }
+ nodeset[string(path)] = common.CopyBytes(blob)
+ })
+ checked int
+ )
+ for _, kv := range vals {
+ trieC.Update(kv.k, kv.v)
+ }
+ rootC, _ := trieC.Commit()
+ if rootA != rootC {
+ panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC))
+ }
+ trieA, _ = trie.New(trie.TrieID(rootA), dbA)
+ iterA := trieA.NodeIterator(nil)
+ for iterA.Next(true) {
+ if iterA.Hash() == (common.Hash{}) {
+ if _, present := nodeset[string(iterA.Path())]; present {
+ panic("unexpected tiny node")
+ }
+ continue
+ }
+ nodeBlob, present := nodeset[string(iterA.Path())]
+ if !present {
+ panic("missing node")
+ }
+ if !bytes.Equal(nodeBlob, iterA.NodeBlob()) {
+ panic("node blob is not matched")
+ }
+ checked += 1
+ }
+ if checked != len(nodeset) {
+ panic("node number is not matched")
+ }
return 1
}
diff --git a/trie/iterator.go b/trie/iterator.go
index 39a9ebcefc..20c4d44fb6 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -86,6 +86,10 @@ type NodeIterator interface {
// For leaf nodes, the last element of the path is the 'terminator symbol' 0x10.
Path() []byte
+ // NodeBlob returns the rlp-encoded value of the current iterated node.
+ // If the node is embedded in its parent, nil is returned instead.
+ NodeBlob() []byte
+
// Leaf returns true iff the current node is a leaf node.
Leaf() bool
@@ -227,6 +231,18 @@ func (it *nodeIterator) Path() []byte {
return it.path
}
+func (it *nodeIterator) NodeBlob() []byte {
+ if it.Hash() == (common.Hash{}) {
+ return nil // skip the non-standalone node
+ }
+ blob, err := it.resolveBlob(it.Hash().Bytes(), it.Path())
+ if err != nil {
+ it.err = err
+ return nil
+ }
+ return blob
+}
+
func (it *nodeIterator) Error() error {
if it.err == errIteratorEnd {
return nil
@@ -369,6 +385,20 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
return it.trie.reader.node(path, common.BytesToHash(hash))
}
+func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
+ if it.resolver != nil {
+ if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 {
+ return blob, nil
+ }
+ }
+ // Retrieve the specified node from the underlying node reader.
+ // it.trie.resolveAndTrack is not used since that function tracks the
+ // loaded blob, which is not required here: the loaded nodes won't be
+ // linked to the trie at all, and tracking them may lead to an
+ // out-of-memory issue.
+ return it.trie.reader.nodeBlob(path, common.BytesToHash(hash))
+}
+
func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
if hash, ok := st.node.(hashNode); ok {
resolved, err := it.resolveHash(hash, path)
@@ -557,6 +587,10 @@ func (it *differenceIterator) Path() []byte {
return it.b.Path()
}
+func (it *differenceIterator) NodeBlob() []byte {
+ return it.b.NodeBlob()
+}
+
func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueStore) {
panic("not implemented")
}
@@ -668,6 +702,10 @@ func (it *unionIterator) Path() []byte {
return (*it.items)[0].Path()
}
+func (it *unionIterator) NodeBlob() []byte {
+ return (*it.items)[0].NodeBlob()
+}
+
func (it *unionIterator) AddResolver(resolver ethdb.KeyValueStore) {
panic("not implemented")
}
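The new method composes with the existing resolver hook; a usage sketch
(externalKV and process are stand-ins, not real API):

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// dumpBlobs walks a trie and hands every standalone node's encoded blob
// to process, optionally serving reads from a warm key-value store.
func dumpBlobs(tr *trie.Trie, externalKV ethdb.KeyValueStore, process func(common.Hash, []byte)) {
	it := tr.NodeIterator(nil)
	it.AddResolver(externalKV) // resolve blobs from the warm store first
	for it.Next(true) {
		if it.Hash() == (common.Hash{}) {
			continue // embedded nodes have no standalone blob
		}
		process(it.Hash(), it.NodeBlob())
	}
}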
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index d0e9b7f128..6fc6eea782 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -563,3 +563,54 @@ func TestNodeIteratorLargeTrie(t *testing.T) {
t.Fatalf("Too many lookups during seek, have %d want %d", have, want)
}
}
+
+func TestIteratorNodeBlob(t *testing.T) {
+ var (
+ db = rawdb.NewMemoryDatabase()
+ triedb = NewDatabase(db)
+ trie = NewEmpty(triedb)
+ )
+ vals := []struct{ k, v string }{
+ {"do", "verb"},
+ {"ether", "wookiedoo"},
+ {"horse", "stallion"},
+ {"shaman", "horse"},
+ {"doge", "coin"},
+ {"dog", "puppy"},
+ {"somethingveryoddindeedthis is", "myothernodedata"},
+ }
+ all := make(map[string]string)
+ for _, val := range vals {
+ all[val.k] = val.v
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ trie.Commit(false)
+ triedb.Cap(0)
+
+ found := make(map[common.Hash][]byte)
+ it := trie.NodeIterator(nil)
+ for it.Next(true) {
+ if it.Hash() == (common.Hash{}) {
+ continue
+ }
+ found[it.Hash()] = it.NodeBlob()
+ }
+
+ dbIter := db.NewIterator(nil, nil)
+ defer dbIter.Release()
+
+ var count int
+ for dbIter.Next() {
+ got, present := found[common.BytesToHash(dbIter.Key())]
+ if !present {
+ t.Fatalf("Miss trie node %v", dbIter.Key())
+ }
+ if !bytes.Equal(got, dbIter.Value()) {
+ t.Fatalf("Unexpected trie node want %v got %v", dbIter.Value(), got)
+ }
+ count += 1
+ }
+ if count != len(found) {
+ t.Fatal("Find extra trie node via iterator")
+ }
+}
diff --git a/trie/utils_test.go b/trie/utils_test.go
index d9e2295442..011d939671 100644
--- a/trie/utils_test.go
+++ b/trie/utils_test.go
@@ -173,10 +173,7 @@ func TestTrieTracePrevValue(t *testing.T) {
if iter.Hash() == (common.Hash{}) {
continue
}
- blob, err := trie.reader.nodeBlob(iter.Path(), iter.Hash())
- if err != nil {
- t.Fatal(err)
- }
+ blob := iter.NodeBlob()
seen[string(iter.Path())] = common.CopyBytes(blob)
}
From 11e077b8d8ba9d888bc47a3e607410cb52130ed7 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Thu, 26 Sep 2024 14:41:36 +0700
Subject: [PATCH 16/41] trie: refactor tracer (#581)
* trie: refactor tracer
* fix: add description
---
trie/committer.go | 50 ++---
trie/database.go | 30 ++-
trie/nodeset.go | 100 +++++-----
trie/proof.go | 2 +-
trie/{utils.go => tracer.go} | 142 ++++----------
trie/tracer_test.go | 371 +++++++++++++++++++++++++++++++++++
trie/trie.go | 16 +-
trie/trie_test.go | 68 +++++--
trie/utils_test.go | 242 -----------------------
9 files changed, 563 insertions(+), 458 deletions(-)
rename trie/{utils.go => tracer.go} (53%)
create mode 100644 trie/tracer_test.go
delete mode 100644 trie/utils_test.go
diff --git a/trie/committer.go b/trie/committer.go
index cf43e12fed..584288e625 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -60,9 +60,9 @@ var committerPool = sync.Pool{
}
// newCommitter creates a new committer or picks one from the pool.
-func newCommitter(owner common.Hash, tracer *tracer, collectLeaf bool) *committer {
+func newCommitter(nodes *NodeSet, tracer *tracer, collectLeaf bool) *committer {
return &committer{
- nodes: NewNodeSet(owner),
+ nodes: nodes,
tracer: tracer,
collectLeaf: collectLeaf,
}
@@ -74,20 +74,6 @@ func (c *committer) Commit(n node) (hashNode, *NodeSet, error) {
if err != nil {
return nil, nil, err
}
- // Some nodes can be deleted from trie which can't be captured by committer
- // itself. Iterate all deleted nodes tracked by tracer and marked them as
- // deleted only if they are present in database previously.
- for _, path := range c.tracer.deleteList() {
- // There are a few possibilities for this scenario(the node is deleted
- // but not present in database previously), for example the node was
- // embedded in the parent and now deleted from the trie. In this case
- // it's noop from database's perspective.
- val := c.tracer.getPrev(path)
- if len(val) == 0 {
- continue
- }
- c.nodes.markDeleted(path, val)
- }
return h.(hashNode), c.nodes, nil
}
@@ -119,12 +105,6 @@ func (c *committer) commit(path []byte, n node) (node, error) {
if hn, ok := hashedNode.(hashNode); ok {
return hn, nil
}
- // The short node now is embedded in its parent. Mark the node as
- // deleted if it's present in database previously. It's equivalent
- // as deletion from database's perspective.
- if prev := c.tracer.getPrev(path); len(prev) != 0 {
- c.nodes.markDeleted(path, prev)
- }
return collapsed, nil
case *fullNode:
hashedKids, err := c.commitChildren(path, cn)
@@ -138,12 +118,6 @@ func (c *committer) commit(path []byte, n node) (node, error) {
if hn, ok := hashedNode.(hashNode); ok {
return hn, nil
}
- // The short node now is embedded in its parent. Mark the node as
- // deleted if it's present in database previously. It's equivalent
- // as deletion from database's perspective.
- if prev := c.tracer.getPrev(path); len(prev) != 0 {
- c.nodes.markDeleted(path, prev)
- }
return collapsed, nil
case hashNode:
return cn, nil
@@ -196,6 +170,13 @@ func (c *committer) store(path []byte, n node) node {
// usually is leaf node). But small value(less than 32bytes) is not
// our target(leaves in account trie only).
if hash == nil {
+ // The node is embedded in its parent; in other words, it will not
+ // be stored in the database independently. Mark it as deleted only
+ // if the node existed in the database before.
+ prev, ok := c.tracer.accessList[string(path)]
+ if ok {
+ c.nodes.addNode(path, &nodeWithPrev{&memoryNode{}, prev})
+ }
return n
}
// We have the hash already, estimate the RLP encoding-size of the node.
@@ -203,15 +184,18 @@ func (c *committer) store(path []byte, n node) node {
var (
size = estimateSize(n)
nhash = common.BytesToHash(hash)
- mnode = &memoryNode{
- hash: nhash,
- node: simplifyNode(n),
- size: uint16(size),
+ node = &nodeWithPrev{
+ &memoryNode{
+ nhash,
+ uint16(size),
+ simplifyNode(n),
+ },
+ c.tracer.accessList[string(path)],
}
)
// Collect the dirty node to nodeset for return.
- c.nodes.markUpdated(path, mnode, c.tracer.getPrev(path))
+ c.nodes.addNode(path, node)
// Collect the corresponding leaf node if it's required. We don't check
// full node since it's impossible to store value in fullNode. The key
// length of leaves should be exactly same.
diff --git a/trie/database.go b/trie/database.go
index e0225eb18a..6b1bc7a62d 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -782,17 +782,31 @@ func (db *Database) Update(nodes *MergedNodeSet) error {
defer db.lock.Unlock()
// Insert dirty nodes into the database. In the same tree, it must be
// ensured that children are inserted first, then parent so that children
- // can be linked with their parent correctly. The order of writing between
- // different tries(account trie, storage tries) is not required.
- for owner, subset := range nodes.sets {
- for _, path := range subset.updates.order {
- n, ok := subset.updates.nodes[path]
- if !ok {
- return fmt.Errorf("missing node %x %v", owner, path)
+ // can be linked with their parent correctly.
+ //
+ // Note, the storage tries must be flushed before the account trie to
+ // retain the invariant that children go into the dirty cache first.
+ var order []common.Hash
+ for owner := range nodes.sets {
+ if owner == (common.Hash{}) {
+ continue
+ }
+ order = append(order, owner)
+ }
+ if _, ok := nodes.sets[common.Hash{}]; ok {
+ order = append(order, common.Hash{})
+ }
+ for _, owner := range order {
+ subset := nodes.sets[owner]
+ subset.forEachWithOrder(func(path string, n *memoryNode) {
+ if n.isDeleted() {
+ return // ignore deletion
}
db.insert(n.hash, int(n.size), n.node)
- }
+ })
}
+ // Link up the account trie and storage trie if the node points
+ // to an account trie leaf.
if set, present := nodes.sets[common.Hash{}]; present {
for _, leaf := range set.leaves {
// Looping node leaf, then reference the leaf node to the root node
diff --git a/trie/nodeset.go b/trie/nodeset.go
index a94535069e..6b99dbebc6 100644
--- a/trie/nodeset.go
+++ b/trie/nodeset.go
@@ -19,6 +19,7 @@ package trie
import (
"fmt"
"reflect"
+ "sort"
"strings"
"github.com/ethereum/go-ethereum/common"
@@ -42,8 +43,13 @@ var memoryNodeSize = int(reflect.TypeOf(memoryNode{}).Size())
// memorySize returns the total memory size used by this node.
// nolint:unused
-func (n *memoryNode) memorySize(key int) int {
- return int(n.size) + memoryNodeSize + key
+func (n *memoryNode) memorySize(pathlen int) int {
+ return int(n.size) + memoryNodeSize + pathlen
+}
+
+// isDeleted reports whether the node is marked as deleted.
+func (n *memoryNode) isDeleted() bool {
+ return n.hash == (common.Hash{})
}
// rlp returns the raw rlp encoded blob of the cached trie node, either directly
@@ -89,21 +95,19 @@ func (n *nodeWithPrev) memorySize(key int) int {
return n.memoryNode.memorySize(key) + len(n.prev)
}
-// nodesWithOrder represents a collection of dirty nodes which includes
-// newly-inserted and updated nodes. The modification order of all nodes
-// is represented by order list.
-type nodesWithOrder struct {
- order []string // the path list of dirty nodes, sort by insertion order
- nodes map[string]*nodeWithPrev // the map of dirty nodes, keyed by node path
-}
-
// NodeSet contains all dirty nodes collected during the commit operation
// Each node is keyed by path. It is not thread-safe to use.
type NodeSet struct {
- owner common.Hash // the identifier of the trie
- updates *nodesWithOrder // the set of updated nodes(newly inserted, updated)
- deletes map[string][]byte // the map of deleted nodes, keyed by node
- leaves []*leaf // the list of dirty leaves
+ owner common.Hash // the identifier of the trie
+ leaves []*leaf // the list of dirty leaves
+ updates int // the count of updated and inserted nodes
+ deletes int // the count of deleted nodes
+
+ // The set of all dirty nodes. Dirty nodes include newly inserted nodes,
+ // deleted nodes and updated nodes. The original value of the newly
+ // inserted node must be nil, and the original value of the other two
+ // types must be non-nil.
+ nodes map[string]*nodeWithPrev
}
// NewNodeSet initializes an empty node set to be used for tracking dirty nodes
@@ -112,35 +116,32 @@ type NodeSet struct {
func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{
owner: owner,
- updates: &nodesWithOrder{
- nodes: make(map[string]*nodeWithPrev),
- },
- deletes: make(map[string][]byte),
+ nodes: make(map[string]*nodeWithPrev),
}
}
-// NewNodeSetWithDeletion initializes the nodeset with provided deletion set.
-func NewNodeSetWithDeletion(owner common.Hash, paths [][]byte, prev [][]byte) *NodeSet {
- set := NewNodeSet(owner)
- for i, path := range paths {
- set.markDeleted(path, prev[i])
+// forEachWithOrder iterates the dirty nodes in order from bottom to top,
+// right to left; nodes with the longest path are iterated first.
+func (set *NodeSet) forEachWithOrder(callback func(path string, n *memoryNode)) {
+ var paths sort.StringSlice
+ for path := range set.nodes {
+ paths = append(paths, path)
}
- return set
-}
-
-// markUpdated marks the node as dirty(newly-inserted or updated) with provided
-// node path, node object along with its previous value.
-func (set *NodeSet) markUpdated(path []byte, node *memoryNode, prev []byte) {
- set.updates.order = append(set.updates.order, string(path))
- set.updates.nodes[string(path)] = &nodeWithPrev{
- memoryNode: node,
- prev: prev,
+ // Bottom-up, longest path first
+ sort.Sort(sort.Reverse(paths))
+ for _, path := range paths {
+ callback(path, set.nodes[path].unwrap())
}
}
-// markDeleted marks the node as deleted with provided path and previous value.
-func (set *NodeSet) markDeleted(path []byte, prev []byte) {
- set.deletes[string(path)] = prev
+// addNode adds the provided dirty node into set.
+func (set *NodeSet) addNode(path []byte, n *nodeWithPrev) {
+ if n.isDeleted() {
+ set.deletes += 1
+ } else {
+ set.updates += 1
+ }
+ set.nodes[string(path)] = n
}
// addLeaf collects the provided leaf node into set.
@@ -150,13 +151,13 @@ func (set *NodeSet) addLeaf(leaf *leaf) {
// Size returns the number of updated and deleted nodes contained in the set.
func (set *NodeSet) Size() (int, int) {
- return len(set.updates.order), len(set.deletes)
+ return set.updates, set.deletes
}
// Hashes returns the hashes of all updated nodes.
func (set *NodeSet) Hashes() []common.Hash {
var ret []common.Hash
- for _, node := range set.updates.nodes {
+ for _, node := range set.nodes {
ret = append(ret, node.hash)
}
return ret
@@ -166,19 +167,22 @@ func (set *NodeSet) Hashes() []common.Hash {
func (set *NodeSet) Summary() string {
var out = new(strings.Builder)
fmt.Fprintf(out, "nodeset owner: %v\n", set.owner)
- if set.updates != nil {
- for _, key := range set.updates.order {
- updated := set.updates.nodes[key]
- if updated.prev != nil {
- fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", key, updated.hash, updated.prev)
- } else {
- fmt.Fprintf(out, " [+]: %x -> %v\n", key, updated.hash)
+ if set.nodes != nil {
+ for path, n := range set.nodes {
+ // Deletion
+ if n.isDeleted() {
+ fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.prev)
+ continue
}
+ // Insertion
+ if len(n.prev) == 0 {
+ fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.hash)
+ continue
+ }
+ // Update
+ fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.hash, n.prev)
}
}
- for k, n := range set.deletes {
- fmt.Fprintf(out, " [-]: %x -> %x\n", k, n)
- }
for _, n := range set.leaves {
fmt.Fprintf(out, "[leaf]: %v\n", n)
}
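The bottom-up guarantee of forEachWithOrder falls out of plain string
sorting: a child's path strictly extends its parent's, so reverse
lexicographic order always visits children before parents. A standalone
illustration:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hex-nibble paths: "" is the root, each child extends its parent.
	paths := sort.StringSlice{"", "6", "65", "657", "7"}
	sort.Sort(sort.Reverse(paths))
	// Children now come before their parents within every subtree.
	fmt.Println(paths) // [7 657 65 6 ]
}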
diff --git a/trie/proof.go b/trie/proof.go
index c589971976..29c6aa2c54 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -562,7 +562,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
}
// Rebuild the trie with the leaf stream, the shape of trie
// should be same with the original one.
- tr := &Trie{root: root, reader: newEmptyReader()}
+ tr := &Trie{root: root, reader: newEmptyReader(), tracer: newTracer()}
if empty {
tr.root = nil
}
diff --git a/trie/utils.go b/trie/tracer.go
similarity index 53%
rename from trie/utils.go
rename to trie/tracer.go
index d1cd3bdd23..cd5ebb85a2 100644
--- a/trie/utils.go
+++ b/trie/tracer.go
@@ -36,147 +36,89 @@ package trie
// Note tracer is not thread-safe, callers should be responsible for handling
// the concurrency issues by themselves.
type tracer struct {
- insert map[string]struct{}
- delete map[string]struct{}
- origin map[string][]byte
+ inserts map[string]struct{}
+ deletes map[string]struct{}
+ accessList map[string][]byte
}
// newTracer initializes the trie node diff tracer.
func newTracer() *tracer {
return &tracer{
- insert: make(map[string]struct{}),
- delete: make(map[string]struct{}),
- origin: make(map[string][]byte),
+ inserts: make(map[string]struct{}),
+ deletes: make(map[string]struct{}),
+ accessList: make(map[string][]byte),
}
}
// onRead tracks the newly loaded trie node and caches the rlp-encoded blob internally.
// Don't change the value outside of function since it's not deep-copied.
func (t *tracer) onRead(path []byte, val []byte) {
- // Tracer isn't used right now, remove this check later.
- if t == nil {
- return
- }
- t.origin[string(path)] = val
+ t.accessList[string(path)] = val
}
// onInsert tracks the newly inserted trie node. If it's already
// in the delete set(resurrected node), then just wipe it from
// the deletion set as it's untouched.
func (t *tracer) onInsert(path []byte) {
- // Tracer isn't used right now, remove this check latter.
- if t == nil {
- return
- }
// If the path is in the delete set, then it's a resurrected node; just wipe it.
- if _, present := t.delete[string(path)]; present {
- delete(t.delete, string(path))
+ if _, present := t.deletes[string(path)]; present {
+ delete(t.deletes, string(path))
return
}
- t.insert[string(path)] = struct{}{}
+ t.inserts[string(path)] = struct{}{}
}
// onDelete tracks the newly deleted trie node. If it's already
// in the addition set, then just wipe it from the addition set
// as it's untouched.
func (t *tracer) onDelete(path []byte) {
- // Tracer isn't used right now, remove this check latter.
- if t == nil {
+ if _, present := t.inserts[string(path)]; present {
+ delete(t.inserts, string(path))
return
}
- if _, present := t.insert[string(path)]; present {
- delete(t.insert, string(path))
- return
- }
- t.delete[string(path)] = struct{}{}
-}
-
-// insertList returns the tracked inserted trie nodes in list format.
-func (t *tracer) insertList() [][]byte {
- // Tracer isn't used right now, remove this check later.
- if t == nil {
- return nil
- }
- var ret [][]byte
- for path := range t.insert {
- ret = append(ret, []byte(path))
- }
- return ret
-}
-
-// deleteList returns the tracked deleted trie nodes in list format.
-func (t *tracer) deleteList() [][]byte {
- // Tracer isn't used right now, remove this check later.
- if t == nil {
- return nil
- }
- var ret [][]byte
- for path := range t.delete {
- ret = append(ret, []byte(path))
- }
- return ret
-}
-
-// prevList returns the tracked node blobs in list format.
-func (t *tracer) prevList() ([][]byte, [][]byte) {
- // Tracer isn't used right now, remove this check later.
- if t == nil {
- return nil, nil
- }
- var (
- paths [][]byte
- blobs [][]byte
- )
- for path, blob := range t.origin {
- paths = append(paths, []byte(path))
- blobs = append(blobs, blob)
- }
- return paths, blobs
-}
-
-// getPrev returns the cached original value of the specified node.
-func (t *tracer) getPrev(path []byte) []byte {
- // Don't panic on uninitialized tracer, it's possible in testing.
- if t == nil {
- return nil
- }
- return t.origin[string(path)]
+ t.deletes[string(path)] = struct{}{}
}
// reset clears the content tracked by tracer.
func (t *tracer) reset() {
- // Tracer isn't used right now, remove this check later.
- if t == nil {
- return
- }
- t.insert = make(map[string]struct{})
- t.delete = make(map[string]struct{})
- t.origin = make(map[string][]byte)
+ t.inserts = make(map[string]struct{})
+ t.deletes = make(map[string]struct{})
+ t.accessList = make(map[string][]byte)
}
// copy returns a deep copied tracer instance.
func (t *tracer) copy() *tracer {
- // Tracer isn't used right now, remove this check later.
- if t == nil {
- return nil
- }
var (
- insert = make(map[string]struct{})
- delete = make(map[string]struct{})
- origin = make(map[string][]byte)
+ inserts = make(map[string]struct{})
+ deletes = make(map[string]struct{})
+ accessList = make(map[string][]byte)
)
- for key := range t.insert {
- insert[key] = struct{}{}
+ for key := range t.inserts {
+ inserts[key] = struct{}{}
}
- for key := range t.delete {
- delete[key] = struct{}{}
+ for key := range t.deletes {
+ deletes[key] = struct{}{}
}
- for key, val := range t.origin {
- origin[key] = val
+ for key, val := range t.accessList {
+ accessList[key] = val
}
return &tracer{
- insert: insert,
- delete: delete,
- origin: origin,
+ inserts: inserts,
+ deletes: deletes,
+ accessList: accessList,
+ }
+}
+
+// markDeletions puts all tracked deletions into the provided nodeset.
+func (t *tracer) markDeletions(set *NodeSet) {
+ for path := range t.deletes {
+ // It's possible that a few deleted nodes were embedded in
+ // their parent before; deleting them is a no-op from the
+ // database's perspective, so filter them out.
+ prev, ok := t.accessList[path]
+ if !ok {
+ continue
+ }
+ set.addNode([]byte(path), &nodeWithPrev{&memoryNode{}, prev})
}
}
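The nodeWithPrev encoding makes the three dirty-node kinds
distinguishable without extra flags; a sketch of the classification
rule implied by isDeleted and Summary (classify is illustrative only):

package sketch

import "github.com/ethereum/go-ethereum/common"

// classify mirrors the rule used by this patch: a zero hash means the
// node was deleted, an empty prev value means it was newly inserted,
// and anything else is an in-place update.
func classify(hash common.Hash, prev []byte) string {
	switch {
	case hash == (common.Hash{}):
		return "deleted" // prev holds the value needed to restore it
	case len(prev) == 0:
		return "inserted" // no original value existed
	default:
		return "updated"
	}
}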
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
new file mode 100644
index 0000000000..f8511a5e67
--- /dev/null
+++ b/trie/tracer_test.go
@@ -0,0 +1,371 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+)
+
+var (
+ tiny = []struct{ k, v string }{
+ {"k1", "v1"},
+ {"k2", "v2"},
+ {"k3", "v3"},
+ }
+ nonAligned = []struct{ k, v string }{
+ {"do", "verb"},
+ {"ether", "wookiedoo"},
+ {"horse", "stallion"},
+ {"shaman", "horse"},
+ {"doge", "coin"},
+ {"dog", "puppy"},
+ {"somethingveryoddindeedthis is", "myothernodedata"},
+ }
+ standard = []struct{ k, v string }{
+ {string(randBytes(32)), "verb"},
+ {string(randBytes(32)), "wookiedoo"},
+ {string(randBytes(32)), "stallion"},
+ {string(randBytes(32)), "horse"},
+ {string(randBytes(32)), "coin"},
+ {string(randBytes(32)), "puppy"},
+ {string(randBytes(32)), "myothernodedata"},
+ }
+)
+
+func TestTrieTracer(t *testing.T) {
+ testTrieTracer(t, tiny)
+ testTrieTracer(t, nonAligned)
+ testTrieTracer(t, standard)
+}
+
+// Tests if the trie diffs are tracked correctly. The tracer should capture
+// all non-leaf dirty nodes, whether they are embedded or not.
+func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(db)
+
+ // Determine all new nodes are tracked
+ for _, val := range vals {
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ insertSet := copySet(trie.tracer.inserts) // copy before commit
+ deleteSet := copySet(trie.tracer.deletes) // copy before commit
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+
+ seen := setKeys(iterNodes(db, root))
+ if !compareSet(insertSet, seen) {
+ t.Fatal("Unexpected insertion set")
+ }
+ if !compareSet(deleteSet, nil) {
+ t.Fatal("Unexpected deletion set")
+ }
+
+ // Determine all deletions are tracked
+ trie, _ = New(TrieID(root), db)
+ for _, val := range vals {
+ trie.Delete([]byte(val.k))
+ }
+ insertSet, deleteSet = copySet(trie.tracer.inserts), copySet(trie.tracer.deletes)
+ if !compareSet(insertSet, nil) {
+ t.Fatal("Unexpected insertion set")
+ }
+ if !compareSet(deleteSet, seen) {
+ t.Fatal("Unexpected deletion set")
+ }
+}
+
+// Tests that after inserting a new batch of nodes and deleting them
+// immediately, the trie tracer is left empty, as effectively nothing happened.
+func TestTrieTracerNoop(t *testing.T) {
+ testTrieTracerNoop(t, tiny)
+ testTrieTracerNoop(t, nonAligned)
+ testTrieTracerNoop(t, standard)
+}
+
+func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ for _, val := range vals {
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ for _, val := range vals {
+ trie.Delete([]byte(val.k))
+ }
+ if len(trie.tracer.inserts) != 0 {
+ t.Fatal("Unexpected insertion set")
+ }
+ if len(trie.tracer.deletes) != 0 {
+ t.Fatal("Unexpected deletion set")
+ }
+}
+
+// Tests if the accessList is correctly tracked.
+func TestAccessList(t *testing.T) {
+ testAccessList(t, tiny)
+ testAccessList(t, nonAligned)
+ testAccessList(t, standard)
+}
+
+func testAccessList(t *testing.T, vals []struct{ k, v string }) {
+ var (
+ db = NewDatabase(rawdb.NewMemoryDatabase())
+ trie = NewEmpty(db)
+ orig = trie.Copy()
+ )
+ // Create trie from scratch
+ for _, val := range vals {
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+
+ trie, _ = New(TrieID(root), db)
+ if err := verifyAccessList(orig, trie, nodes); err != nil {
+ t.Fatalf("Invalid accessList %v", err)
+ }
+
+ // Update trie
+ trie, _ = New(TrieID(root), db)
+ orig = trie.Copy()
+ for _, val := range vals {
+ trie.Update([]byte(val.k), randBytes(32))
+ }
+ root, nodes, _ = trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+
+ trie, _ = New(TrieID(root), db)
+ if err := verifyAccessList(orig, trie, nodes); err != nil {
+ t.Fatalf("Invalid accessList %v", err)
+ }
+
+ // Add more new nodes
+ trie, _ = New(TrieID(root), db)
+ orig = trie.Copy()
+ var keys []string
+ for i := 0; i < 30; i++ {
+ key := randBytes(32)
+ keys = append(keys, string(key))
+ trie.Update(key, randBytes(32))
+ }
+ root, nodes, _ = trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+
+ trie, _ = New(TrieID(root), db)
+ if err := verifyAccessList(orig, trie, nodes); err != nil {
+ t.Fatalf("Invalid accessList %v", err)
+ }
+
+ // Partial deletions
+ trie, _ = New(TrieID(root), db)
+ orig = trie.Copy()
+ for _, key := range keys {
+ trie.Update([]byte(key), nil)
+ }
+ root, nodes, _ = trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+
+ trie, _ = New(TrieID(root), db)
+ if err := verifyAccessList(orig, trie, nodes); err != nil {
+ t.Fatalf("Invalid accessList %v", err)
+ }
+
+ // Delete all
+ trie, _ = New(TrieID(root), db)
+ orig = trie.Copy()
+ for _, val := range vals {
+ trie.Update([]byte(val.k), nil)
+ }
+ root, nodes, _ = trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+
+ trie, _ = New(TrieID(root), db)
+ if err := verifyAccessList(orig, trie, nodes); err != nil {
+ t.Fatalf("Invalid accessList %v", err)
+ }
+}
+
+// Tests that origin values won't be tracked by the Iterator or the Prover.
+func TestAccessListLeak(t *testing.T) {
+ var (
+ db = NewDatabase(rawdb.NewMemoryDatabase())
+ trie = NewEmpty(db)
+ )
+ // Create trie from scratch
+ for _, val := range standard {
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ root, nodes, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(nodes))
+
+ var cases = []struct {
+ op func(tr *Trie)
+ }{
+ {
+ func(tr *Trie) {
+ it := tr.NodeIterator(nil)
+ for it.Next(true) {
+ }
+ },
+ },
+ {
+ func(tr *Trie) {
+ it := NewIterator(tr.NodeIterator(nil))
+ for it.Next() {
+ }
+ },
+ },
+ {
+ func(tr *Trie) {
+ for _, val := range standard {
+ tr.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase())
+ }
+ },
+ },
+ }
+ for _, c := range cases {
+ trie, _ = New(TrieID(root), db)
+ n1 := len(trie.tracer.accessList)
+ c.op(trie)
+ n2 := len(trie.tracer.accessList)
+
+ if n1 != n2 {
+ t.Fatalf("AccessList is leaked, prev %d after %d", n1, n2)
+ }
+ }
+}
+
+// Tests whether the original trie node is correctly deleted after being
+// embedded in its parent due to its small size.
+func TestTinyTree(t *testing.T) {
+ var (
+ db = NewDatabase(rawdb.NewMemoryDatabase())
+ trie = NewEmpty(db)
+ )
+ for _, val := range tiny {
+ trie.Update([]byte(val.k), randBytes(32))
+ }
+ root, set, _ := trie.Commit(false)
+ db.Update(NewWithNodeSet(set))
+
+ trie, _ = New(TrieID(root), db)
+ orig := trie.Copy()
+ for _, val := range tiny {
+ trie.Update([]byte(val.k), []byte(val.v))
+ }
+ root, set, _ = trie.Commit(false)
+ db.Update(NewWithNodeSet(set))
+
+ trie, _ = New(TrieID(root), db)
+ if err := verifyAccessList(orig, trie, set); err != nil {
+ t.Fatalf("Invalid accessList %v", err)
+ }
+}
+
+func compareSet(setA, setB map[string]struct{}) bool {
+ if len(setA) != len(setB) {
+ return false
+ }
+ for key := range setA {
+ if _, ok := setB[key]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func forNodes(tr *Trie) map[string][]byte {
+ var (
+ it = tr.NodeIterator(nil)
+ nodes = make(map[string][]byte)
+ )
+ for it.Next(true) {
+ if it.Leaf() {
+ continue
+ }
+ blob := it.NodeBlob()
+ nodes[string(it.Path())] = common.CopyBytes(blob)
+ }
+ return nodes
+}
+
+func iterNodes(db *Database, root common.Hash) map[string][]byte {
+ tr, _ := New(TrieID(root), db)
+ return forNodes(tr)
+}
+
+func forHashedNodes(tr *Trie) map[string][]byte {
+ var (
+ it = tr.NodeIterator(nil)
+ nodes = make(map[string][]byte)
+ )
+ for it.Next(true) {
+ if it.Hash() == (common.Hash{}) {
+ continue
+ }
+ blob := it.NodeBlob()
+ nodes[string(it.Path())] = common.CopyBytes(blob)
+ }
+ return nodes
+}
+
+// diffTries returns the nodes exclusive to each of the two tries, plus the
+// nodes present in both but holding different values.
+func diffTries(trieA, trieB *Trie) (map[string][]byte, map[string][]byte, map[string][]byte) {
+ var (
+ nodesA = forHashedNodes(trieA)
+ nodesB = forHashedNodes(trieB)
+ inA = make(map[string][]byte) // hashed nodes in trie a but not b
+ inB = make(map[string][]byte) // hashed nodes in trie b but not a
+ both = make(map[string][]byte) // hashed nodes in both tries but different value
+ )
+ for path, blobA := range nodesA {
+ if blobB, ok := nodesB[path]; ok {
+ if bytes.Equal(blobA, blobB) {
+ continue
+ }
+ both[path] = blobA
+ continue
+ }
+ inA[path] = blobA
+ }
+ for path, blobB := range nodesB {
+ if _, ok := nodesA[path]; ok {
+ continue
+ }
+ inB[path] = blobB
+ }
+ return inA, inB, both
+}
+
+func setKeys(set map[string][]byte) map[string]struct{} {
+ keys := make(map[string]struct{})
+ for k := range set {
+ keys[k] = struct{}{}
+ }
+ return keys
+}
+
+func copySet(set map[string]struct{}) map[string]struct{} {
+ copied := make(map[string]struct{})
+ for k := range set {
+ copied[k] = struct{}{}
+ }
+ return copied
+}
diff --git a/trie/trie.go b/trie/trie.go
index b1c3d71363..596cc31d1a 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -77,7 +77,7 @@ func New(id *ID, db NodeReader) (*Trie, error) {
trie := &Trie{
owner: id.Owner,
reader: reader,
- //tracer: newTracer(),
+ tracer: newTracer(),
}
if id.Root != (common.Hash{}) && id.Root != emptyRoot {
rootnode, err := trie.resolveAndTrack(id.Root[:], nil)
@@ -571,7 +571,7 @@ func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {
- hash, cached, _ := t.hashRoot()
+ hash, cached := t.hashRoot()
t.root = cached
return common.BytesToHash(hash.(hashNode))
}
@@ -584,9 +584,11 @@ func (t *Trie) Hash() common.Hash {
// be created with new root and updated trie database for following usage
func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
defer t.tracer.reset()
+ nodes := NewNodeSet(t.owner)
+ t.tracer.markDeletions(nodes)
if t.root == nil {
- return emptyRoot, nil, nil
+ return emptyRoot, nodes, nil
}
// Derive the hash for all dirty nodes first. We hold the assumption
// in the following procedure that all nodes are hashed.
@@ -601,7 +603,7 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
t.root = hashedNode
return rootHash, nil, nil
}
- h := newCommitter(t.owner, t.tracer, collectLeaf)
+ h := newCommitter(nodes, t.tracer, collectLeaf)
newRoot, nodes, err := h.Commit(t.root)
if err != nil {
return common.Hash{}, nil, err
@@ -612,16 +614,16 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
}
// hashRoot calculates the root hash of the given trie
-func (t *Trie) hashRoot() (node, node, error) {
+func (t *Trie) hashRoot() (node, node) {
if t.root == nil {
- return hashNode(emptyRoot.Bytes()), nil, nil
+ return hashNode(emptyRoot.Bytes()), nil
}
// If the number of changes is below 100, we let one thread handle it
h := newHasher(t.unhashed >= 100)
defer returnHasherToPool(h)
hashed, cached := h.hash(t.root, true)
t.unhashed = 0
- return hashed, cached, nil
+ return hashed, cached
}
// Reset drops the referenced root node and cleans all internal state.
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 02efa61043..499f0574df 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -420,6 +420,49 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
return reflect.ValueOf(steps)
}
+// verifyAccessList verifies the access list of the new trie against the old trie.
+func verifyAccessList(old *Trie, new *Trie, set *NodeSet) error {
+ deletes, inserts, updates := diffTries(old, new)
+
+ // Check insertion set
+ for path := range inserts {
+ n, ok := set.nodes[path]
+ if !ok || n.isDeleted() {
+ return errors.New("expect new node")
+ }
+ if len(n.prev) > 0 {
+ return errors.New("unexpected origin value")
+ }
+ }
+ // Check deletion set
+ for path, blob := range deletes {
+ n, ok := set.nodes[path]
+ if !ok || !n.isDeleted() {
+ return errors.New("expect deleted node")
+ }
+ if len(n.prev) == 0 {
+ return errors.New("expect origin value")
+ }
+ if !bytes.Equal(n.prev, blob) {
+ return errors.New("invalid origin value")
+ }
+ }
+ // Check update set
+ for path, blob := range updates {
+ n, ok := set.nodes[path]
+ if !ok || n.isDeleted() {
+ return errors.New("expect updated node")
+ }
+ if len(n.prev) == 0 {
+ return errors.New("expect origin value")
+ }
+ if !bytes.Equal(n.prev, blob) {
+ return errors.New("invalid origin value")
+ }
+ }
+ return nil
+}
+
func runRandTest(rt randTest) bool {
var (
triedb = NewDatabase(rawdb.NewMemoryDatabase())
@@ -468,24 +511,6 @@ func runRandTest(rt randTest) bool {
rt[i].err = err
return false
}
- // Validity the returned nodeset
- if nodes != nil {
- for path, node := range nodes.updates.nodes {
- blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path)))
- got := node.prev
- if !bytes.Equal(blob, got) {
- rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, got, blob)
- panic(rt[i].err)
- }
- }
- for path, prev := range nodes.deletes {
- blob, _, _ := origTrie.TryGetNode(hexToCompact([]byte(path)))
- if !bytes.Equal(blob, prev) {
- rt[i].err = fmt.Errorf("prevalue mismatch for 0x%x, got 0x%x want 0x%x", path, prev, blob)
- return false
- }
- }
- }
if nodes != nil {
triedb.Update(NewWithNodeSet(nodes))
}
@@ -494,8 +519,13 @@ func runRandTest(rt randTest) bool {
rt[i].err = err
return false
}
+ if nodes != nil {
+ if err := verifyAccessList(origTrie, newtr, nodes); err != nil {
+ rt[i].err = err
+ return false
+ }
+ }
tr = newtr
-
// Enable node tracing. Resolve the root node again explicitly
// since it's not captured at the beginning.
tr.tracer = newTracer()
diff --git a/trie/utils_test.go b/trie/utils_test.go
deleted file mode 100644
index 011d939671..0000000000
--- a/trie/utils_test.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "bytes"
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/rawdb"
-)
-
-// Tests if the trie diffs are tracked correctly.
-func TestTrieTracer(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- trie := NewEmpty(db)
- trie.tracer = newTracer()
-
- // Insert a batch of entries, all the nodes should be marked as inserted
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- for _, val := range vals {
- trie.Update([]byte(val.k), []byte(val.v))
- }
- trie.Hash()
-
- seen := make(map[string]struct{})
- it := trie.NodeIterator(nil)
- for it.Next(true) {
- if it.Leaf() {
- continue
- }
- seen[string(it.Path())] = struct{}{}
- }
- inserted := trie.tracer.insertList()
- if len(inserted) != len(seen) {
- t.Fatalf("Unexpected inserted node tracked want %d got %d", len(seen), len(inserted))
- }
- for _, k := range inserted {
- _, ok := seen[string(k)]
- if !ok {
- t.Fatalf("Unexpected inserted node")
- }
- }
- deleted := trie.tracer.deleteList()
- if len(deleted) != 0 {
- t.Fatalf("Unexpected deleted node tracked %d", len(deleted))
- }
-
- // Commit the changes
- root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
- trie, _ = New(TrieID(root), db)
- trie.tracer = newTracer()
-
- // Delete all the elements, check deletion set
- for _, val := range vals {
- trie.Delete([]byte(val.k))
- }
- trie.Hash()
-
- inserted = trie.tracer.insertList()
- if len(inserted) != 0 {
- t.Fatalf("Unexpected inserted node tracked %d", len(inserted))
- }
- deleted = trie.tracer.deleteList()
- if len(deleted) != len(seen) {
- t.Fatalf("Unexpected deleted node tracked want %d got %d", len(seen), len(deleted))
- }
- for _, k := range deleted {
- _, ok := seen[string(k)]
- if !ok {
- t.Fatalf("Unexpected inserted node")
- }
- }
-}
-
-func TestTrieTracerNoop(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- trie.tracer = newTracer()
-
- // Insert a batch of entries, all the nodes should be marked as inserted
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- for _, val := range vals {
- trie.Update([]byte(val.k), []byte(val.v))
- }
- for _, val := range vals {
- trie.Delete([]byte(val.k))
- }
- if len(trie.tracer.insertList()) != 0 {
- t.Fatalf("Unexpected inserted node tracked %d", len(trie.tracer.insertList()))
- }
- if len(trie.tracer.deleteList()) != 0 {
- t.Fatalf("Unexpected deleted node tracked %d", len(trie.tracer.deleteList()))
- }
-}
-func TestTrieTracePrevValue(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- trie := NewEmpty(db)
- trie.tracer = newTracer()
-
- paths, blobs := trie.tracer.prevList()
- if len(paths) != 0 || len(blobs) != 0 {
- t.Fatalf("Nothing should be tracked")
- }
- // Insert a batch of entries, all the nodes should be marked as inserted
- vals := []struct{ k, v string }{
- {"do", "verb"},
- {"ether", "wookiedoo"},
- {"horse", "stallion"},
- {"shaman", "horse"},
- {"doge", "coin"},
- {"dog", "puppy"},
- {"somethingveryoddindeedthis is", "myothernodedata"},
- }
- for _, val := range vals {
- trie.Update([]byte(val.k), []byte(val.v))
- }
- paths, blobs = trie.tracer.prevList()
- if len(paths) != 0 || len(blobs) != 0 {
- t.Fatalf("Nothing should be tracked")
- }
-
- // Commit the changes and re-create with new root
- root, nodes, _ := trie.Commit(false)
- if err := db.Update(NewWithNodeSet(nodes)); err != nil {
- t.Fatal(err)
- }
- trie, _ = New(TrieID(root), db)
- trie.tracer = newTracer()
- trie.resolveAndTrack(root.Bytes(), nil)
-
- // Load all nodes in trie
- for _, val := range vals {
- trie.TryGet([]byte(val.k))
- }
-
- // Ensure all nodes are tracked by tracer with correct prev-values
- iter := trie.NodeIterator(nil)
- seen := make(map[string][]byte)
- for iter.Next(true) {
- // Embedded nodes are ignored since they are not present in
- // database.
- if iter.Hash() == (common.Hash{}) {
- continue
- }
- blob := iter.NodeBlob()
- seen[string(iter.Path())] = common.CopyBytes(blob)
- }
-
- paths, blobs = trie.tracer.prevList()
- if len(paths) != len(seen) || len(blobs) != len(seen) {
- t.Fatalf("Unexpected tracked values")
- }
- for i, path := range paths {
- blob := blobs[i]
- prev, ok := seen[string(path)]
- if !ok {
- t.Fatalf("Missing node %v", path)
- }
- if !bytes.Equal(blob, prev) {
- t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob)
- }
- }
-
- // Re-open the trie and iterate the trie, ensure nothing will be tracked.
- // Iterator will not link any loaded nodes to trie.
- trie, _ = New(TrieID(root), db)
- trie.tracer = newTracer()
-
- iter = trie.NodeIterator(nil)
- for iter.Next(true) {
- }
- paths, blobs = trie.tracer.prevList()
- if len(paths) != 0 || len(blobs) != 0 {
- t.Fatalf("Nothing should be tracked")
- }
-
- // Re-open the trie and generate proof for entries, ensure nothing will
- // be tracked. Prover will not link any loaded nodes to trie.
- trie, _ = New(TrieID(root), db)
- trie.tracer = newTracer()
- for _, val := range vals {
- trie.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase())
- }
- paths, blobs = trie.tracer.prevList()
- if len(paths) != 0 || len(blobs) != 0 {
- t.Fatalf("Nothing should be tracked")
- }
-
- // Delete entries from trie, ensure all previous values are correct.
- trie, _ = New(TrieID(root), db)
- trie.tracer = newTracer()
- trie.resolveAndTrack(root.Bytes(), nil)
-
- for _, val := range vals {
- trie.TryDelete([]byte(val.k))
- }
- paths, blobs = trie.tracer.prevList()
- if len(paths) != len(seen) || len(blobs) != len(seen) {
- t.Fatalf("Unexpected tracked values")
- }
- for i, path := range paths {
- blob := blobs[i]
- prev, ok := seen[string(path)]
- if !ok {
- t.Fatalf("Missing node %v", path)
- }
- if !bytes.Equal(blob, prev) {
- t.Fatalf("Unexpected value path: %v, want: %v, got: %v", path, prev, blob)
- }
- }
-}
From 0ded46b03731e77c19a02b9e35c7c38a171e4157 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Fri, 27 Sep 2024 12:57:27 +0700
Subject: [PATCH 17/41] eth/protocols/snap: fix batch writer when resuming an
aborted sync (#27842) (#587)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Péter Szilágyi
---
eth/protocols/snap/sync.go | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index d04358ebf3..c4b0a25dda 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -714,6 +714,8 @@ func (s *Syncer) loadSyncStatus() {
}
s.tasks = progress.Tasks
for _, task := range s.tasks {
+ task := task // closure for task.genBatch in the stacktrie writer callback
+
task.genBatch = ethdb.HookedBatch{
Batch: s.db.NewBatch(),
OnPut: func(key []byte, value []byte) {
@@ -726,6 +728,8 @@ func (s *Syncer) loadSyncStatus() {
for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
+ subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback
+
subtask.genBatch = ethdb.HookedBatch{
Batch: s.db.NewBatch(),
OnPut: func(key []byte, value []byte) {
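The fix relies on Go's (pre-1.22) loop-variable capture semantics: every
closure created inside the loop shares a single iteration variable
unless it is shadowed. A standalone illustration:

package main

import "fmt"

func main() {
	var fns []func()
	for _, v := range []int{1, 2, 3} {
		v := v // shadow: give each closure its own copy
		fns = append(fns, func() { fmt.Println(v) })
	}
	for _, f := range fns {
		f() // prints 1 2 3; without the shadow it would print 3 3 3
	}
}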
From 56eb323cb50c0aa26c40bbcb71121a6dfce63a4e Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Mon, 30 Sep 2024 17:43:05 +0700
Subject: [PATCH 18/41] trie: rework trie database (#585)
---
trie/committer.go | 46 +++---
trie/database.go | 366 +++++++++++---------------------------------
trie/iterator.go | 16 +-
trie/node.go | 12 ++
trie/nodeset.go | 28 +---
trie/proof.go | 7 +-
trie/trie.go | 4 +-
trie/trie_reader.go | 37 +----
8 files changed, 146 insertions(+), 370 deletions(-)
diff --git a/trie/committer.go b/trie/committer.go
index 584288e625..b19316631f 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
@@ -182,13 +183,12 @@ func (c *committer) store(path []byte, n node) node {
// We have the hash already, estimate the RLP encoding-size of the node.
// The size is used for mem tracking, does not need to be exact
var (
- size = estimateSize(n)
- nhash = common.BytesToHash(hash)
- node = &nodeWithPrev{
+ nhash = common.BytesToHash(hash)
+ blob, _ = rlp.EncodeToBytes(n)
+ node = &nodeWithPrev{
&memoryNode{
nhash,
- uint16(size),
- simplifyNode(n),
+ blob,
},
c.tracer.accessList[string(path)],
}
@@ -209,31 +209,29 @@ func (c *committer) store(path []byte, n node) node {
return hash
}
-// estimateSize estimates the size of an rlp-encoded node, without actually
-// rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie
-// with 1000 leafs, the only errors above 1% are on small shortnodes, where this
-// method overestimates by 2 or 3 bytes (e.g. 37 instead of 35)
-func estimateSize(n node) int {
+// mptResolver is the children resolver for the merkle-patricia tree.
+type mptResolver struct{}
+
+// forEach implements childResolver: it decodes the provided node and
+// traverses the children inside.
+func (resolver mptResolver) forEach(node []byte, onChild func(common.Hash)) {
+ forGatherChildren(mustDecodeNode(nil, node), onChild)
+}
+
+// forGatherChildren traverses the node hierarchy and invokes the callback
+// for all the hashnode children.
+func forGatherChildren(n node, onChild func(hash common.Hash)) {
switch n := n.(type) {
case *shortNode:
- // A short node contains a compacted key, and a value.
- return 3 + len(n.Key) + estimateSize(n.Val)
+ forGatherChildren(n.Val, onChild)
case *fullNode:
- // A full node contains up to 16 hashes (some nils), and a key
- s := 3
for i := 0; i < 16; i++ {
- if child := n.Children[i]; child != nil {
- s += estimateSize(child)
- } else {
- s++
- }
+ forGatherChildren(n.Children[i], onChild)
}
- return s
- case valueNode:
- return 1 + len(n)
case hashNode:
- return 1 + len(n)
+ onChild(common.BytesToHash(n))
+ case valueNode, nil:
default:
- panic(fmt.Sprintf("node type %T", n))
+ panic(fmt.Sprintf("unknown node type: %T", n))
}
}
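With nodes kept as raw encoded blobs, child discovery goes through the
resolver interface rather than expanded node objects; a small sketch
against the interface shape introduced here (countChildren is
illustrative only):

package sketch

import "github.com/ethereum/go-ethereum/common"

// childResolver mirrors the interface added in trie/database.go.
type childResolver interface {
	forEach(node []byte, onChild func(common.Hash))
}

// countChildren counts the hashnode children referenced by an encoded
// trie node, without ever materializing the expanded node.
func countChildren(r childResolver, blob []byte) int {
	var n int
	r.forEach(blob, func(common.Hash) { n++ })
	return n
}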
diff --git a/trie/database.go b/trie/database.go
index 6b1bc7a62d..bd9d97d50b 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -18,8 +18,6 @@ package trie
import (
"errors"
- "fmt"
- "io"
"reflect"
"runtime"
"sync"
@@ -59,6 +57,12 @@ var (
memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)
+// childResolver defines the required method to decode the provided
+// trie node and iterate the children on top.
+type childResolver interface {
+ forEach(node []byte, onChild func(common.Hash))
+}
+
// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
@@ -68,7 +72,8 @@ var (
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
- diskdb ethdb.Database // Persistent storage for matured trie nodes
+ diskdb ethdb.Database // Persistent storage for matured trie nodes
+ resolver childResolver // Resolver for trie node children
cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
@@ -89,62 +94,14 @@ type Database struct {
lock sync.RWMutex
}
-// rawNode is a simple binary blob used to differentiate between collapsed trie
-// nodes and already encoded RLP binary blobs (while at the same time store them
-// in the same cache fields).
-type rawNode []byte
-
-func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
-func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
-func (n rawNode) EncodeRLP(w io.Writer) error {
- _, err := w.Write(n)
- return err
-}
-
-// rawFullNode represents only the useful data content of a full node, with the
-// caches and flags stripped out to minimize its data storage. This type honors
-// the same RLP encoding as the original parent.
-type rawFullNode [17]node
-
-func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
-func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
-func (n rawFullNode) EncodeRLP(w io.Writer) error {
- var nodes [17]node
-
- for i, child := range n {
- if child != nil {
- nodes[i] = child
- } else {
- nodes[i] = nilValueNode
- }
- }
- return rlp.Encode(w, nodes)
-}
-
-// rawShortNode represents only the useful data content of a short node, with the
-// caches and flags stripped out to minimize its data storage. This type honors
-// the same RLP encoding as the original parent.
-type rawShortNode struct {
- Key []byte
- Val node
-}
-
-func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
-func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }
-
// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
- node node // Cached collapsed trie node, or raw rlp data
- size uint16 // Byte size of the useful cached data
-
- parents uint32 // Number of live nodes referencing this one
- children map[common.Hash]uint16 // External children referenced by this node
-
- flushPrev common.Hash // Previous node in the flush-list
- flushNext common.Hash // Next node in the flush-list
+ node []byte // Encoded node blob
+ parents uint32 // Number of live nodes referencing this one
+ external map[common.Hash]struct{} // The set of external children
+ flushPrev common.Hash // Previous node in the flush-list
+ flushNext common.Hash // Next node in the flush-list
}
// cachedNodeSize is the raw size of a cachedNode data structure without any
@@ -152,122 +109,14 @@ type cachedNode struct {
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())
-// cachedNodeChildrenSize is the raw size of an initialized but empty external
-// reference map.
-const cachedNodeChildrenSize = 48
-
-// rlp returns the raw rlp encoded blob of the cached trie node, either directly
-// from the cache, or by regenerating it from the collapsed node.
-func (n *cachedNode) rlp() []byte {
- if node, ok := n.node.(rawNode); ok {
- return node
- }
- blob, err := rlp.EncodeToBytes(n.node)
- if err != nil {
- panic(err)
- }
- return blob
-}
-
-// obj returns the decoded and expanded trie node, either directly from the cache,
-// or by regenerating it from the rlp encoded blob.
-func (n *cachedNode) obj(hash common.Hash) node {
- if node, ok := n.node.(rawNode); ok {
- return mustDecodeNode(hash[:], node)
- }
- return expandNode(hash[:], n.node)
-}
-
-// forChilds invokes the callback for all the tracked children of this node,
+// forChildren invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
-func (n *cachedNode) forChilds(onChild func(hash common.Hash)) {
- for child := range n.children {
+func (n *cachedNode) forChildren(resolver childResolver, onChild func(hash common.Hash)) {
+ for child := range n.external {
onChild(child)
}
- if _, ok := n.node.(rawNode); !ok {
- forGatherChildren(n.node, onChild)
- }
-}
-
-// forGatherChildren traverses the node hierarchy of a collapsed storage node and
-// invokes the callback for all the hashnode children.
-func forGatherChildren(n node, onChild func(hash common.Hash)) {
- switch n := n.(type) {
- case *rawShortNode:
- forGatherChildren(n.Val, onChild)
- case rawFullNode:
- for i := 0; i < 16; i++ {
- forGatherChildren(n[i], onChild)
- }
- case hashNode:
- onChild(common.BytesToHash(n))
- case valueNode, nil, rawNode:
- default:
- panic(fmt.Sprintf("unknown node type: %T", n))
- }
-}
-
-// simplifyNode traverses the hierarchy of an expanded memory node and discards
-// all the internal caches, returning a node that only contains the raw data.
-func simplifyNode(n node) node {
- switch n := n.(type) {
- case *shortNode:
- // Short nodes discard the flags and cascade
- return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)}
-
- case *fullNode:
- // Full nodes discard the flags and cascade
- node := rawFullNode(n.Children)
- for i := 0; i < len(node); i++ {
- if node[i] != nil {
- node[i] = simplifyNode(node[i])
- }
- }
- return node
-
- case valueNode, hashNode, rawNode:
- return n
-
- default:
- panic(fmt.Sprintf("unknown node type: %T", n))
- }
-}
-
-// expandNode traverses the node hierarchy of a collapsed storage node and converts
-// all fields and keys into expanded memory form.
-func expandNode(hash hashNode, n node) node {
- switch n := n.(type) {
- case *rawShortNode:
- // Short nodes need key and child expansion
- return &shortNode{
- Key: compactToHex(n.Key),
- Val: expandNode(nil, n.Val),
- flags: nodeFlag{
- hash: hash,
- },
- }
-
- case rawFullNode:
- // Full nodes need child expansion
- node := &fullNode{
- flags: nodeFlag{
- hash: hash,
- },
- }
- for i := 0; i < len(node.Children); i++ {
- if n[i] != nil {
- node.Children[i] = expandNode(nil, n[i])
- }
- }
- return node
-
- case valueNode, hashNode:
- return n
-
- default:
- panic(fmt.Sprintf("unknown node type: %T", n))
- }
+ resolver.forEach(n.node, onChild)
}
// Config defines all necessary options for database.
@@ -303,11 +152,10 @@ func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
}
db := &Database{
- diskdb: diskdb,
- cleans: cleans,
- dirties: map[common.Hash]*cachedNode{{}: {
- children: make(map[common.Hash]uint16),
- }},
+ diskdb: diskdb,
+ resolver: mptResolver{},
+ cleans: cleans,
+ dirties: make(map[common.Hash]*cachedNode),
preimages: preimage,
}
return db
@@ -322,20 +170,19 @@ func (db *Database) DiskDB() ethdb.KeyValueStore {
// The blob size must be specified to allow proper size tracking.
// All nodes inserted by this function will be reference tracked
// and in theory should only be used for **trie nodes** insertion.
-func (db *Database) insert(hash common.Hash, size int, node node) {
+func (db *Database) insert(hash common.Hash, node []byte) {
// If the node's already cached, skip
if _, ok := db.dirties[hash]; ok {
return
}
- memcacheDirtyWriteMeter.Mark(int64(size))
+ memcacheDirtyWriteMeter.Mark(int64(len(node)))
// Create the cached entry for this node
entry := &cachedNode{
node: node,
- size: uint16(size),
flushPrev: db.newest,
}
- entry.forChilds(func(child common.Hash) {
+ entry.forChildren(db.resolver, func(child common.Hash) {
if c := db.dirties[child]; c != nil {
c.parents++
}
@@ -348,43 +195,7 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
} else {
db.dirties[db.newest].flushNext, db.newest = hash, hash
}
- db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
-}
-
-// node retrieves a cached trie node from memory, or returns nil if none can be
-// found in the memory cache.
-func (db *Database) node(hash common.Hash) node {
- // Retrieve the node from the clean cache if available
- if db.cleans != nil {
- if enc := db.cleans.Get(nil, hash[:]); enc != nil {
- memcacheCleanHitMeter.Mark(1)
- memcacheCleanReadMeter.Mark(int64(len(enc)))
- return mustDecodeNode(hash[:], enc)
- }
- }
- // Retrieve the node from the dirty cache if available
- db.lock.RLock()
- dirty := db.dirties[hash]
- db.lock.RUnlock()
-
- if dirty != nil {
- memcacheDirtyHitMeter.Mark(1)
- memcacheDirtyReadMeter.Mark(int64(dirty.size))
- return dirty.obj(hash)
- }
- memcacheDirtyMissMeter.Mark(1)
-
- // Content unavailable in memory, attempt to retrieve from disk
- enc, err := db.diskdb.Get(hash[:])
- if err != nil || enc == nil {
- return nil
- }
- if db.cleans != nil {
- db.cleans.Set(hash[:], enc)
- memcacheCleanMissMeter.Mark(1)
- memcacheCleanWriteMeter.Mark(int64(len(enc)))
- }
- return mustDecodeNode(hash[:], enc)
+ db.dirtiesSize += common.StorageSize(common.HashLength + len(node))
}
// Node retrieves an encoded cached trie node from memory. If it cannot be found
@@ -409,8 +220,8 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
if dirty != nil {
memcacheDirtyHitMeter.Mark(1)
- memcacheDirtyReadMeter.Mark(int64(dirty.size))
- return dirty.rlp(), nil
+ memcacheDirtyReadMeter.Mark(int64(len(dirty.node)))
+ return dirty.node, nil
}
memcacheDirtyMissMeter.Mark(1)
@@ -436,9 +247,7 @@ func (db *Database) Nodes() []common.Hash {
var hashes = make([]common.Hash, 0, len(db.dirties))
for hash := range db.dirties {
- if hash != (common.Hash{}) { // Special case for "root" references/nodes
- hashes = append(hashes, hash)
- }
+ hashes = append(hashes, hash)
}
return hashes
}
@@ -461,18 +270,22 @@ func (db *Database) reference(child common.Hash, parent common.Hash) {
if !ok {
return
}
- // If the reference already exists, only duplicate for roots
- if db.dirties[parent].children == nil {
- db.dirties[parent].children = make(map[common.Hash]uint16)
- db.childrenSize += cachedNodeChildrenSize
- } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
+ // The reference is for state root, increase the reference counter.
+ if parent == (common.Hash{}) {
+ node.parents += 1
return
}
- node.parents++
- db.dirties[parent].children[child]++
- if db.dirties[parent].children[child] == 1 {
- db.childrenSize += common.HashLength + 2 // uint16 counter
+ // The reference is for external storage trie, don't duplicate if
+ // the reference is already existent.
+ if db.dirties[parent].external == nil {
+ db.dirties[parent].external = make(map[common.Hash]struct{})
+ }
+ if _, ok := db.dirties[parent].external[child]; ok {
+ return
}
+ node.parents++
+ db.dirties[parent].external[child] = struct{}{}
+ db.childrenSize += common.HashLength
}
// Dereference removes an existing reference from a root node.
@@ -486,7 +299,7 @@ func (db *Database) Dereference(root common.Hash) {
defer db.lock.Unlock()
nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
- db.dereference(root, common.Hash{})
+ db.dereference(root)
db.gcnodes += uint64(nodes - len(db.dirties))
db.gcsize += storage - db.dirtiesSize
@@ -501,23 +314,13 @@ func (db *Database) Dereference(root common.Hash) {
}
// dereference is the private locked version of Dereference.
-func (db *Database) dereference(child common.Hash, parent common.Hash) {
- // Dereference the parent-child
- node := db.dirties[parent]
-
- if node.children != nil && node.children[child] > 0 {
- node.children[child]--
- if node.children[child] == 0 {
- delete(node.children, child)
- db.childrenSize -= (common.HashLength + 2) // uint16 counter
- }
- }
- // If the child does not exist, it's a previously committed node.
- node, ok := db.dirties[child]
+func (db *Database) dereference(hash common.Hash) {
+ // If the hash does not exist, it's a previously committed node.
+ node, ok := db.dirties[hash]
if !ok {
return
}
- // If there are no more references to the child, delete it and cascade
+ // If there are no more references to the node, delete it and cascade
if node.parents > 0 {
// This is a special cornercase where a node loaded from disk (i.e. not in the
// memcache any more) gets reinjected as a new node (short node split into full,
@@ -527,25 +330,29 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) {
}
if node.parents == 0 {
// Remove the node from the flush-list
- switch child {
+ switch hash {
case db.oldest:
db.oldest = node.flushNext
- db.dirties[node.flushNext].flushPrev = common.Hash{}
+ if node.flushNext != (common.Hash{}) {
+ db.dirties[node.flushNext].flushPrev = common.Hash{}
+ }
case db.newest:
db.newest = node.flushPrev
- db.dirties[node.flushPrev].flushNext = common.Hash{}
+ if node.flushPrev != (common.Hash{}) {
+ db.dirties[node.flushPrev].flushNext = common.Hash{}
+ }
default:
db.dirties[node.flushPrev].flushNext = node.flushNext
db.dirties[node.flushNext].flushPrev = node.flushPrev
}
// Dereference all children and delete the node
- node.forChilds(func(hash common.Hash) {
- db.dereference(hash, child)
+ node.forChildren(db.resolver, func(child common.Hash) {
+ db.dereference(child)
})
- delete(db.dirties, child)
- db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
- if node.children != nil {
- db.childrenSize -= cachedNodeChildrenSize
+ delete(db.dirties, hash)
+ db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
+ if node.external != nil {
+ db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
}
}
}
@@ -566,8 +373,8 @@ func (db *Database) Cap(limit common.StorageSize) error {
// db.dirtiesSize only contains the useful data in the cache, but when reporting
// the total memory consumption, the maintenance metadata is also needed to be
// counted.
- size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
- size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))
+ size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
+ size += db.childrenSize
// If the preimage cache got large enough, push to disk. If it's still small
// leave for later to deduplicate writes.
@@ -580,7 +387,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
for size > limit && oldest != (common.Hash{}) {
// Fetch the oldest referenced node and push into the batch
node := db.dirties[oldest]
- rawdb.WriteLegacyTrieNode(batch, oldest, node.rlp())
+ rawdb.WriteLegacyTrieNode(batch, oldest, node.node)
// If we exceeded the ideal batch size, commit and reset
if batch.ValueSize() >= ethdb.IdealBatchSize {
@@ -593,9 +400,9 @@ func (db *Database) Cap(limit common.StorageSize) error {
// Iterate to the next flush item, or abort if the size cap was achieved. Size
// is the total size, including the useful cached data (hash -> blob), the
// cache item metadata, as well as external children mappings.
- size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
- if node.children != nil {
- size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
+ size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
+ if node.external != nil {
+ size -= common.StorageSize(len(node.external) * common.HashLength)
}
oldest = node.flushNext
}
@@ -613,9 +420,9 @@ func (db *Database) Cap(limit common.StorageSize) error {
delete(db.dirties, db.oldest)
db.oldest = node.flushNext
- db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
- if node.children != nil {
- db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
+ db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
+ if node.external != nil {
+ db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
}
}
if db.oldest != (common.Hash{}) {
@@ -701,7 +508,9 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
return nil
}
var err error
- node.forChilds(func(child common.Hash) {
+
+ // Dereference all children and delete the node
+ node.forChildren(db.resolver, func(child common.Hash) {
if err == nil {
err = db.commit(child, batch, uncacher, callback)
}
@@ -710,7 +519,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
return err
}
// If we've reached an optimal batch size, commit and start over
- rawdb.WriteLegacyTrieNode(batch, hash, node.rlp())
+ rawdb.WriteLegacyTrieNode(batch, hash, node.node)
if callback != nil {
callback(hash)
}
@@ -749,19 +558,23 @@ func (c *cleaner) Put(key []byte, rlp []byte) error {
switch hash {
case c.db.oldest:
c.db.oldest = node.flushNext
- c.db.dirties[node.flushNext].flushPrev = common.Hash{}
+ if node.flushNext != (common.Hash{}) {
+ c.db.dirties[node.flushNext].flushPrev = common.Hash{}
+ }
case c.db.newest:
c.db.newest = node.flushPrev
- c.db.dirties[node.flushPrev].flushNext = common.Hash{}
+ if node.flushPrev != (common.Hash{}) {
+ c.db.dirties[node.flushPrev].flushNext = common.Hash{}
+ }
default:
c.db.dirties[node.flushPrev].flushNext = node.flushNext
c.db.dirties[node.flushNext].flushPrev = node.flushPrev
}
// Remove the node from the dirty cache
delete(c.db.dirties, hash)
- c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
- if node.children != nil {
- c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
+ c.db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
+ if node.external != nil {
+ c.db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
}
// Move the flushed node into the clean cache to prevent insta-reloads
if c.db.cleans != nil {
@@ -802,7 +615,7 @@ func (db *Database) Update(nodes *MergedNodeSet) error {
if n.isDeleted() {
return // ignore deletion
}
- db.insert(n.hash, int(n.size), n.node)
+ db.insert(n.hash, n.node)
})
}
// Link up the account trie and storage trie if the node points
@@ -831,13 +644,12 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
// db.dirtiesSize only contains the useful data in the cache, but when reporting
// the total memory consumption, the maintenance metadata is also needed to be
// counted.
- var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
- var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
+ var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
var preimageSize common.StorageSize
if db.preimages != nil {
preimageSize = db.preimages.size()
}
- return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
+ return db.dirtiesSize + db.childrenSize + metadataSize, preimageSize
}
// GetReader retrieves a node reader belonging to the given state root.
@@ -855,15 +667,9 @@ func newHashReader(db *Database) *hashReader {
return &hashReader{db: db}
}
-// Node retrieves the trie node with the given node hash.
-// No error will be returned if the node is not found.
-func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) (node, error) {
- return reader.db.node(hash), nil
-}
-
-// NodeBlob retrieves the RLP-encoded trie node blob with the given node hash.
+// Node retrieves the RLP-encoded trie node blob with the given node hash.
// No error will be returned if the node is not found.
-func (reader *hashReader) NodeBlob(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
+func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
blob, _ := reader.db.Node(hash)
return blob, nil
}
diff --git a/trie/iterator.go b/trie/iterator.go
index 20c4d44fb6..5bd69af3f0 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -377,12 +377,14 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
}
}
}
- // Retrieve the specified node from the underlying node reader.
- // it.trie.resolveAndTrack is not used since in that function the
- // loaded blob will be tracked, while it's not required here since
- // all loaded nodes won't be linked to trie at all and track nodes
- // may lead to out-of-memory issue.
- return it.trie.reader.node(path, common.BytesToHash(hash))
+ blob, err := it.trie.reader.node(path, common.BytesToHash(hash))
+ if err != nil {
+ return nil, err
+ }
+ // The raw-blob format nodes are loaded either from the
+ // clean cache or the database, they are all in their own
+ // copy and safe to use unsafe decoder.
+ return mustDecodeNode(hash, blob), nil
}
func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
@@ -396,7 +398,7 @@ func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error)
// loaded blob will be tracked, while it's not required here since
// all loaded nodes won't be linked to trie at all and track nodes
// may lead to out-of-memory issue.
- return it.trie.reader.nodeBlob(path, common.BytesToHash(hash))
+ return it.trie.reader.node(path, common.BytesToHash(hash))
}
func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
diff --git a/trie/node.go b/trie/node.go
index f4055e779a..07a6595b03 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -105,6 +105,18 @@ func (n valueNode) fstring(ind string) string {
return fmt.Sprintf("%x ", []byte(n))
}
+// rawNode is a simple binary blob used to differentiate between collapsed trie
+// nodes and already encoded RLP binary blobs (while at the same time store them
+// in the same cache fields).
+type rawNode []byte
+
+func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") }
+func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }
+func (n rawNode) EncodeRLP(w io.Writer) error {
+ _, err := w.Write(n)
+ return err
+}
+
func mustDecodeNode(hash, buf []byte) node {
n, err := decodeNode(hash, buf)
if err != nil {
diff --git a/trie/nodeset.go b/trie/nodeset.go
index 6b99dbebc6..9288033548 100644
--- a/trie/nodeset.go
+++ b/trie/nodeset.go
@@ -18,33 +18,23 @@ package trie
import (
"fmt"
- "reflect"
"sort"
"strings"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
)
// memoryNode is all the information we know about a single cached trie node
// in the memory.
type memoryNode struct {
hash common.Hash // Node hash, computed by hashing rlp value, empty for deleted nodes
- size uint16 // Byte size of the useful cached data, 0 for deleted nodes
- node node // Cached collapsed trie node, or raw rlp data, nil for deleted nodes
+ node []byte // Encoded node blob, nil for deleted nodes
}
-// memoryNodeSize is the raw size of a memoryNode data structure without any
-// node data included. It's an approximate size, but should be a lot better
-// than not counting them.
-// nolint:unused
-var memoryNodeSize = int(reflect.TypeOf(memoryNode{}).Size())
-
// memorySize returns the total memory size used by this node.
// nolint:unused
func (n *memoryNode) memorySize(pathlen int) int {
- return int(n.size) + memoryNodeSize + pathlen
+ return len(n.node) + common.HashLength + pathlen
}
// isDeleted returns the indicator if the node is marked as deleted.
@@ -56,24 +46,14 @@ func (n *memoryNode) isDeleted() bool {
// from the cache, or by regenerating it from the collapsed node.
// nolint:unused
func (n *memoryNode) rlp() []byte {
- if node, ok := n.node.(rawNode); ok {
- return node
- }
- enc, err := rlp.EncodeToBytes(n.node)
- if err != nil {
- log.Error("Failed to encode trie node", "err", err)
- }
- return enc
+ return n.node
}
// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
// nolint:unused
func (n *memoryNode) obj() node {
- if node, ok := n.node.(rawNode); ok {
- return mustDecodeNode(n.hash[:], node)
- }
- return expandNode(n.hash[:], n.node)
+ return mustDecodeNode(n.hash[:], n.node)
}
// nodeWithPrev wraps the memoryNode with the previous node value.
diff --git a/trie/proof.go b/trie/proof.go
index 29c6aa2c54..c8179eeeb4 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -65,12 +65,15 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
// loaded blob will be tracked, while it's not required here since
// all loaded nodes won't be linked to trie at all and track nodes
// may lead to out-of-memory issue
- var err error
- tn, err = t.reader.node(prefix, common.BytesToHash(n))
+ blob, err := t.reader.node(prefix, common.BytesToHash(n))
if err != nil {
log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
return err
}
+ // The raw-blob format nodes are loaded either from the
+ // clean cache or the database, they are all in their own
+ // copy and safe to use unsafe decoder.
+ tn = mustDecodeNode(n, blob)
default:
panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
}
diff --git a/trie/trie.go b/trie/trie.go
index 596cc31d1a..bbfb0b662f 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -193,7 +193,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new
if hash == nil {
return nil, origNode, 0, errors.New("non-consensus node")
}
- blob, err := t.reader.nodeBlob(path, common.BytesToHash(hash))
+ blob, err := t.reader.node(path, common.BytesToHash(hash))
return blob, origNode, 1, err
}
@@ -560,7 +560,7 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
// node's original value. The rlp-encoded blob is preferred to be loaded from
// database because it's easy to decode node while complex to encode node to blob.
func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
- blob, err := t.reader.nodeBlob(prefix, common.BytesToHash(n))
+ blob, err := t.reader.node(prefix, common.BytesToHash(n))
if err != nil {
return nil, err
}
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index 14186159b7..1f3a2b8982 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -24,15 +24,10 @@ import (
// Reader wraps the Node and NodeBlob method of a backing trie store.
type Reader interface {
- // Node retrieves the trie node with the provided trie identifier, hexary
- // node path and the corresponding node hash.
- // No error will be returned if the node is not found.
- Node(owner common.Hash, path []byte, hash common.Hash) (node, error)
-
- // NodeBlob retrieves the RLP-encoded trie node blob with the provided trie
- // identifier, hexary node path and the corresponding node hash.
- // No error will be returned if the node is not found.
- NodeBlob(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
+ // Node retrieves the RLP-encoded trie node blob with the provided trie
+ // identifier, node path and the corresponding node hash. No error will
+ // be returned if the node is not found.
+ Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
}
// NodeReader wraps all the necessary functions for accessing trie node.
@@ -65,30 +60,10 @@ func newEmptyReader() *trieReader {
return &trieReader{}
}
-// node retrieves the trie node with the provided trie node information.
-// An MissingNodeError will be returned in case the node is not found or
-// any error is encountered.
-func (r *trieReader) node(path []byte, hash common.Hash) (node, error) {
- // Perform the logics in tests for preventing trie node access.
- if r.banned != nil {
- if _, ok := r.banned[string(path)]; ok {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
- }
- }
- if r.reader == nil {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
- }
- node, err := r.reader.Node(r.owner, path, hash)
- if err != nil || node == nil {
- return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
- }
- return node, nil
-}
-
// node retrieves the rlp-encoded trie node with the provided trie node
// information. An MissingNodeError will be returned in case the node is
// not found or any error is encountered.
-func (r *trieReader) nodeBlob(path []byte, hash common.Hash) ([]byte, error) {
+func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) {
// Perform the logics in tests for preventing trie node access.
if r.banned != nil {
if _, ok := r.banned[string(path)]; ok {
@@ -98,7 +73,7 @@ func (r *trieReader) nodeBlob(path []byte, hash common.Hash) ([]byte, error) {
if r.reader == nil {
return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path}
}
- blob, err := r.reader.NodeBlob(r.owner, path, hash)
+ blob, err := r.reader.Node(r.owner, path, hash)
if err != nil || len(blob) == 0 {
return nil, &MissingNodeError{Owner: r.owner, NodeHash: hash, Path: path, err: err}
}
From 97ccc2ea90c0ca9e8356b812260ae69c852d8585 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Tue, 1 Oct 2024 15:58:45 +0700
Subject: [PATCH 19/41] trie: add trie db wrapper; refactor trienode (#588)
* trie: add wrapper for database
* trie: refactor trie node
* all: fix test
* rawdb, trie: fix comment
trie: change name WithPrev => NodeWithPrev
rawdb: add schema_test
---
core/blockchain.go | 8 +-
core/blockchain_repair_test.go | 6 +-
core/blockchain_sethead_test.go | 2 +-
core/blockchain_snapshot_test.go | 2 +-
core/blockchain_test.go | 2 +-
core/chain_makers.go | 2 +-
core/dao_test.go | 8 +-
core/genesis.go | 2 +-
core/rawdb/schema.go | 48 +++-
core/rawdb/schema_test.go | 227 +++++++++++++++++++
core/state/database.go | 3 +-
core/state/iterator_test.go | 2 +-
core/state/snapshot/generate.go | 6 +-
core/state/snapshot/generate_test.go | 12 +-
core/state/state_object.go | 4 +-
core/state/statedb.go | 5 +-
core/state/statedb_test.go | 6 +-
core/state/sync_test.go | 14 +-
eth/protocols/snap/sync_test.go | 19 +-
light/postprocess.go | 16 +-
light/trie.go | 3 +-
tests/fuzzers/stacktrie/trie_fuzzer.go | 6 +-
tests/fuzzers/trie/trie-fuzzer.go | 15 +-
trie/committer.go | 32 +--
trie/database_test.go | 21 +-
trie/database_wrap.go | 287 ++++++++++++++++++++++++
trie/iterator_test.go | 243 +++++++++++++-------
trie/nodeset.go | 198 -----------------
trie/secure_trie.go | 3 +-
trie/sync_test.go | 293 +++++++++++++++++++------
trie/tracer.go | 9 +-
trie/tracer_test.go | 25 ++-
trie/trie.go | 5 +-
trie/trie_reader.go | 6 +-
trie/trie_test.go | 164 +++++++-------
trie/{ => triedb/hashdb}/database.go | 193 ++++++----------
trie/trienode/node.go | 195 ++++++++++++++++
37 files changed, 1427 insertions(+), 665 deletions(-)
create mode 100644 core/rawdb/schema_test.go
create mode 100644 trie/database_wrap.go
delete mode 100644 trie/nodeset.go
rename trie/{ => triedb/hashdb}/database.go (81%)
create mode 100644 trie/trienode/node.go
diff --git a/core/blockchain.go b/core/blockchain.go
index cbc69ac6f0..588ab667f9 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -1019,14 +1019,14 @@ func (bc *BlockChain) Stop() {
recent := bc.GetBlockByNumber(number - offset)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
- if err := triedb.Commit(recent.Root(), true, nil); err != nil {
+ if err := triedb.Commit(recent.Root(), true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
}
}
}
if snapBase != (common.Hash{}) {
log.Info("Writing snapshot state to disk", "root", snapBase)
- if err := triedb.Commit(snapBase, true, nil); err != nil {
+ if err := triedb.Commit(snapBase, true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
}
}
@@ -1583,7 +1583,7 @@ func (bc *BlockChain) writeBlockWithState(
// If we're running an archive node, always flush
if bc.cacheConfig.TrieDirtyDisabled {
- if err := bc.triedb.Commit(root, false, nil); err != nil {
+ if err := bc.triedb.Commit(root, false); err != nil {
return NonStatTy, err
}
} else {
@@ -1623,7 +1623,7 @@ func (bc *BlockChain) writeBlockWithState(
)
}
// Flush an entire trie and restart the counters
- bc.triedb.Commit(header.Root, true, nil)
+ bc.triedb.Commit(header.Root, true)
lastWrite = chosen
bc.gcproc = 0
}
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index 9e100a854a..81503ad132 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1810,7 +1810,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
- chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
@@ -1935,7 +1935,7 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[:1], nil); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(blocks[0].Root(), true)
// Insert block B2 and commit the snapshot into disk
if _, err := chain.InsertChain(blocks[1:2], nil); err != nil {
@@ -1949,7 +1949,7 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[2:3], nil); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(blocks[2].Root(), true)
// Insert the remaining blocks
if _, err := chain.InsertChain(blocks[3:], nil); err != nil {
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index 11c7693436..3a66101c1c 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -2009,7 +2009,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
- chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index e5affaab9b..c4e1229648 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -106,7 +106,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
startPoint = point
if basic.commitBlock > 0 && basic.commitBlock == point {
- chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true)
}
if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
// Flushing the entire snap tree into the disk, the
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index bbc3d38f4e..ed4e3a6994 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -1560,7 +1560,7 @@ func TestTrieForkGC(t *testing.T) {
chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
}
- if len(chain.stateCache.TrieDB().Nodes()) > 0 {
+ if nodes, _ := chain.TrieDB().Size(); nodes > 0 {
t.Fatalf("stale tries still alive after garbase collection")
}
}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 5cec7c0214..0554528a96 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -323,7 +323,7 @@ func generateChain(
panic(fmt.Sprintf("state write error: %v", err))
}
if flushDisk {
- if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil {
+ if err := statedb.Database().TrieDB().Commit(root, false); err != nil {
panic(fmt.Sprintf("trie write error: %v", err))
}
}
diff --git a/core/dao_test.go b/core/dao_test.go
index adf3464bd3..2fa5b4e26c 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -94,7 +94,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+ if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -119,7 +119,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+ if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -145,7 +145,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+ if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -165,7 +165,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+ if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
diff --git a/core/genesis.go b/core/genesis.go
index b3f6e1b7fc..5b248c5bbf 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -123,7 +123,7 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database) error {
}
// Commit newly generated states into disk if it's not empty.
if root != types.EmptyRootHash {
- if err := triedb.Commit(root, true, nil); err != nil {
+ if err := triedb.Commit(root, true); err != nil {
return err
}
}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index ae9cbcff96..439e47df9f 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -22,6 +22,7 @@ import (
"encoding/binary"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
)
@@ -103,7 +104,7 @@ var (
internalTxsPrefix = []byte("itxs") // internalTxsPrefix + block hash -> internal transactions
dirtyAccountsKey = []byte("dacc") // dirtyAccountsPrefix + block hash -> dirty accounts
- // Path-based trie node scheme.
+ // Path-based storage scheme of merkle patricia trie.
trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
@@ -250,3 +251,48 @@ func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
func snapshotConsortiumKey(hash common.Hash) []byte {
return append(snapshotConsortiumPrefix, hash.Bytes()...)
}
+
+// IsLegacyTrieNode reports whether a provided database entry is a legacy trie
+// node. The characteristics of legacy trie node are:
+// - the key length is 32 bytes
+// - the key is the hash of val
+func IsLegacyTrieNode(key []byte, val []byte) bool {
+ if len(key) != common.HashLength {
+ return false
+ }
+ return bytes.Equal(key, crypto.Keccak256(val))
+}
+
+// IsAccountTrieNode reports whether a provided database entry is an account
+// trie node in path-based state scheme.
+func IsAccountTrieNode(key []byte) (bool, []byte) {
+ if !bytes.HasPrefix(key, trieNodeAccountPrefix) {
+ return false, nil
+ }
+ // The remaining key should only consist a hex node path
+ // whose length is in the range 0 to 64 (64 is excluded
+ // since leaves are always wrapped with shortNode).
+ if len(key) >= len(trieNodeAccountPrefix)+common.HashLength*2 {
+ return false, nil
+ }
+ return true, key[len(trieNodeAccountPrefix):]
+}
+
+// IsStorageTrieNode reports whether a provided database entry is a storage
+// trie node in path-based state scheme.
+func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
+ if !bytes.HasPrefix(key, trieNodeStoragePrefix) {
+ return false, common.Hash{}, nil
+ }
+ // The remaining key consists of 2 parts:
+ // - 32 bytes account hash
+ // - hex node path whose length is in the range 0 to 64
+ if len(key) < len(trieNodeStoragePrefix)+common.HashLength {
+ return false, common.Hash{}, nil
+ }
+ if len(key) >= len(trieNodeStoragePrefix)+common.HashLength+common.HashLength*2 {
+ return false, common.Hash{}, nil
+ }
+ accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength])
+ return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:]
+}
diff --git a/core/rawdb/schema_test.go b/core/rawdb/schema_test.go
new file mode 100644
index 0000000000..a1009b3bd2
--- /dev/null
+++ b/core/rawdb/schema_test.go
@@ -0,0 +1,227 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+var (
+ bytes4 = []byte{0x00, 0x01, 0x02, 0x03}
+ bytes20 = []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03}
+ bytes32 = []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}
+ bytes63 = []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e}
+ bytes64 = []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}
+ bytes65 = []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00}
+)
+
+func TestIsLegacyTrieNode(t *testing.T) {
+ tests := []struct {
+ name string
+ inputData []byte
+ inputKey []byte
+ expected bool
+ }{
+ {
+ name: "empty",
+ inputKey: []byte{},
+ expected: false,
+ },
+ {
+ name: "non-legacy (too short)",
+ inputKey: []byte{0x00, 0x01, 0x02, 0x03},
+ expected: false,
+ },
+ {
+ name: "legacy",
+ inputData: []byte{0x00, 0x01, 0x02, 0x03},
+ inputKey: crypto.Keccak256([]byte{0x00, 0x01, 0x02, 0x03}),
+ expected: true,
+ },
+ {
+ name: "non-legacy (too long)",
+ inputKey: []byte{0x00, 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00},
+ expected: false,
+ },
+ {
+ name: "non-legacy (key is not hash of data)",
+ inputData: []byte{0x00, 0x01, 0x02, 0x03},
+ inputKey: crypto.Keccak256([]byte{0x00, 0x01, 0x02, 0x04}),
+ expected: false,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ if actual := IsLegacyTrieNode(test.inputKey, test.inputData); actual != test.expected {
+ t.Errorf("expected %v, got %v", test.expected, actual)
+ }
+ })
+ }
+}
+
+func TestIsAccountTrieNode(t *testing.T) {
+ tests := []struct {
+ name string
+ inputKey []byte
+ expectedCheck bool
+ expectedKey []byte
+ }{
+ {
+ name: "empty",
+ inputKey: []byte{},
+ expectedCheck: false,
+ expectedKey: nil,
+ },
+ {
+ name: "non account prefixed",
+ inputKey: bytes4,
+ expectedCheck: false,
+ expectedKey: nil,
+ },
+ {
+ name: "storage prefixed",
+ inputKey: append(trieNodeStoragePrefix, bytes4...),
+ expectedCheck: false,
+ expectedKey: nil,
+ },
+ {
+ name: "account prefixed length 4",
+ inputKey: accountTrieNodeKey(bytes4),
+ expectedCheck: true,
+ expectedKey: bytes4,
+ },
+ {
+ name: "account prefixed length 20",
+ inputKey: accountTrieNodeKey(bytes20),
+ expectedCheck: true,
+ expectedKey: bytes20,
+ },
+ {
+ name: "account prefixed length 63",
+ inputKey: accountTrieNodeKey(bytes63),
+ expectedCheck: true,
+ expectedKey: bytes63,
+ },
+ {
+ name: "account prefixed length 64",
+ inputKey: accountTrieNodeKey(bytes64),
+ expectedCheck: false,
+ expectedKey: nil,
+ },
+ {
+ name: "account prefixed length 65",
+ inputKey: accountTrieNodeKey(bytes65),
+ expectedCheck: false,
+ expectedKey: nil,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ if check, key := IsAccountTrieNode(test.inputKey); check != test.expectedCheck || !bytes.Equal(key, test.expectedKey) {
+ t.Errorf("expected %v, %v, got %v, %v", test.expectedCheck, test.expectedKey, check, key)
+ }
+ })
+ }
+}
+
+func TestIsStorageTrieNode(t *testing.T) {
+ tests := []struct {
+ name string
+ inputKey []byte
+ expectedCheck bool
+ expectedHash common.Hash
+ expectedKey []byte
+ }{
+ {
+ name: "empty",
+ inputKey: []byte{},
+ expectedCheck: false,
+ expectedHash: common.Hash{},
+ expectedKey: nil,
+ },
+ {
+ name: "non storage prefixed",
+ inputKey: []byte{0x00, 0x01, 0x02, 0x03},
+ expectedCheck: false,
+ expectedHash: common.Hash{},
+ expectedKey: nil,
+ },
+ {
+ name: "account prefixed",
+ inputKey: accountTrieNodeKey(bytes4),
+ expectedCheck: false,
+ expectedHash: common.Hash{},
+ expectedKey: nil,
+ },
+ {
+ name: "storage prefixed hash 20 length 4",
+ inputKey: append(append(trieNodeStoragePrefix, bytes20...), bytes4...),
+ expectedCheck: false,
+ expectedHash: common.Hash{},
+ expectedKey: nil,
+ },
+ {
+ name: "storage prefixed hash 32 length 4",
+ inputKey: storageTrieNodeKey(common.BytesToHash(bytes32), bytes4),
+ expectedCheck: true,
+ expectedHash: common.BytesToHash(bytes32),
+ expectedKey: bytes4,
+ },
+ {
+ name: "storage prefixed hash 32 length 20",
+ inputKey: storageTrieNodeKey(common.BytesToHash(bytes20), bytes20),
+ expectedCheck: true,
+ expectedHash: common.BytesToHash(bytes20),
+ expectedKey: bytes20,
+ },
+ {
+ name: "storage prefixed hash 32 length 63",
+ inputKey: storageTrieNodeKey(common.BytesToHash(bytes65), bytes63),
+ expectedCheck: true,
+ expectedHash: common.BytesToHash(bytes65),
+ expectedKey: bytes63,
+ },
+ {
+ name: "storage prefixed hash 32 length 64",
+ inputKey: storageTrieNodeKey(common.BytesToHash(bytes32), bytes64),
+ expectedCheck: false,
+ expectedHash: common.Hash{},
+ expectedKey: nil,
+ },
+ {
+ name: "storage prefixed hash 32 length 65",
+ inputKey: storageTrieNodeKey(common.BytesToHash(bytes32), bytes65),
+ expectedCheck: false,
+ expectedHash: common.Hash{},
+ expectedKey: nil,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ if check, hash, key := IsStorageTrieNode(test.inputKey); check != test.expectedCheck || !bytes.Equal(key, test.expectedKey) || hash != test.expectedHash {
+ t.Errorf("expected %v, %v, %v, got %v, %v, %v", test.expectedCheck, test.expectedHash, test.expectedKey, check, hash, key)
+ }
+ })
+ }
+}
diff --git a/core/state/database.go b/core/state/database.go
index d2837c83f9..6991e85843 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
lru "github.com/hashicorp/golang-lru/v2"
)
@@ -92,7 +93,7 @@ type Trie interface {
// corresponding node hash. All collected nodes(including dirty leaves if
// collectLeaf is true) will be encapsulated into a nodeset for return.
// The returned nodeset can be nil if the trie is clean(nothing to commit).
- Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error)
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key.
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
index 7669ac97a2..b093083db2 100644
--- a/core/state/iterator_test.go
+++ b/core/state/iterator_test.go
@@ -27,7 +27,7 @@ import (
func TestNodeIteratorCoverage(t *testing.T) {
// Create some arbitrary test state to iterate
db, sdb, root, _ := makeTestState()
- sdb.TrieDB().Commit(root, false, nil)
+ sdb.TrieDB().Commit(root, false)
state, err := New(root, sdb, nil)
if err != nil {
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 341a37a180..065402d52f 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -29,12 +29,14 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -438,9 +440,9 @@ func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string,
}
root, nodes, _ := snapTrie.Commit(false)
if nodes != nil {
- snapTrieDb.Update(trie.NewWithNodeSet(nodes))
+ snapTrieDb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
}
- snapTrieDb.Commit(root, false, nil)
+ snapTrieDb.Commit(root, false)
}
tr := result.tr
if tr == nil {
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index f4d2eeb15b..51138bbc19 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -25,10 +25,12 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -138,7 +140,7 @@ type testHelper struct {
diskdb ethdb.Database
triedb *trie.Database
accTrie *trie.SecureTrie
- nodes *trie.MergedNodeSet
+ nodes *trienode.MergedNodeSet
}
func newHelper() *testHelper {
@@ -149,7 +151,7 @@ func newHelper() *testHelper {
diskdb: diskdb,
triedb: triedb,
accTrie: accTrie,
- nodes: trie.NewMergedNodeSet(),
+ nodes: trienode.NewMergedNodeSet(),
}
}
@@ -196,8 +198,8 @@ func (t *testHelper) Commit() common.Hash {
if nodes != nil {
t.nodes.Merge(nodes)
}
- t.triedb.Update(t.nodes)
- t.triedb.Commit(root, false, nil)
+ t.triedb.Update(root, types.EmptyRootHash, t.nodes)
+ t.triedb.Commit(root, false)
return root
}
@@ -385,7 +387,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
// Delete an account trie leaf and ensure the generator chokes
- helper.triedb.Commit(root, false, nil)
+ helper.triedb.Commit(root, false)
helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
diff --git a/core/state/state_object.go b/core/state/state_object.go
index bb9e3d6781..3adcff01e9 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var emptyCodeHash = crypto.Keccak256(nil)
@@ -394,7 +394,7 @@ func (s *stateObject) updateRoot(db Database) {
// commitTrie submits the storage changes into the storage trie and re-computes
// the root. Besides, all trie changes will be collected in a nodeset and returned.
-func (s *stateObject) commitTrie(db Database) (*trie.NodeSet, error) {
+func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) {
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
return nil, nil
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 898e65597c..c15613de40 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
type revision struct {
@@ -978,7 +979,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
accountTrieNodesDeleted int
storageTrieNodesUpdated int
storageTrieNodesDeleted int
- nodes = trie.NewMergedNodeSet()
+ nodes = trienode.NewMergedNodeSet()
)
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
for addr := range s.stateObjectsDirty {
@@ -1082,7 +1083,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
if root != origin {
start := time.Now()
- if err := s.db.TrieDB().Update(nodes); err != nil {
+ if err := s.db.TrieDB().Update(root, origin, nodes); err != nil {
return common.Hash{}, err
}
s.originalRoot = root
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index be7d4e281e..f11a11731e 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -55,7 +55,7 @@ func TestUpdateLeaks(t *testing.T) {
}
root := state.IntermediateRoot(false)
- if err := state.Database().TrieDB().Commit(root, false, nil); err != nil {
+ if err := state.Database().TrieDB().Commit(root, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", root.Hex())
}
@@ -106,7 +106,7 @@ func TestIntermediateLeaks(t *testing.T) {
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
- if err = transState.Database().TrieDB().Commit(transRoot, false, nil); err != nil {
+ if err = transState.Database().TrieDB().Commit(transRoot, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}
@@ -114,7 +114,7 @@ func TestIntermediateLeaks(t *testing.T) {
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
- if err = finalState.Database().TrieDB().Commit(finalRoot, false, nil); err != nil {
+ if err = finalState.Database().TrieDB().Commit(finalRoot, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex())
}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index ffea17cee2..d7334f0639 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -175,7 +175,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
// Create a random state to copy
_, srcDb, srcRoot, srcAccounts := makeTestState()
if commit {
- srcDb.TrieDB().Commit(srcRoot, false, nil)
+ srcDb.TrieDB().Commit(srcRoot, false)
}
srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), srcDb.TrieDB())
@@ -329,7 +329,8 @@ func TestIterativeDelayedStateSync(t *testing.T) {
if len(nodeElements) > 0 {
nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1)
for i, element := range nodeElements[:len(nodeResults)] {
- data, err := srcDb.TrieDB().Node(element.hash)
+ owner, inner := trie.ResolvePath([]byte(element.path))
+ data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve contract bytecode for %x", element.code)
}
@@ -415,7 +416,8 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
if len(nodeQueue) > 0 {
results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
for path, element := range nodeQueue {
- data, err := srcDb.TrieDB().Node(element.hash)
+ owner, inner := trie.ResolvePath([]byte(element.path))
+ data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x %v %v", element.hash, []byte(element.path), element.path)
}
@@ -503,7 +505,8 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
for path, element := range nodeQueue {
delete(nodeQueue, path)
- data, err := srcDb.TrieDB().Node(element.hash)
+ owner, inner := trie.ResolvePath([]byte(element.path))
+ data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x", element.hash)
}
@@ -603,7 +606,8 @@ func TestIncompleteStateSync(t *testing.T) {
if len(nodeQueue) > 0 {
results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
for path, element := range nodeQueue {
- data, err := srcDb.TrieDB().Node(element.hash)
+ owner, inner := trie.ResolvePath([]byte(element.path))
+ data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x", element.hash)
}
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index ab3f691c84..0a0f3b9d76 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -1381,7 +1382,7 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes, _ := accTrie.Commit(false)
- db.Update(trie.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1442,7 +1443,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes, _ := accTrie.Commit(false)
- db.Update(trie.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1458,7 +1459,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
storageRoots = make(map[common.Hash]common.Hash)
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
- nodes = trie.NewMergedNodeSet()
+ nodes = trienode.NewMergedNodeSet()
)
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
@@ -1490,7 +1491,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
nodes.Merge(set)
// Commit gathered dirty nodes into database
- db.Update(nodes)
+ db.Update(root, types.EmptyRootHash, nodes)
// Re-create tries with new root
accTrie, _ = trie.New(trie.StateTrieID(root), db)
@@ -1511,7 +1512,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
storageRoots = make(map[common.Hash]common.Hash)
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
- nodes = trie.NewMergedNodeSet()
+ nodes = trienode.NewMergedNodeSet()
)
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
@@ -1523,7 +1524,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
// Make a storage trie
var (
stRoot common.Hash
- stNodes *trie.NodeSet
+ stNodes *trienode.NodeSet
stEntries entrySlice
)
if boundary {
@@ -1552,7 +1553,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
nodes.Merge(set)
// Commit gathered dirty nodes into database
- db.Update(nodes)
+ db.Update(root, types.EmptyRootHash, nodes)
// Re-create tries with new root
accTrie, err := trie.New(trie.StateTrieID(root), db)
@@ -1573,7 +1574,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
// that tries are unique.
-func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
+func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) {
trie, _ := trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
var entries entrySlice
for i := uint64(1); i <= n; i++ {
@@ -1596,7 +1597,7 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hash.
-func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
+func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
diff --git a/light/postprocess.go b/light/postprocess.go
index d6ba1089a7..0f3dce0f17 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// IndexerConfig includes a set of configs for chain indexers.
@@ -138,6 +139,7 @@ type ChtIndexerBackend struct {
section, sectionSize uint64
lastHash common.Hash
trie *trie.Trie
+ originRoot common.Hash
}
// NewChtIndexer creates a Cht chain indexer
@@ -196,6 +198,7 @@ func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSecti
}
}
c.section = section
+ c.originRoot = root
return err
}
@@ -223,7 +226,7 @@ func (c *ChtIndexerBackend) Commit() error {
}
// Commite trie changes into trie database in case it's not nil.
if nodes != nil {
- if err := c.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ if err := c.triedb.Update(root, c.originRoot, trienode.NewWithNodeSet(nodes)); err != nil {
return err
}
}
@@ -236,7 +239,7 @@ func (c *ChtIndexerBackend) Commit() error {
if !c.disablePruning {
// Flush the triedb and track the latest trie nodes.
c.trieset.Clear()
- c.triedb.Commit(root, false, func(hash common.Hash) { c.trieset.Add(hash) })
+ c.triedb.Commit(root, false)
it := c.trieTable.NewIterator(nil, nil)
defer it.Release()
@@ -257,7 +260,7 @@ func (c *ChtIndexerBackend) Commit() error {
}
log.Debug("Prune historical CHT trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t)))
} else {
- c.triedb.Commit(root, false, nil)
+ c.triedb.Commit(root, false)
}
log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
@@ -341,6 +344,7 @@ type BloomTrieIndexerBackend struct {
bloomTrieRatio uint64
trie *trie.Trie
sectionHeads []common.Hash
+ originRoot common.Hash
}
// NewBloomTrieIndexer creates a BloomTrie chain indexer
@@ -470,7 +474,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
}
if nodes != nil {
- if err := b.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ if err := b.triedb.Update(root, b.originRoot, trienode.NewWithNodeSet(nodes)); err != nil {
return err
}
}
@@ -484,7 +488,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
if !b.disablePruning {
// Flush the triedb and track the latest trie nodes.
b.trieset.Clear()
- b.triedb.Commit(root, false, func(hash common.Hash) { b.trieset.Add(hash) })
+ b.triedb.Commit(root, false)
it := b.trieTable.NewIterator(nil, nil)
defer it.Release()
@@ -505,7 +509,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
}
log.Debug("Prune historical bloom trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t)))
} else {
- b.triedb.Commit(root, false, nil)
+ b.triedb.Commit(root, false)
}
sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
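
For reference, a minimal sketch of the reworked flow the hunks above rely on: Commit(root, report) has dropped its per-node callback, and Update(root, parent, nodes) now records an explicit state transition. The snippet is illustrative only, not part of the patch, and assumes a fresh trie whose parent state is the empty root:

    package main

    import (
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/trie"
        "github.com/ethereum/go-ethereum/trie/trienode"
    )

    func commitExample() error {
        triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
        tr := trie.NewEmpty(triedb)
        tr.Update([]byte("key"), []byte("value"))

        root, nodes, err := tr.Commit(false)
        if err != nil {
            return err
        }
        // Update now names the transition explicitly: new root, parent root,
        // and the set of dirty nodes produced by the commit.
        if nodes != nil {
            if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
                return err
            }
        }
        // Commit lost its per-node callback; it just flushes the state
        // identified by root down to disk.
        return triedb.Commit(root, false)
    }
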
diff --git a/light/trie.go b/light/trie.go
index e60ad49c97..a09488a4ba 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -136,7 +137,7 @@ func (t *odrTrie) TryDelete(key []byte) error {
})
}
-func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error) {
+func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
if t.trie == nil {
return t.id.Root, nil, nil
}
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index a0ba68e211..f1f8e94c3d 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -27,9 +27,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -187,10 +189,10 @@ func (f *fuzzer) fuzz() int {
panic(err)
}
if nodes != nil {
- dbA.Update(trie.NewWithNodeSet(nodes))
+ dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
}
// Flush memdb -> disk (sponge)
- dbA.Commit(rootA, false, nil)
+ dbA.Commit(rootA, false)
// Stacktrie requires sorted insertion
sort.Sort(vals)
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 4be8ebb9e8..12165d5f54 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -22,7 +22,9 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// randTest performs random trie operations.
@@ -142,10 +144,12 @@ func Fuzz(input []byte) int {
}
func runRandTest(rt randTest) error {
- triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
-
- tr := trie.NewEmpty(triedb)
- values := make(map[string]string) // tracks content of the trie
+ var (
+ triedb = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ tr = trie.NewEmpty(triedb)
+ origin = types.EmptyRootHash
+ values = make(map[string]string) // tracks content of the trie
+ )
for i, step := range rt {
switch step.op {
@@ -169,7 +173,7 @@ func runRandTest(rt randTest) error {
return err
}
if nodes != nil {
- if err := triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ if err := triedb.Update(hash, origin, trienode.NewWithNodeSet(nodes)); err != nil {
return err
}
}
@@ -178,6 +182,7 @@ func runRandTest(rt randTest) error {
return err
}
tr = newtr
+ origin = hash
case opItercheckhash:
checktr := trie.NewEmpty(triedb)
it := trie.NewIterator(tr.NodeIterator(nil))
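
The runRandTest rework above is the pattern every repeated committer now follows: thread the previous root through as the parent of the next Update. A hedged sketch of that loop (applyBatches and its input are hypothetical names):

    package main

    import (
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/trie"
        "github.com/ethereum/go-ethereum/trie/trienode"
    )

    // applyBatches is a hypothetical helper showing the origin-chaining pattern
    // from runRandTest: each committed root becomes the parent of the next update.
    func applyBatches(batches []map[string]string) error {
        triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
        tr := trie.NewEmpty(triedb)
        origin := types.EmptyRootHash
        for _, batch := range batches {
            for k, v := range batch {
                tr.Update([]byte(k), []byte(v))
            }
            hash, nodes, err := tr.Commit(false)
            if err != nil {
                return err
            }
            if nodes != nil {
                if err := triedb.Update(hash, origin, trienode.NewWithNodeSet(nodes)); err != nil {
                    return err
                }
            }
            // Reopen the trie at the committed root and remember that root
            // as the parent of the next transition.
            tr, err = trie.New(trie.TrieID(hash), triedb)
            if err != nil {
                return err
            }
            origin = hash
        }
        return nil
    }
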
diff --git a/trie/committer.go b/trie/committer.go
index b19316631f..add2c02efe 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -30,13 +31,6 @@ import (
// some parallelism but not incur too much memory overhead.
const leafChanSize = 200
-// leaf represents a trie leaf value
-type leaf struct {
- blob []byte // raw blob of leaf
- parent common.Hash // the hash of parent node
- path []byte // the path from the root node
-}
-
// committer is a type used for the trie Commit operation. The committer will
// capture all dirty nodes during the commit process and keep them cached in
// insertion order.
@@ -45,7 +39,7 @@ type committer struct {
sha crypto.KeccakState
owner common.Hash // TODO: same as nodes.owner, consider removing
- nodes *NodeSet
+ nodes *trienode.NodeSet
tracer *tracer
collectLeaf bool
}
@@ -61,7 +55,7 @@ var committerPool = sync.Pool{
}
// newCommitter creates a new committer or picks one from the pool.
-func newCommitter(nodes *NodeSet, tracer *tracer, collectLeaf bool) *committer {
+func newCommitter(nodes *trienode.NodeSet, tracer *tracer, collectLeaf bool) *committer {
return &committer{
nodes: nodes,
tracer: tracer,
@@ -70,7 +64,7 @@ func newCommitter(nodes *NodeSet, tracer *tracer, collectLeaf bool) *committer {
}
// Commit collapses a node down into a hash node and inserts it into the database
-func (c *committer) Commit(n node) (hashNode, *NodeSet, error) {
+func (c *committer) Commit(n node) (hashNode, *trienode.NodeSet, error) {
h, err := c.commit(nil, n)
if err != nil {
return nil, nil, err
@@ -176,7 +170,7 @@ func (c *committer) store(path []byte, n node) node {
// deleted only if the node existed in the database before.
prev, ok := c.tracer.accessList[string(path)]
if ok {
- c.nodes.addNode(path, &nodeWithPrev{&memoryNode{}, prev})
+ c.nodes.AddNode(path, trienode.NewNodeWithPrev(common.Hash{}, nil, prev))
}
return n
}
@@ -185,24 +179,22 @@ func (c *committer) store(path []byte, n node) node {
var (
nhash = common.BytesToHash(hash)
blob, _ = rlp.EncodeToBytes(n)
- node = &nodeWithPrev{
- &memoryNode{
- nhash,
- blob,
- },
+ node = trienode.NewNodeWithPrev(
+ nhash,
+ blob,
c.tracer.accessList[string(path)],
- }
+ )
)
// Collect the dirty node to nodeset for return.
- c.nodes.addNode(path, node)
+ c.nodes.AddNode(path, node)
// Collect the corresponding leaf node if it's required. We don't check
// full node since it's impossible to store a value in a fullNode. The key
// length of leaves should be exactly the same.
if c.collectLeaf {
if sn, ok := n.(*shortNode); ok {
if val, ok := sn.Val.(valueNode); ok {
- c.nodes.addLeaf(&leaf{blob: val, parent: nhash})
+ c.nodes.AddLeaf(nhash, val)
}
}
}
@@ -214,7 +206,7 @@ type mptResolver struct{}
// ForEach implements childResolver, decodes the provided node and
// traverses the children inside.
-func (resolver mptResolver) forEach(node []byte, onChild func(common.Hash)) {
+func (resolver mptResolver) ForEach(node []byte, onChild func(common.Hash)) {
forGatherChildren(mustDecodeNode(nil, node), onChild)
}
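
With the committer now emitting exported trienode values, building a set by hand looks roughly like the sketch below. AddNode, AddLeaf and NewNodeWithPrev appear verbatim in the hunks above; trienode.NewNodeSet is an assumption, taken to mirror the trie.NewNodeSet constructor deleted later in this patch:

    package main

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/trie/trienode"
    )

    // buildNodeSet is an illustrative sketch; trienode.NewNodeSet is assumed
    // to mirror the deleted trie.NewNodeSet (zero owner = account trie).
    func buildNodeSet(newHash, parentHash common.Hash, newBlob, prevBlob, leafBlob []byte) *trienode.NodeSet {
        set := trienode.NewNodeSet(common.Hash{})
        // Deletion: zero hash and nil blob, with the previous RLP value kept
        // so the diff against the parent state stays reconstructible.
        set.AddNode([]byte{0x01}, trienode.NewNodeWithPrev(common.Hash{}, nil, prevBlob))
        // Update: fresh hash and encoded blob, previous value alongside.
        set.AddNode([]byte{0x02}, trienode.NewNodeWithPrev(newHash, newBlob, prevBlob))
        // Leaf collection, keyed by the hash of the parent node.
        set.AddLeaf(parentHash, leafBlob)
        return set
    }
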
diff --git a/trie/database_test.go b/trie/database_test.go
index 54d7529476..f81dc135ca 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -17,17 +17,20 @@
package trie
import (
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
)
-// Tests that the trie database returns a missing trie node error if attempting
-// to retrieve the meta root.
-func TestDatabaseMetarootFetch(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- if _, err := db.Node(common.Hash{}); err == nil {
- t.Fatalf("metaroot retrieval succeeded")
+// newTestDatabase initializes the trie database with the specified scheme.
+func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
+ db := prepare(diskdb, nil)
+ if scheme == rawdb.HashScheme {
+ db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
}
+ //} else {
+ // db.backend = snap.New(diskdb, db.cleans, nil)
+ //}
+ return db
}
diff --git a/trie/database_wrap.go b/trie/database_wrap.go
new file mode 100644
index 0000000000..43b9615f77
--- /dev/null
+++ b/trie/database_wrap.go
@@ -0,0 +1,287 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+ "errors"
+ "runtime"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+)
+
+// Config defines all necessary options for database.
+type Config struct {
+ Cache int // Memory allowance (MB) to use for caching trie nodes in memory
+ Journal string // Journal of clean cache to survive node restarts
+ Preimages bool // Flag whether the preimage of trie key is recorded
+}
+
+// backend defines the methods needed to access/update trie nodes in different
+// state scheme.
+type backend interface {
+ // Scheme returns the identifier of used storage scheme.
+ Scheme() string
+
+ // Initialized reports whether the state data is already initialized
+ // according to the state scheme.
+ Initialized(genesisRoot common.Hash) bool
+
+ // Size returns the current storage size of the memory cache in front of the
+ // persistent database layer.
+ Size() common.StorageSize
+
+ // Update performs a state transition by committing dirty nodes contained
+ // in the given set in order to update state from the specified parent to
+ // the specified root.
+ Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error
+
+ // Nodes retrieves the hashes of all the nodes cached within the memory database.
+ // This method is extremely expensive and should only be used to validate internal
+ // states in test code.
+ Nodes() []common.Hash
+
+ // DiskDB retrieves the persistent storage backing the trie database.
+ DiskDB() ethdb.KeyValueStore
+
+ // Commit writes all relevant trie nodes belonging to the specified state
+ // to disk. Report specifies whether logs will be displayed at info level.
+ Commit(root common.Hash, report bool) error
+
+ // Close closes the trie database backend and releases all held resources.
+ Close() error
+}
+
+// Database is the wrapper of the underlying backend and serves as the shared
+// entrypoint for the different node backends. It's responsible for all
+// interactions involving trie nodes and node preimages.
+type Database struct {
+ config *Config // Configuration for trie database
+ diskdb ethdb.Database // Persistent database to store the snapshot
+ cleans *fastcache.Cache // Clean node read cache, sized by the configured megabytes
+ preimages *preimageStore // The store for caching preimages
+ backend backend // The backend for managing trie nodes
+}
+
+// prepare initializes the database with provided configs, but the
+// database backend is still left as nil.
+func prepare(diskdb ethdb.Database, config *Config) *Database {
+ var cleans *fastcache.Cache
+ if config != nil && config.Cache > 0 {
+ if config.Journal == "" {
+ cleans = fastcache.New(config.Cache * 1024 * 1024)
+ } else {
+ cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
+ }
+ }
+ var preimages *preimageStore
+ if config != nil && config.Preimages {
+ preimages = newPreimageStore(diskdb)
+ }
+ return &Database{
+ config: config,
+ diskdb: diskdb,
+ cleans: cleans,
+ preimages: preimages,
+ }
+}
+
+// NewDatabase initializes the trie database with default settings, namely
+// the legacy hash-based scheme.
+func NewDatabase(diskdb ethdb.Database) *Database {
+ return NewDatabaseWithConfig(diskdb, nil)
+}
+
+// NewDatabaseWithConfig initializes the trie database with the provided configs.
+// The path-based scheme is not activated yet; the database is always initialized
+// with the legacy hash-based scheme.
+func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
+ db := prepare(diskdb, config)
+ db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
+ return db
+}
+
+// Reader returns a reader for accessing all trie nodes with provided state root.
+// Nil is returned in case the state is not available.
+func (db *Database) Reader(blockRoot common.Hash) Reader {
+ return db.backend.(*hashdb.Database).Reader(blockRoot)
+}
+
+// Update performs a state transition by committing dirty nodes contained in the
+// given set in order to update state from the specified parent to the specified
+// root. The held pre-images accumulated up to this point will be flushed in case
+// the size exceeds the threshold.
+func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+ if db.preimages != nil {
+ db.preimages.commit(false)
+ }
+ return db.backend.Update(root, parent, nodes)
+}
+
+// Commit iterates over all the children of a particular node and writes them
+// out to disk. As a side effect, all pre-images accumulated up to this point are
+// also written.
+func (db *Database) Commit(root common.Hash, report bool) error {
+ if db.preimages != nil {
+ db.preimages.commit(true)
+ }
+ return db.backend.Commit(root, report)
+}
+
+// Size returns the storage size of dirty trie nodes in front of the persistent
+// database and the size of cached preimages.
+func (db *Database) Size() (common.StorageSize, common.StorageSize) {
+ var (
+ storages common.StorageSize
+ preimages common.StorageSize
+ )
+ storages = db.backend.Size()
+ if db.preimages != nil {
+ preimages = db.preimages.size()
+ }
+ return storages, preimages
+}
+
+// Initialized reports whether the state data is already initialized
+// according to the state scheme.
+func (db *Database) Initialized(genesisRoot common.Hash) bool {
+ return db.backend.Initialized(genesisRoot)
+}
+
+// Scheme returns the node scheme used in the database.
+func (db *Database) Scheme() string {
+ return db.backend.Scheme()
+}
+
+// DiskDB retrieves the persistent storage backing the trie database.
+func (db *Database) DiskDB() ethdb.KeyValueStore {
+ return db.backend.DiskDB()
+}
+
+// Nodes retrieves the hashes of all the nodes cached within the memory database.
+// This method is extremely expensive and should only be used to validate internal
+// states in test code.
+func (db *Database) Nodes() []common.Hash {
+ return db.backend.Nodes()
+}
+
+// Close flushes the dangling preimages to disk and closes the trie database.
+// It is meant to be called when closing the blockchain object, so that all
+// resources held can be released correctly.
+func (db *Database) Close() error {
+ if db.preimages != nil {
+ db.preimages.commit(true)
+ }
+ return db.backend.Close()
+}
+
+// saveCache saves clean state cache to given directory path
+// using specified CPU cores.
+func (db *Database) saveCache(dir string, threads int) error {
+ if db.cleans == nil {
+ return nil
+ }
+ log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)
+
+ start := time.Now()
+ err := db.cleans.SaveToFileConcurrent(dir, threads)
+ if err != nil {
+ log.Error("Failed to persist clean trie cache", "error", err)
+ return err
+ }
+ log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
+ return nil
+}
+
+// SaveCache atomically saves fast cache data to the given dir using all
+// available CPU cores.
+func (db *Database) SaveCache(dir string) error {
+ return db.saveCache(dir, runtime.GOMAXPROCS(0))
+}
+
+// SaveCachePeriodically atomically saves fast cache data to the given dir with
+// the specified interval. All dump operations use only a single CPU core.
+func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ db.saveCache(dir, 1)
+ case <-stopCh:
+ return
+ }
+ }
+}
+
+// Cap iteratively flushes old but still referenced trie nodes until the total
+// memory usage goes below the given threshold. The held pre-images accumulated
+// up to this point will be flushed in case the size exceeds the threshold.
+//
+// It's only supported by hash-based database and will return an error for others.
+func (db *Database) Cap(limit common.StorageSize) error {
+ hdb, ok := db.backend.(*hashdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ if db.preimages != nil {
+ db.preimages.commit(false)
+ }
+ return hdb.Cap(limit)
+}
+
+// Reference adds a new reference from a parent node to a child node. This function
+// is used to add a reference between an internal trie node and an external node
+// (e.g. a storage trie root); all internal trie nodes are referenced together by
+// the database itself.
+//
+// It's only supported by hash-based database and will return an error for others.
+func (db *Database) Reference(root common.Hash, parent common.Hash) error {
+ hdb, ok := db.backend.(*hashdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ hdb.Reference(root, parent)
+ return nil
+}
+
+// Dereference removes an existing reference from a root node. It's only
+// supported by hash-based database and will return an error for others.
+func (db *Database) Dereference(root common.Hash) error {
+ hdb, ok := db.backend.(*hashdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ hdb.Dereference(root)
+ return nil
+}
+
+// Node retrieves the rlp-encoded node blob with the provided node hash. It's
+// only supported by the hash-based database and will return an error for others.
+// Note, this function should be deprecated once ETH66 is deprecated.
+func (db *Database) Node(hash common.Hash) ([]byte, error) {
+ hdb, ok := db.backend.(*hashdb.Database)
+ if !ok {
+ return nil, errors.New("not supported")
+ }
+ return hdb.Node(hash)
+}
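
A short usage sketch of the wrapper above (illustrative, not part of the patch). Only the hash backend is wired up for now, so hash-only helpers such as Cap, Reference and Node return errors, rather than panicking, once other backends exist:

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    func wrapperExample() {
        // 16 MB clean cache plus preimage recording; the backend defaults to
        // the legacy hash scheme per NewDatabaseWithConfig above.
        triedb := trie.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{
            Cache:     16,
            Preimages: true,
        })
        fmt.Println("scheme:", triedb.Scheme())

        // Hash-only operations report "not supported" on other backends
        // instead of panicking; on the hash backend they just work.
        if err := triedb.Cap(0); err != nil {
            fmt.Println("cap:", err)
        }
        nodes, preimages := triedb.Size()
        fmt.Println("dirty nodes:", nodes, "preimages:", preimages)
    }
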
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 6fc6eea782..f01c154d26 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -25,9 +25,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
func TestEmptyIterator(t *testing.T) {
@@ -64,7 +66,7 @@ func TestIterator(t *testing.T) {
if err != nil {
t.Fatalf("Failed to commit trie %v", err)
}
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
found := make(map[string]string)
it := NewIterator(trie.NodeIterator(nil))
@@ -117,39 +119,61 @@ func TestIteratorLargeData(t *testing.T) {
}
}
-// Tests that the node iterator indeed walks over the entire database contents.
+type iterationElement struct {
+ hash common.Hash
+ path []byte
+ blob []byte
+}
+
func TestNodeIteratorCoverage(t *testing.T) {
+ testNodeIteratorCoverage(t, rawdb.HashScheme)
+ //testNodeIteratorCoverage(t, rawdb.PathScheme)
+}
+
+func testNodeIteratorCoverage(t *testing.T, scheme string) {
// Create some arbitrary test trie to iterate
- db, trie, _ := makeTestTrie()
+ db, nodeDb, trie, _ := makeTestTrie(scheme)
// Gather all the node hashes found by the iterator
- hashes := make(map[common.Hash]struct{})
+ var elements = make(map[common.Hash]iterationElement)
for it := trie.NodeIterator(nil); it.Next(true); {
if it.Hash() != (common.Hash{}) {
- hashes[it.Hash()] = struct{}{}
+ elements[it.Hash()] = iterationElement{
+ hash: it.Hash(),
+ path: common.CopyBytes(it.Path()),
+ blob: common.CopyBytes(it.NodeBlob()),
+ }
}
}
// Cross check the hashes and the database itself
- for hash := range hashes {
- if _, err := db.Node(hash); err != nil {
- t.Errorf("failed to retrieve reported node %x: %v", hash, err)
+ for _, element := range elements {
+ if blob, err := nodeDb.Reader(trie.Hash()).Node(common.Hash{}, element.path, element.hash); err != nil {
+ t.Errorf("failed to retrieve reported node %x: %v", element.hash, err)
+ } else if !bytes.Equal(blob, element.blob) {
+ t.Errorf("node blob is different, want %v got %v", element.blob, blob)
}
}
- for hash, obj := range db.dirties {
- if obj != nil && hash != (common.Hash{}) {
- if _, ok := hashes[hash]; !ok {
- t.Errorf("state entry not reported %x", hash)
- }
- }
- }
- it := db.diskdb.NewIterator(nil, nil)
+ var (
+ count int
+ it = db.NewIterator(nil, nil)
+ )
for it.Next() {
- key := it.Key()
- if _, ok := hashes[common.BytesToHash(key)]; !ok {
- t.Errorf("state entry not reported %x", key)
+ res, _, _ := isTrieNode(nodeDb.Scheme(), it.Key(), it.Value())
+ if !res {
+ continue
+ }
+ count += 1
+ if elem, ok := elements[crypto.Keccak256Hash(it.Value())]; !ok {
+ t.Error("state entry not reported")
+ } else if !bytes.Equal(it.Value(), elem.blob) {
+ t.Errorf("node blob is different, want %v got %v", elem.blob, it.Value())
}
}
it.Release()
+ if count != len(elements) {
+ t.Errorf("state entry count mismatch: have %d, want %d", count, len(elements))
+ }
}
type kvs struct{ k, v string }
@@ -225,7 +249,7 @@ func TestDifferenceIterator(t *testing.T) {
triea.Update([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(NewWithNodeSet(nodesA))
+ dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -234,7 +258,7 @@ func TestDifferenceIterator(t *testing.T) {
trieb.Update([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(NewWithNodeSet(nodesB))
+ dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
@@ -267,7 +291,7 @@ func TestUnionIterator(t *testing.T) {
triea.Update([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(NewWithNodeSet(nodesA))
+ dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -276,7 +300,7 @@ func TestUnionIterator(t *testing.T) {
trieb.Update([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(NewWithNodeSet(nodesB))
+ dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
trieb, _ = New(TrieID(rootB), dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
@@ -322,79 +346,98 @@ func TestIteratorNoDups(t *testing.T) {
}
// This test checks that nodeIterator.Next can be retried after inserting missing trie nodes.
-func TestIteratorContinueAfterErrorDisk(t *testing.T) { testIteratorContinueAfterError(t, false) }
-func TestIteratorContinueAfterErrorMemonly(t *testing.T) { testIteratorContinueAfterError(t, true) }
+func TestIteratorContinueAfterError(t *testing.T) {
+ testIteratorContinueAfterError(t, false, rawdb.HashScheme)
+ testIteratorContinueAfterError(t, true, rawdb.HashScheme)
+ // testIteratorContinueAfterError(t, false, rawdb.PathScheme)
+ // testIteratorContinueAfterError(t, true, rawdb.PathScheme)
+}
-func testIteratorContinueAfterError(t *testing.T, memonly bool) {
+func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
+ tdb := newTestDatabase(diskdb, scheme)
- tr := NewEmpty(triedb)
+ tr := NewEmpty(tdb)
for _, val := range testdata1 {
tr.Update([]byte(val.k), []byte(val.v))
}
- _, nodes, _ := tr.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ root, nodes, _ := tr.Commit(false)
+ tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- triedb.Commit(tr.Hash(), true, nil)
+ tdb.Commit(root, false)
}
+ tr, _ = New(TrieID(root), tdb)
wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
var (
- diskKeys [][]byte
- memKeys []common.Hash
+ paths [][]byte
+ hashes []common.Hash
)
if memonly {
- memKeys = triedb.Nodes()
+ for path, n := range nodes.Nodes {
+ paths = append(paths, []byte(path))
+ hashes = append(hashes, n.Hash)
+ }
} else {
it := diskdb.NewIterator(nil, nil)
for it.Next() {
- diskKeys = append(diskKeys, it.Key())
+ ok, path, hash := isTrieNode(tdb.Scheme(), it.Key(), it.Value())
+ if !ok {
+ continue
+ }
+ paths = append(paths, path)
+ hashes = append(hashes, hash)
}
it.Release()
}
for i := 0; i < 20; i++ {
// Create trie that will load all nodes from DB.
- tr, _ := New(TrieID(tr.Hash()), triedb)
+ tr, _ := New(TrieID(tr.Hash()), tdb)
// Remove a random node from the database. It can't be the root node
// because that one is already loaded.
var (
- rkey common.Hash
- rval []byte
- robj *cachedNode
+ rval []byte
+ rpath []byte
+ rhash common.Hash
)
for {
if memonly {
- rkey = memKeys[rand.Intn(len(memKeys))]
+ rpath = paths[rand.Intn(len(paths))]
+ n := nodes.Nodes[string(rpath)]
+ if n == nil {
+ continue
+ }
+ rhash = n.Hash
} else {
- copy(rkey[:], diskKeys[rand.Intn(len(diskKeys))])
+ index := rand.Intn(len(paths))
+ rpath = paths[index]
+ rhash = hashes[index]
}
- if rkey != tr.Hash() {
+ if rhash != tr.Hash() {
break
}
}
if memonly {
- robj = triedb.dirties[rkey]
- delete(triedb.dirties, rkey)
+ tr.reader.banned = map[string]struct{}{string(rpath): {}}
} else {
- rval, _ = diskdb.Get(rkey[:])
- diskdb.Delete(rkey[:])
+ rval = rawdb.ReadTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme())
+ rawdb.DeleteTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme())
}
// Iterate until the error is hit.
seen := make(map[string]bool)
it := tr.NodeIterator(nil)
checkIteratorNoDups(t, it, seen)
missing, ok := it.Error().(*MissingNodeError)
- if !ok || missing.NodeHash != rkey {
+ if !ok || missing.NodeHash != rhash {
t.Fatal("didn't hit missing node, got", it.Error())
}
// Add the node back and continue iteration.
if memonly {
- triedb.dirties[rkey] = robj
+ delete(tr.reader.banned, string(rpath))
} else {
- diskdb.Put(rkey[:], rval)
+ rawdb.WriteTrieNode(diskdb, common.Hash{}, rpath, rhash, rval, tdb.Scheme())
}
checkIteratorNoDups(t, it, seen)
if it.Error() != nil {
@@ -409,42 +452,48 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
// Similar to the test above, this one checks that failure to create nodeIterator at a
// certain key prefix behaves correctly when Next is called. The expectation is that Next
// should retry seeking before returning true for the first time.
-func TestIteratorContinueAfterSeekErrorDisk(t *testing.T) {
- testIteratorContinueAfterSeekError(t, false)
-}
-func TestIteratorContinueAfterSeekErrorMemonly(t *testing.T) {
- testIteratorContinueAfterSeekError(t, true)
+func TestIteratorContinueAfterSeekError(t *testing.T) {
+ testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme)
+ testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme)
+ // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
+ // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
}
-func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
+func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) {
// Commit test trie to db, then remove the node containing "bars".
+ var (
+ barNodePath []byte
+ barNodeHash = common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e")
+ )
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
-
+ triedb := newTestDatabase(diskdb, scheme)
ctr := NewEmpty(triedb)
for _, val := range testdata1 {
ctr.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := ctr.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ for path, n := range nodes.Nodes {
+ if n.Hash == barNodeHash {
+ barNodePath = []byte(path)
+ break
+ }
+ }
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- triedb.Commit(root, true, nil)
+ triedb.Commit(root, false)
}
- barNodeHash := common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e")
var (
barNodeBlob []byte
- barNodeObj *cachedNode
)
+ tr, _ := New(TrieID(root), triedb)
if memonly {
- barNodeObj = triedb.dirties[barNodeHash]
- delete(triedb.dirties, barNodeHash)
+ tr.reader.banned = map[string]struct{}{string(barNodePath): {}}
} else {
- barNodeBlob, _ = diskdb.Get(barNodeHash[:])
- diskdb.Delete(barNodeHash[:])
+ barNodeBlob = rawdb.ReadTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme())
+ rawdb.DeleteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme())
}
// Create a new iterator that seeks to "bars". Seeking can't proceed because
// the node is missing.
- tr, _ := New(TrieID(root), triedb)
it := tr.NodeIterator([]byte("bars"))
missing, ok := it.Error().(*MissingNodeError)
if !ok {
@@ -454,9 +503,9 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
}
// Reinsert the missing node.
if memonly {
- triedb.dirties[barNodeHash] = barNodeObj
+ delete(tr.reader.banned, string(barNodePath))
} else {
- diskdb.Put(barNodeHash[:], barNodeBlob)
+ rawdb.WriteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, barNodeBlob, triedb.Scheme())
}
// Check that iteration produces the right set of values.
if err := checkIteratorOrder(testdata1[2:], NewIterator(it)); err != nil {
@@ -477,6 +526,11 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in
return len(seen)
}
+func TestIteratorNodeBlob(t *testing.T) {
+ testIteratorNodeBlob(t, rawdb.HashScheme)
+ //testIteratorNodeBlob(t, rawdb.PathScheme)
+}
+
type loggingDb struct {
getCount uint64
backend ethdb.KeyValueStore
@@ -544,8 +598,8 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
val = crypto.Keccak256(val)
trie.Update(key, val)
}
- _, nodes, _ := trie.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Return the generated trie
return triedb, trie, logDb
}
@@ -564,10 +618,10 @@ func TestNodeIteratorLargeTrie(t *testing.T) {
}
}
-func TestIteratorNodeBlob(t *testing.T) {
+func testIteratorNodeBlob(t *testing.T, scheme string) {
var (
db = rawdb.NewMemoryDatabase()
- triedb = NewDatabase(db)
+ triedb = newTestDatabase(db, scheme)
trie = NewEmpty(triedb)
)
vals := []struct{ k, v string }{
@@ -584,10 +638,12 @@ func TestIteratorNodeBlob(t *testing.T) {
all[val.k] = val.v
trie.Update([]byte(val.k), []byte(val.v))
}
- trie.Commit(false)
- triedb.Cap(0)
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Commit(root, false)
- found := make(map[common.Hash][]byte)
+ var found = make(map[common.Hash][]byte)
+ trie, _ = New(TrieID(root), triedb)
it := trie.NodeIterator(nil)
for it.Next(true) {
if it.Hash() == (common.Hash{}) {
@@ -595,15 +651,18 @@ func TestIteratorNodeBlob(t *testing.T) {
}
found[it.Hash()] = it.NodeBlob()
}
-
dbIter := db.NewIterator(nil, nil)
defer dbIter.Release()
var count int
for dbIter.Next() {
- got, present := found[common.BytesToHash(dbIter.Key())]
+ ok, _, _ := isTrieNode(triedb.Scheme(), dbIter.Key(), dbIter.Value())
+ if !ok {
+ continue
+ }
+ got, present := found[crypto.Keccak256Hash(dbIter.Value())]
if !present {
- t.Fatalf("Miss trie node %v", dbIter.Key())
+ t.Fatal("Miss trie node")
}
if !bytes.Equal(got, dbIter.Value()) {
t.Fatalf("Unexpected trie node want %v got %v", dbIter.Value(), got)
@@ -612,5 +671,33 @@ func TestIteratorNodeBlob(t *testing.T) {
}
if count != len(found) {
t.Fatal("Find extra trie node via iterator")
+ }
+}
+
+// isTrieNode is a helper function which reports whether the provided
+// database entry belongs to a trie node. Note that in tests only a
+// single-layer trie is used, so storage tries are not considered at all.
+func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) {
+ var (
+ path []byte
+ hash common.Hash
+ )
+ if scheme == rawdb.HashScheme {
+ ok := rawdb.IsLegacyTrieNode(key, val)
+ if !ok {
+ return false, nil, common.Hash{}
+ }
+ hash = common.BytesToHash(key)
+ } else {
+ ok, remain := rawdb.IsAccountTrieNode(key)
+ if !ok {
+ return false, nil, common.Hash{}
+ }
+ path = common.CopyBytes(remain)
+ hash = crypto.Keccak256Hash(val)
}
+ return true, path, hash
}
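
The iterator tests above all share the same scan shape over the raw key-value store; a condensed sketch (countTrieNodes is a hypothetical helper and would live in the trie package's test scope, where isTrieNode is visible):

    package trie

    import "github.com/ethereum/go-ethereum/ethdb"

    // countTrieNodes scans the raw key-value store and keeps only entries
    // that isTrieNode recognises for the given scheme, skipping preimages,
    // metadata and any other non-node records.
    func countTrieNodes(db ethdb.Database, scheme string) int {
        it := db.NewIterator(nil, nil)
        defer it.Release()

        var count int
        for it.Next() {
            if ok, _, _ := isTrieNode(scheme, it.Key(), it.Value()); ok {
                count++
            }
        }
        return count
    }
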
diff --git a/trie/nodeset.go b/trie/nodeset.go
deleted file mode 100644
index 9288033548..0000000000
--- a/trie/nodeset.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/ethereum/go-ethereum/common"
-)
-
-// memoryNode is all the information we know about a single cached trie node
-// in the memory.
-type memoryNode struct {
- hash common.Hash // Node hash, computed by hashing rlp value, empty for deleted nodes
- node []byte // Encoded node blob, nil for deleted nodes
-}
-
-// memorySize returns the total memory size used by this node.
-// nolint:unused
-func (n *memoryNode) memorySize(pathlen int) int {
- return len(n.node) + common.HashLength + pathlen
-}
-
-// isDeleted returns the indicator if the node is marked as deleted.
-func (n *memoryNode) isDeleted() bool {
- return n.hash == (common.Hash{})
-}
-
-// rlp returns the raw rlp encoded blob of the cached trie node, either directly
-// from the cache, or by regenerating it from the collapsed node.
-// nolint:unused
-func (n *memoryNode) rlp() []byte {
- return n.node
-}
-
-// obj returns the decoded and expanded trie node, either directly from the cache,
-// or by regenerating it from the rlp encoded blob.
-// nolint:unused
-func (n *memoryNode) obj() node {
- return mustDecodeNode(n.hash[:], n.node)
-}
-
-// nodeWithPrev wraps the memoryNode with the previous node value.
-type nodeWithPrev struct {
- *memoryNode
- prev []byte // RLP-encoded previous value, nil means it's non-existent
-}
-
-// unwrap returns the internal memoryNode object.
-// nolint:unused
-func (n *nodeWithPrev) unwrap() *memoryNode {
- return n.memoryNode
-}
-
-// memorySize returns the total memory size used by this node. It overloads
-// the function in memoryNode by counting the size of previous value as well.
-// nolint: unused
-func (n *nodeWithPrev) memorySize(key int) int {
- return n.memoryNode.memorySize(key) + len(n.prev)
-}
-
-// NodeSet contains all dirty nodes collected during the commit operation.
-// Each node is keyed by path. It is not thread-safe to use.
-type NodeSet struct {
- owner common.Hash // the identifier of the trie
- leaves []*leaf // the list of dirty leaves
- updates int // the count of updated and inserted nodes
- deletes int // the count of deleted nodes
-
- // The set of all dirty nodes. Dirty nodes include newly inserted nodes,
- // deleted nodes and updated nodes. The original value of the newly
- // inserted node must be nil, and the original value of the other two
- // types must be non-nil.
- nodes map[string]*nodeWithPrev
-}
-
-// NewNodeSet initializes an empty node set to be used for tracking dirty nodes
-// from a specific account or storage trie. The owner is zero for the account
-// trie and the owning account address hash for storage tries.
-func NewNodeSet(owner common.Hash) *NodeSet {
- return &NodeSet{
- owner: owner,
- nodes: make(map[string]*nodeWithPrev),
- }
-}
-
-// forEachWithOrder iterates the dirty nodes with the order from bottom to top,
-// right to left, nodes with the longest path will be iterated first.
-func (set *NodeSet) forEachWithOrder(callback func(path string, n *memoryNode)) {
- var paths sort.StringSlice
- for path := range set.nodes {
- paths = append(paths, path)
- }
- // Bottom-up, longest path first
- sort.Sort(sort.Reverse(paths))
- for _, path := range paths {
- callback(path, set.nodes[path].unwrap())
- }
-}
-
-// addNode adds the provided dirty node into set.
-func (set *NodeSet) addNode(path []byte, n *nodeWithPrev) {
- if n.isDeleted() {
- set.deletes += 1
- } else {
- set.updates += 1
- }
- set.nodes[string(path)] = n
-}
-
-// addLeaf collects the provided leaf node into set.
-func (set *NodeSet) addLeaf(leaf *leaf) {
- set.leaves = append(set.leaves, leaf)
-}
-
-// Size returns the number of updated and deleted nodes contained in the set.
-func (set *NodeSet) Size() (int, int) {
- return set.updates, set.deletes
-}
-
-// Hashes returns the hashes of all updated nodes.
-func (set *NodeSet) Hashes() []common.Hash {
- var ret []common.Hash
- for _, node := range set.nodes {
- ret = append(ret, node.hash)
- }
- return ret
-}
-
-// Summary returns a string-representation of the NodeSet.
-func (set *NodeSet) Summary() string {
- var out = new(strings.Builder)
- fmt.Fprintf(out, "nodeset owner: %v\n", set.owner)
- if set.nodes != nil {
- for path, n := range set.nodes {
- // Deletion
- if n.isDeleted() {
- fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.prev)
- continue
- }
- // Insertion
- if len(n.prev) == 0 {
- fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.hash)
- continue
- }
- // Update
- fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.hash, n.prev)
- }
- }
- for _, n := range set.leaves {
- fmt.Fprintf(out, "[leaf]: %v\n", n)
- }
- return out.String()
-}
-
-// MergedNodeSet represents a merged dirty node set for a group of tries.
-type MergedNodeSet struct {
- sets map[common.Hash]*NodeSet
-}
-
-// NewMergedNodeSet initializes an empty merged set.
-func NewMergedNodeSet() *MergedNodeSet {
- return &MergedNodeSet{sets: make(map[common.Hash]*NodeSet)}
-}
-
-// NewWithNodeSet constructs a merged nodeset with the provided single set.
-func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
- merged := NewMergedNodeSet()
- merged.Merge(set)
- return merged
-}
-
-// Merge merges the provided dirty nodes of a trie into the set. The assumption
-// is held that no duplicated set belonging to the same trie will be merged twice.
-func (set *MergedNodeSet) Merge(other *NodeSet) error {
- _, present := set.sets[other.owner]
- if present {
- return fmt.Errorf("duplicate trie for owner %#x", other.owner)
- }
- set.sets[other.owner] = other
- return nil
-}
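
Callers of the deleted set migrate to the exported trie/trienode package. A hedged sketch of the merged-set construction (trienode.NewMergedNodeSet is an assumption, taken to mirror the deleted constructor; Merge and NewWithNodeSet appear verbatim elsewhere in this patch):

    package main

    import "github.com/ethereum/go-ethereum/trie/trienode"

    // mergeExample groups per-trie node sets for a single database Update,
    // as the deleted trie.MergedNodeSet used to. Merge is assumed to keep
    // rejecting duplicate owners, matching the removed implementation.
    func mergeExample(accountSet, storageSet *trienode.NodeSet) (*trienode.MergedNodeSet, error) {
        merged := trienode.NewMergedNodeSet()
        if err := merged.Merge(accountSet); err != nil { // zero owner: account trie
            return nil, err
        }
        if err := merged.Merge(storageSet); err != nil { // owner: account address hash
            return nil, err
        }
        return merged, nil
    }
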
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index ce69b839bb..973596a58f 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// SecureTrie wraps a trie with key hashing. In a secure trie, all
@@ -166,7 +167,7 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte {
// collectLeaf is true) will be encapsulated into a nodeset for return.
// The returned nodeset can be nil if the trie is clean(nothing to commit).
// All cached preimages will be also flushed if preimages recording is enabled.
-func (t *SecureTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
+func (t *SecureTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
// Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 {
if t.preimages != nil { // Ugly direct check but avoids the below write lock
diff --git a/trie/sync_test.go b/trie/sync_test.go
index c964608aa1..d1d26de5eb 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -23,14 +23,19 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// makeTestTrie creates a sample test trie to test node-wise reconstruction.
-func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
+func makeTestTrie(scheme string) (ethdb.Database, *Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ db := rawdb.NewMemoryDatabase()
+
+ triedb := newTestDatabase(db, scheme)
trie, _ := NewSecure(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data
@@ -52,27 +57,31 @@ func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
trie.Update(key, val)
}
}
- _, nodes, err := trie.Commit(false)
+ root, nodes, err := trie.Commit(false)
if err != nil {
panic(fmt.Errorf("failed to commit trie: %v", err))
}
- if err := triedb.Update(NewWithNodeSet(nodes)); err != nil {
+ if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
+ if err := triedb.Commit(root, false); err != nil {
+ panic(err)
+ }
// Return the generated trie
- return triedb, trie, content
+ return db, triedb, trie, content
}
// checkTrieContents cross references a reconstructed trie with an expected data
// content map.
-func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
+func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) {
// Check root availability and trie contents
- trie, err := NewSecure(TrieID(common.BytesToHash(root)), db)
+ ndb := newTestDatabase(db, scheme)
+ trie, err := NewSecure(TrieID(common.BytesToHash(root)), ndb)
if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err)
}
- if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
+ if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil {
t.Fatalf("inconsistent trie at %x: %v", root, err)
}
for key, val := range content {
@@ -83,9 +92,9 @@ func checkTrieContents(t *testing.T, db *Database, root []byte, content map[stri
}
// checkTrieConsistency checks that all nodes in a trie are indeed present.
-func checkTrieConsistency(db *Database, root common.Hash) error {
- // Create and iterate a trie rooted in a subnode
- trie, err := NewSecure(TrieID(root), db)
+func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error {
+ ndb := newTestDatabase(db, scheme)
+ trie, err := NewSecure(TrieID(root), ndb)
if err != nil {
return nil // Consider a non existent state consistent
}
@@ -106,11 +115,16 @@ type trieElement struct {
func TestEmptySync(t *testing.T) {
dbA := NewDatabase(rawdb.NewMemoryDatabase())
dbB := NewDatabase(rawdb.NewMemoryDatabase())
+ //dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
+ //dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
+
emptyA := NewEmpty(dbA)
emptyB, _ := New(TrieID(emptyRoot), dbB)
+ //emptyC := NewEmpty(dbC)
+ //emptyD, _ := New(TrieID(types.EmptyRootHash), dbD)
- for i, trie := range []*Trie{emptyA, emptyB} {
- sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()), []*Database{dbA, dbB}[i].Scheme())
+ for i, trie := range []*Trie{emptyA, emptyB /*emptyC, emptyD*/} {
+ sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()), []*Database{dbA, dbB /*dbC, dbD*/}[i].Scheme())
if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
}
@@ -119,18 +133,23 @@ func TestEmptySync(t *testing.T) {
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
-func TestIterativeSyncIndividual(t *testing.T) { testIterativeSync(t, 1, false) }
-func TestIterativeSyncBatched(t *testing.T) { testIterativeSync(t, 100, false) }
-func TestIterativeSyncIndividualByPath(t *testing.T) { testIterativeSync(t, 1, true) }
-func TestIterativeSyncBatchedByPath(t *testing.T) { testIterativeSync(t, 100, true) }
+func TestIterativeSync(t *testing.T) {
+ testIterativeSync(t, 1, false, rawdb.HashScheme)
+ testIterativeSync(t, 100, false, rawdb.HashScheme)
+ testIterativeSync(t, 1, true, rawdb.HashScheme)
+ testIterativeSync(t, 100, true, rawdb.HashScheme)
+ // testIterativeSync(t, 1, false, rawdb.PathScheme)
+ // testIterativeSync(t, 100, false, rawdb.PathScheme)
+ // testIterativeSync(t, 1, true, rawdb.PathScheme)
+ // testIterativeSync(t, 100, true, rawdb.PathScheme)
+}
-func testIterativeSync(t *testing.T, count int, bypath bool) {
+func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -148,7 +167,8 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
results := make([]NodeSyncResult, len(elements))
if !bypath {
for i, element := range elements {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
}
@@ -185,18 +205,21 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedSync(t *testing.T) {
- // Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ testIterativeDelayedSync(t, rawdb.HashScheme)
+ //testIterativeDelayedSync(t, rawdb.PathScheme)
+}
+func testIterativeDelayedSync(t *testing.T, scheme string) {
+ // Create a random trie to copy
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -214,7 +237,8 @@ func TestIterativeDelayedSync(t *testing.T) {
// Sync only half of the scheduled nodes
results := make([]NodeSyncResult, len(elements)/2+1)
for i, element := range elements[:len(results)] {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -242,22 +266,25 @@ func TestIterativeDelayedSync(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
-func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1) }
-func TestIterativeRandomSyncBatched(t *testing.T) { testIterativeRandomSync(t, 100) }
+func TestIterativeRandomSyncIndividual(t *testing.T) {
+ testIterativeRandomSync(t, 1, rawdb.HashScheme)
+ testIterativeRandomSync(t, 100, rawdb.HashScheme)
+ // testIterativeRandomSync(t, 1, rawdb.PathScheme)
+ // testIterativeRandomSync(t, 100, rawdb.PathScheme)
+}
-func testIterativeRandomSync(t *testing.T, count int) {
+func testIterativeRandomSync(t *testing.T, count int, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -275,7 +302,8 @@ func testIterativeRandomSync(t *testing.T, count int) {
// Fetch all the queued nodes in a random order
results := make([]NodeSyncResult, 0, len(queue))
for path, element := range queue {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -304,18 +332,22 @@ func testIterativeRandomSync(t *testing.T, count int) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (Even those randomly), others sent only later.
func TestIterativeRandomDelayedSync(t *testing.T) {
+ testIterativeRandomDelayedSync(t, rawdb.HashScheme)
+ // testIterativeRandomDelayedSync(t, rawdb.PathScheme)
+}
+
+func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -333,7 +365,8 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
// Sync only half of the scheduled nodes, even those in random order
results := make([]NodeSyncResult, 0, len(queue)/2+1)
for path, element := range queue {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -367,18 +400,22 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that a trie sync will not request nodes multiple times, even if they
// have such references.
func TestDuplicateAvoidanceSync(t *testing.T) {
+ testDuplicateAvoidanceSync(t, rawdb.HashScheme)
+ // testDuplicateAvoidanceSync(t, rawdb.PathScheme)
+}
+
+func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -397,7 +434,8 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
for len(elements) > 0 {
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -430,26 +468,33 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
-func TestIncompleteSync(t *testing.T) {
+func TestIncompleteSyncHash(t *testing.T) {
+ testIncompleteSync(t, rawdb.HashScheme)
+ // testIncompleteSync(t, rawdb.PathScheme)
+}
+
+func testIncompleteSync(t *testing.T, scheme string) {
+ t.Parallel()
+
// Create a random trie to copy
- srcDb, srcTrie, _ := makeTestTrie()
+ _, srcDb, srcTrie, _ := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
var (
- added []common.Hash
- elements []trieElement
- root = srcTrie.Hash()
+ addedKeys []string
+ addedHashes []common.Hash
+ elements []trieElement
+ root = srcTrie.Hash()
)
paths, nodes, _ := sched.Missing(1)
for i := 0; i < len(paths); i++ {
@@ -463,7 +508,8 @@ func TestIncompleteSync(t *testing.T) {
// Fetch a batch of trie nodes
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -484,11 +530,8 @@ func TestIncompleteSync(t *testing.T) {
for _, result := range results {
hash := crypto.Keccak256Hash(result.Data)
if hash != root {
- added = append(added, hash)
- }
- // Check that all known sub-tries in the synced trie are complete
- if err := checkTrieConsistency(triedb, hash); err != nil {
- t.Fatalf("trie inconsistent: %v", err)
+ addedKeys = append(addedKeys, result.Path)
+ addedHashes = append(addedHashes, crypto.Keccak256Hash(result.Data))
}
}
// Fetch the next batch to retrieve
@@ -503,25 +546,31 @@ func TestIncompleteSync(t *testing.T) {
}
}
// Sanity check that removing any node from the database is detected
- for _, hash := range added {
- value, _ := diskdb.Get(hash.Bytes())
- diskdb.Delete(hash.Bytes())
- if err := checkTrieConsistency(triedb, root); err == nil {
- t.Fatalf("trie inconsistency not caught, missing: %x", hash)
- }
- diskdb.Put(hash.Bytes(), value)
+ for i, path := range addedKeys {
+ owner, inner := ResolvePath([]byte(path))
+ nodeHash := addedHashes[i]
+ value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
+ rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
+ if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil {
+ t.Fatalf("trie inconsistency not caught, missing: %x", path)
+ }
+ rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)
}
}
// Tests that trie nodes get scheduled lexicographically when having the same
// depth.
func TestSyncOrdering(t *testing.T) {
+ testSyncOrdering(t, rawdb.HashScheme)
+ // testSyncOrdering(t, rawdb.PathScheme)
+}
+
+func testSyncOrdering(t *testing.T, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler, tracking the requests
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -543,7 +592,8 @@ func TestSyncOrdering(t *testing.T) {
for len(elements) > 0 {
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -572,7 +622,7 @@ func TestSyncOrdering(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
// Check that the trie nodes have been requested path-ordered
for i := 0; i < len(reqs)-1; i++ {
@@ -586,3 +636,116 @@ func TestSyncOrdering(t *testing.T) {
}
}
}
+
+func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database) {
+ // Create a destination trie and sync with the scheduler
+ sched := NewSync(root, db, nil, NewSyncBloom(1, db), srcDb.Scheme())
+
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ paths, nodes, _ := sched.Missing(1)
+ var elements []trieElement
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
+ for len(elements) > 0 {
+ results := make([]NodeSyncResult, len(elements))
+ for i, element := range elements {
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(root).Node(owner, inner, element.hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
+ }
+ results[i] = NodeSyncResult{element.path, data}
+ }
+ for index, result := range results {
+ if err := sched.ProcessNode(result); err != nil {
+ t.Fatalf("failed to process result[%d][%v] data %v %v", index, []byte(result.Path), result.Data, err)
+ }
+ }
+ batch := db.NewBatch()
+ if err := sched.Commit(batch); err != nil {
+ t.Fatalf("failed to commit data: %v", err)
+ }
+ batch.Write()
+
+ paths, nodes, _ = sched.Missing(1)
+ elements = elements[:0]
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
+ }
+}
+
+// Tests that the syncing target keeps moving, which may overwrite stale
+// states synced in the last cycle.
+func TestSyncMovingTarget(t *testing.T) {
+ testSyncMovingTarget(t, rawdb.HashScheme)
+ // testSyncMovingTarget(t, rawdb.PathScheme)
+}
+
+func testSyncMovingTarget(t *testing.T, scheme string) {
+ // Create a random trie to copy
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
+
+ // Create a destination trie and sync with the scheduler
+ diskdb := rawdb.NewMemoryDatabase()
+ syncWith(t, srcTrie.Hash(), diskdb, srcDb)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+
+ // Push more modifications into the src trie, to see if dest trie can still
+	// sync with it (overwriting stale states)
+ var (
+ preRoot = srcTrie.Hash()
+ diff = make(map[string][]byte)
+ )
+ for i := byte(0); i < 10; i++ {
+ key, val := randBytes(32), randBytes(32)
+ srcTrie.Update(key, val)
+ diff[string(key)] = val
+ }
+ root, nodes, _ := srcTrie.Commit(false)
+ if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
+ panic(err)
+ }
+ if err := srcDb.Commit(root, false); err != nil {
+ panic(err)
+ }
+ preRoot = root
+ srcTrie, _ = NewSecure(TrieID(root), srcDb)
+
+ syncWith(t, srcTrie.Hash(), diskdb, srcDb)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff)
+
+ // Revert added modifications from the src trie, to see if dest trie can still
+	// sync with it (overwriting reverted states)
+ var reverted = make(map[string][]byte)
+ for k := range diff {
+ srcTrie.Delete([]byte(k))
+ reverted[k] = nil
+ }
+ for k := range srcData {
+ val := randBytes(32)
+ srcTrie.Update([]byte(k), val)
+ reverted[k] = val
+ }
+ root, nodes, _ = srcTrie.Commit(false)
+ if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
+ panic(err)
+ }
+ if err := srcDb.Commit(root, false); err != nil {
+ panic(err)
+ }
+ srcTrie, _ = NewSecure(TrieID(root), srcDb)
+
+ syncWith(t, srcTrie.Hash(), diskdb, srcDb)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted)
+}
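The hunks above repeatedly swap the old hash-addressed read, srcDb.Node(element.hash), for a root-scoped reader. A minimal sketch of the new call shape, assuming only the ResolvePath helper and the Reader interface that this patch introduces (readNode itself is a hypothetical helper, not part of the patch):

    // readNode resolves a compound sync path into its owner and inner path,
    // then fetches the node through a reader bound to one state root.
    func readNode(db *Database, root common.Hash, path string, hash common.Hash) ([]byte, error) {
        owner, inner := ResolvePath([]byte(path))
        return db.Reader(root).Node(owner, inner, hash)
    }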
diff --git a/trie/tracer.go b/trie/tracer.go
index cd5ebb85a2..796b792afc 100644
--- a/trie/tracer.go
+++ b/trie/tracer.go
@@ -16,6 +16,11 @@
package trie
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+)
+
// tracer tracks the changes of trie nodes. During the trie operations,
// some nodes can be deleted from the trie, while these deleted nodes
// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted
@@ -110,7 +115,7 @@ func (t *tracer) copy() *tracer {
}
// markDeletions puts all tracked deletions into the provided nodeset.
-func (t *tracer) markDeletions(set *NodeSet) {
+func (t *tracer) markDeletions(set *trienode.NodeSet) {
for path := range t.deletes {
// It's possible a few deleted nodes were embedded
// in their parent before, the deletions can be no
@@ -119,6 +124,6 @@ func (t *tracer) markDeletions(set *NodeSet) {
if !ok {
continue
}
- set.addNode([]byte(path), &nodeWithPrev{&memoryNode{}, prev})
+ set.AddNode([]byte(path), trienode.NewNodeWithPrev(common.Hash{}, nil, prev))
}
}
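As markDeletions shows, a deletion is represented by a node with a zero hash and a nil blob that carries only the previous value. A small sketch of that convention, using the trienode constructors added later in this patch (fragment; prevBlob is a placeholder):

    // A deletion marker: zero hash, nil blob, previous value retained.
    del := trienode.NewNodeWithPrev(common.Hash{}, nil, prevBlob)
    _ = del.IsDeleted() // true, because del.Hash == common.Hash{}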
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index f8511a5e67..2421d88202 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -22,6 +22,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -69,7 +71,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
seen := setKeys(iterNodes(db, root))
if !compareSet(insertSet, seen) {
@@ -135,7 +137,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -143,13 +145,14 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Update trie
+ parent := root
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, val := range vals {
trie.Update([]byte(val.k), randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -157,6 +160,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Add more new nodes
+ parent = root
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
var keys []string
@@ -166,7 +170,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update(key, randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -174,13 +178,14 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Partial deletions
+ parent = root
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, key := range keys {
trie.Update([]byte(key), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -188,13 +193,14 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Delete all
+ parent = root
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, val := range vals {
trie.Update([]byte(val.k), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -213,7 +219,7 @@ func TestAccessListLeak(t *testing.T) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
var cases = []struct {
op func(tr *Trie)
@@ -263,15 +269,16 @@ func TestTinyTree(t *testing.T) {
trie.Update([]byte(val.k), randBytes(32))
}
root, set, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(set))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
+ parent := root
trie, _ = New(TrieID(root), db)
orig := trie.Copy()
for _, val := range tiny {
trie.Update([]byte(val.k), []byte(val.v))
}
root, set, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(set))
+ db.Update(root, parent, trienode.NewWithNodeSet(set))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, set); err != nil {
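A pattern worth noting in the test changes above: every db.Update now names the parent root of the state being committed, starting from types.EmptyRootHash for the first commit. A condensed sketch of the chaining discipline, under the assumption that db and tr already exist (the helper itself is illustrative):

    // commitChain sketches the new root-chaining discipline: each commit
    // declares which state it was built on, and the trie is reopened at
    // the new root because committed tries are not reusable.
    func commitChain(db *Database, tr *Trie) error {
        parent := types.EmptyRootHash
        for i := 0; i < 2; i++ {
            tr.Update([]byte{byte(i)}, []byte("v")) // some mutation
            root, nodes, err := tr.Commit(false)
            if err != nil {
                return err
            }
            if nodes != nil {
                if err := db.Update(root, parent, trienode.NewWithNodeSet(nodes)); err != nil {
                    return err
                }
            }
            tr, _ = New(TrieID(root), db)
            parent = root
        }
        return nil
    }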
diff --git a/trie/trie.go b/trie/trie.go
index bbfb0b662f..ae00e542e5 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -582,9 +583,9 @@ func (t *Trie) Hash() common.Hash {
// The returned nodeset can be nil if the trie is clean (nothing to commit).
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with the new root and updated trie database for further usage.
-func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
+func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
defer t.tracer.reset()
- nodes := NewNodeSet(t.owner)
+ nodes := trienode.NewNodeSet(t.owner)
t.tracer.markDeletions(nodes)
if t.root == nil {
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index 1f3a2b8982..58a9f7ed86 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -32,9 +32,9 @@ type Reader interface {
// NodeReader wraps all the necessary functions for accessing trie node.
type NodeReader interface {
- // GetReader returns a reader for accessing all trie nodes with provided
+ // Reader returns a reader for accessing all trie nodes with provided
// state root. Nil is returned in case the state is not available.
- GetReader(root common.Hash) Reader
+ Reader(root common.Hash) Reader
}
// trieReader is a wrapper of the underlying node reader. It's not safe
@@ -47,7 +47,7 @@ type trieReader struct {
// newTrieReader initializes the trie reader with the given node reader.
func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) {
- reader := db.GetReader(stateRoot)
+ reader := db.Reader(stateRoot)
if reader == nil {
return nil, fmt.Errorf("state not found #%x", stateRoot)
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 499f0574df..0223466222 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -80,20 +81,24 @@ func TestMissingRoot(t *testing.T) {
}
}
-func TestMissingNodeDisk(t *testing.T) { testMissingNode(t, false) }
-func TestMissingNodeMemonly(t *testing.T) { testMissingNode(t, true) }
+func TestMissingNode(t *testing.T) {
+ testMissingNode(t, false, rawdb.HashScheme)
+ //testMissingNode(t, false, rawdb.PathScheme)
+ testMissingNode(t, true, rawdb.HashScheme)
+ //testMissingNode(t, true, rawdb.PathScheme)
+}
-func testMissingNode(t *testing.T, memonly bool) {
+func testMissingNode(t *testing.T, memonly bool, scheme string) {
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
+ triedb := newTestDatabase(diskdb, scheme)
trie := NewEmpty(triedb)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
root, nodes, _ := trie.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- triedb.Commit(root, true, nil)
+ triedb.Commit(root, true)
}
trie, _ = New(TrieID(root), triedb)
@@ -122,34 +127,39 @@ func testMissingNode(t *testing.T, memonly bool) {
t.Errorf("Unexpected error: %v", err)
}
- hash := common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")
+ var (
+ path []byte
+ hash = common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")
+ )
+ for p, n := range nodes.Nodes {
+ if n.Hash == hash {
+ path = common.CopyBytes([]byte(p))
+ break
+ }
+ }
+ trie, _ = New(TrieID(root), triedb)
if memonly {
- delete(triedb.dirties, hash)
+ trie.reader.banned = map[string]struct{}{string(path): {}}
} else {
- diskdb.Delete(hash[:])
+ rawdb.DeleteTrieNode(diskdb, common.Hash{}, path, hash, scheme)
}
- trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120000"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120099"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(TrieID(root), triedb)
err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(TrieID(root), triedb)
err = trie.TryDelete([]byte("123456"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
@@ -204,7 +214,7 @@ func TestGet(t *testing.T) {
return
}
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
}
}
@@ -261,8 +271,8 @@ func TestEmptyValues(t *testing.T) {
}
func TestReplication(t *testing.T) {
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
- trie := NewEmpty(triedb)
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -275,16 +285,16 @@ func TestReplication(t *testing.T) {
for _, val := range vals {
updateString(trie, val.k, val.v)
}
- exp, nodes, err := trie.Commit(false)
+ root, nodes, err := trie.Commit(false)
if err != nil {
t.Fatalf("commit error: %v", err)
}
- triedb.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// create a new trie on top of the database and check that lookups work.
- trie2, err := New(TrieID(exp), triedb)
+ trie2, err := New(TrieID(root), db)
if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", exp, err)
+ t.Fatalf("can't recreate trie at %x: %v", root, err)
}
for _, kv := range vals {
if string(getString(trie2, kv.k)) != kv.v {
@@ -295,16 +305,16 @@ func TestReplication(t *testing.T) {
if err != nil {
t.Fatalf("commit error: %v", err)
}
- if hash != exp {
- t.Errorf("root failure. expected %x got %x", exp, hash)
+ if hash != root {
+ t.Errorf("root failure. expected %x got %x", root, hash)
}
// recreate the trie after commit
if nodes != nil {
- triedb.Update(NewWithNodeSet(nodes))
+ db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
}
- trie2, err = New(TrieID(hash), triedb)
+ trie2, err = New(TrieID(hash), db)
if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", exp, err)
+ t.Fatalf("can't recreate trie at %x: %v", root, err)
}
// perform some insertions on the new trie.
@@ -322,8 +332,8 @@ func TestReplication(t *testing.T) {
for _, val := range vals2 {
updateString(trie2, val.k, val.v)
}
- if hash := trie2.Hash(); hash != exp {
- t.Errorf("root failure. expected %x got %x", exp, hash)
+	if got := trie2.Hash(); got != hash {
+		t.Errorf("root failure. expected %x got %x", hash, got)
}
}
@@ -421,42 +431,42 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
}
// verifyAccessList verifies the access list of the new trie against the old trie.
-func verifyAccessList(old *Trie, new *Trie, set *NodeSet) error {
+func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
deletes, inserts, updates := diffTries(old, new)
// Check insertion set
for path := range inserts {
- n, ok := set.nodes[path]
- if !ok || n.isDeleted() {
+ n, ok := set.Nodes[path]
+ if !ok || n.IsDeleted() {
return errors.New("expect new node")
}
- if len(n.prev) > 0 {
+ if len(n.Prev) > 0 {
return errors.New("unexpected origin value")
}
}
// Check deletion set
for path, blob := range deletes {
- n, ok := set.nodes[path]
- if !ok || !n.isDeleted() {
+ n, ok := set.Nodes[path]
+ if !ok || !n.IsDeleted() {
return errors.New("expect deleted node")
}
- if len(n.prev) == 0 {
+ if len(n.Prev) == 0 {
return errors.New("expect origin value")
}
- if !bytes.Equal(n.prev, blob) {
+ if !bytes.Equal(n.Prev, blob) {
return errors.New("invalid origin value")
}
}
// Check update set
for path, blob := range updates {
- n, ok := set.nodes[path]
- if !ok || n.isDeleted() {
+ n, ok := set.Nodes[path]
+ if !ok || n.IsDeleted() {
return errors.New("expect updated node")
}
- if len(n.prev) == 0 {
+ if len(n.Prev) == 0 {
return errors.New("expect origin value")
}
- if !bytes.Equal(n.prev, blob) {
+ if !bytes.Equal(n.Prev, blob) {
return errors.New("invalid origin value")
}
}
@@ -464,8 +474,13 @@ func verifyAccessList(old *Trie, new *Trie, set *NodeSet) error {
}
func runRandTest(rt randTest) bool {
+ var scheme = rawdb.HashScheme
+ //if rand.Intn(2) == 0 {
+ // scheme = rawdb.PathScheme
+ //}
var (
- triedb = NewDatabase(rawdb.NewMemoryDatabase())
+ origin = types.EmptyRootHash
+ triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme)
tr = NewEmpty(triedb)
origTrie = NewEmpty(triedb)
values = make(map[string]string) // tracks content of the trie
@@ -512,7 +527,7 @@ func runRandTest(rt randTest) bool {
return false
}
if nodes != nil {
- triedb.Update(NewWithNodeSet(nodes))
+ triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
}
newtr, err := New(TrieID(root), triedb)
if err != nil {
@@ -531,6 +546,7 @@ func runRandTest(rt randTest) bool {
tr.tracer = newTracer()
tr.resolveAndTrack(root.Bytes(), nil)
origTrie = tr.Copy()
+ origin = root
case opItercheckhash:
checktr := NewEmpty(triedb)
@@ -787,42 +803,30 @@ func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }
// to check whether changes to the trie modify the write order or data in any way.
func TestCommitSequence(t *testing.T) {
for i, tc := range []struct {
- count int
- expWriteSeqHash []byte
- expCallbackSeqHash []byte
+ count int
+ expWriteSeqHash []byte
}{
- {20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066"),
- common.FromHex("ff00f91ac05df53b82d7f178d77ada54fd0dca64526f537034a5dbe41b17df2a")},
- {200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e"),
- common.FromHex("f3cd509064c8d319bbdd1c68f511850a902ad275e6ed5bea11547e23d492a926")},
- {2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7"),
- common.FromHex("ff795ea898ba1e4cfed4a33b4cf5535a347a02cf931f88d88719faf810f9a1c9")},
+ {20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066")},
+ {200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e")},
+ {2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7")},
} {
addresses, accounts := makeAccounts(tc.count)
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
- // Another sponge is used to check the callback-sequence
- callbackSponge := sha3.NewLegacyKeccak256()
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false, func(c common.Hash) {
- // And spongify the callback-order
- callbackSponge.Write(c[:])
- })
+ db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
}
- if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
- t.Errorf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
- }
}
}
@@ -830,24 +834,18 @@ func TestCommitSequence(t *testing.T) {
// but uses random blobs instead of 'accounts'
func TestCommitSequenceRandomBlobs(t *testing.T) {
for i, tc := range []struct {
- count int
- expWriteSeqHash []byte
- expCallbackSeqHash []byte
+ count int
+ expWriteSeqHash []byte
}{
- {20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc"),
- common.FromHex("450238d73bc36dc6cc6f926987e5428535e64be403877c4560e238a52749ba24")},
- {200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554"),
- common.FromHex("0ace0b03d6cb8c0b82f6289ef5b1a1838306b455a62dafc63cada8e2924f2550")},
- {2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424"),
- common.FromHex("117d30dafaa62a1eed498c3dfd70982b377ba2b46dd3e725ed6120c80829e518")},
+ {20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc")},
+ {200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554")},
+ {2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424")},
} {
prng := rand.New(rand.NewSource(int64(i)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
- // Another sponge is used to check the callback-sequence
- callbackSponge := sha3.NewLegacyKeccak256()
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
key := make([]byte, 32)
@@ -864,18 +862,12 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false, func(c common.Hash) {
- // And spongify the callback-order
- callbackSponge.Write(c[:])
- })
+ db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
}
- if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
- t.Fatalf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
- }
}
}
@@ -910,9 +902,9 @@ func TestCommitSequenceStackTrie(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false, nil)
+ db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
if err != nil {
@@ -959,9 +951,9 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
stTrie.TryUpdate(key, []byte{0x1})
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false, nil)
+ db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
if err != nil {
@@ -1130,8 +1122,8 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
h := trie.Hash()
- _, nodes, _ := trie.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
b.StartTimer()
triedb.Dereference(h)
b.StopTimer()
diff --git a/trie/database.go b/trie/triedb/hashdb/database.go
similarity index 81%
rename from trie/database.go
rename to trie/triedb/hashdb/database.go
index bd9d97d50b..096cd632d5 100644
--- a/trie/database.go
+++ b/trie/triedb/hashdb/database.go
@@ -14,12 +14,11 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package trie
+package hashdb
import (
"errors"
"reflect"
- "runtime"
"sync"
"time"
@@ -31,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -57,10 +57,10 @@ var (
memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)
-// childResolver defines the required method to decode the provided
+// ChildResolver defines the required method to decode the provided
// trie node and iterate the children on top.
-type childResolver interface {
- forEach(node []byte, onChild func(common.Hash))
+type ChildResolver interface {
+ ForEach(node []byte, onChild func(common.Hash))
}
// Database is an intermediate write layer between the trie data structures and
@@ -73,7 +73,7 @@ type childResolver interface {
// servers even while the trie is executing expensive garbage collection.
type Database struct {
diskdb ethdb.Database // Persistent storage for matured trie nodes
- resolver childResolver // Resolver for trie node children
+ resolver ChildResolver // Resolver for trie node children
cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
@@ -90,7 +90,6 @@ type Database struct {
dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata)
childrenSize common.StorageSize // Storage size of the external children tracking
- preimages *preimageStore // Store for caching preimages of trie nodes
lock sync.RWMutex
}
@@ -112,11 +111,11 @@ var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())
// forChildren invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
-func (n *cachedNode) forChildren(resolver childResolver, onChild func(hash common.Hash)) {
+func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash common.Hash)) {
for child := range n.external {
onChild(child)
}
- resolver.forEach(n.node, onChild)
+ resolver.ForEach(n.node, onChild)
}
// Config defines all necessary options for database.
@@ -126,37 +125,14 @@ type Config struct {
Preimages bool // Flag whether the preimage of trie key is recorded
}
-// NewDatabase creates a new trie database to store ephemeral trie content before
-// its written out to disk or garbage collected. No read cache is created, so all
-// data retrievals will hit the underlying disk database.
-// Using ethdb.Database which covers KeyValueStore and Freezer Interfaces.
-func NewDatabase(diskdb ethdb.Database) *Database {
- return NewDatabaseWithConfig(diskdb, nil)
-}
-
-// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content
-// before its written out to disk or garbage collected. It also acts as a read cache
-// for nodes loaded from disk.
-func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
- var cleans *fastcache.Cache
- if config != nil && config.Cache > 0 {
- if config.Journal == "" {
- cleans = fastcache.New(config.Cache * 1024 * 1024)
- } else {
- cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
- }
- }
- var preimage *preimageStore
- if config != nil && config.Preimages {
- preimage = newPreimageStore(diskdb)
- }
+// New initializes the hash-based node database.
+func New(diskdb ethdb.Database, cleans *fastcache.Cache, resolver ChildResolver) *Database {
db := &Database{
- diskdb: diskdb,
- resolver: mptResolver{},
- cleans: cleans,
- dirties: make(map[common.Hash]*cachedNode),
- preimages: preimage,
+ diskdb: diskdb,
+ resolver: resolver,
+ cleans: cleans,
+ dirties: make(map[common.Hash]*cachedNode),
}
return db
}
@@ -376,12 +352,6 @@ func (db *Database) Cap(limit common.StorageSize) error {
size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
size += db.childrenSize
- // If the preimage cache got large enough, push to disk. If it's still small
- // leave for later to deduplicate writes.
- if db.preimages != nil {
- db.preimages.commit(false)
- }
-
// Keep committing nodes from the flush-list until we're below allowance
oldest := db.oldest
for size > limit && oldest != (common.Hash{}) {
@@ -448,7 +418,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
-func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
+func (db *Database) Commit(node common.Hash, report bool) error {
// Create a database batch to flush persistent data out. It is important that
// outside code doesn't see an inconsistent state (referenced data removed from
// memory cache during commit but not yet in persistent storage). This is ensured
@@ -456,15 +426,11 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
start := time.Now()
batch := db.diskdb.NewBatch()
- // Move all of the accumulated preimages into a write batch
- if db.preimages != nil {
- db.preimages.commit(true)
- }
// Move the trie itself into the batch, flushing if enough data is accumulated
nodes, storage := len(db.dirties), db.dirtiesSize
uncacher := &cleaner{db}
- if err := db.commit(node, batch, uncacher, callback); err != nil {
+ if err := db.commit(node, batch, uncacher); err != nil {
log.Error("Failed to commit trie from trie database", "err", err)
return err
}
@@ -501,7 +467,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
}
// commit is the private locked version of Commit.
-func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner, callback func(common.Hash)) error {
+func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
// If the node does not exist, it's a previously committed node
node, ok := db.dirties[hash]
if !ok {
@@ -512,7 +478,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
// Dereference all children and delete the node
node.forChildren(db.resolver, func(child common.Hash) {
if err == nil {
- err = db.commit(child, batch, uncacher, callback)
+ err = db.commit(child, batch, uncacher)
}
})
if err != nil {
@@ -520,9 +486,6 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
}
// If we've reached an optimal batch size, commit and start over
rawdb.WriteLegacyTrieNode(batch, hash, node.node)
- if callback != nil {
- callback(hash)
- }
if batch.ValueSize() >= ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
@@ -588,9 +551,23 @@ func (c *cleaner) Delete(key []byte) error {
panic("not implemented")
}
-// Update inserts the dirty nodes in the provided nodeset into database and
-// link the account trie with multiple storage tries if necessary.
-func (db *Database) Update(nodes *MergedNodeSet) error {
+// Initialized returns an indicator if state data is already initialized
+// in hash-based scheme by checking the presence of genesis state.
+func (db *Database) Initialized(genesisRoot common.Hash) bool {
+ return rawdb.HasLegacyTrieNode(db.diskdb, genesisRoot)
+}
+
+// Update inserts the dirty nodes in provided nodeset into database and link the
+// account trie with multiple storage tries if necessary.
+//
+// The root and parent arguments are only used by the path-based scheme.
+func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+ // Ensure the parent state is present and signal a warning if not.
+ if parent != types.EmptyRootHash {
+ if blob, _ := db.Node(parent); len(blob) == 0 {
+ log.Error("parent state is not present")
+ }
+ }
db.lock.Lock()
defer db.lock.Unlock()
// Insert dirty nodes into the database. In the same tree, it must be
@@ -600,44 +577,47 @@ func (db *Database) Update(nodes *MergedNodeSet) error {
// Note, the storage tries must be flushed before the account trie to
// retain the invariant that children go into the dirty cache first.
var order []common.Hash
- for owner := range nodes.sets {
+ for owner := range nodes.Sets {
if owner == (common.Hash{}) {
continue
}
order = append(order, owner)
}
- if _, ok := nodes.sets[common.Hash{}]; ok {
+ if _, ok := nodes.Sets[common.Hash{}]; ok {
order = append(order, common.Hash{})
}
for _, owner := range order {
- subset := nodes.sets[owner]
- subset.forEachWithOrder(func(path string, n *memoryNode) {
- if n.isDeleted() {
+ subset := nodes.Sets[owner]
+ subset.ForEachWithOrder(func(path string, n *trienode.Node) {
+ if n.IsDeleted() {
return // ignore deletion
}
- db.insert(n.hash, n.node)
+ db.insert(n.Hash, n.Blob)
})
}
// Link up the account trie and storage trie if the node points
// to an account trie leaf.
- if set, present := nodes.sets[common.Hash{}]; present {
- for _, leaf := range set.leaves {
+ if set, present := nodes.Sets[common.Hash{}]; present {
+ for _, leaf := range set.Leaves {
// Loop over the account leaves, then reference each storage root from the node containing the leaf
var account types.StateAccount
- if err := rlp.DecodeBytes(leaf.blob, &account); err != nil {
+ if err := rlp.DecodeBytes(leaf.Blob, &account); err != nil {
return err
}
- if account.Root != emptyRoot {
- db.reference(account.Root, leaf.parent)
+ if account.Root != types.EmptyRootHash {
+ db.reference(account.Root, leaf.Parent)
}
}
}
return nil
}
+// Close closes the trie database and releases all held resources.
+func (db *Database) Close() error { return nil }
+
// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
-func (db *Database) Size() (common.StorageSize, common.StorageSize) {
-func (db *Database) Size() (common.StorageSize, common.StorageSize) {
+func (db *Database) Size() common.StorageSize {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -645,76 +625,27 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
// the total memory consumption, the maintenance metadata is also needed to be
// counted.
var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
- var preimageSize common.StorageSize
- if db.preimages != nil {
- preimageSize = db.preimages.size()
- }
- return db.dirtiesSize + db.childrenSize + metadataSize, preimageSize
+ return db.dirtiesSize + db.childrenSize + metadataSize
}
-// GetReader retrieves a node reader belonging to the given state root.
-func (db *Database) GetReader(root common.Hash) Reader {
- return newHashReader(db)
+// Scheme returns the node scheme used in the database.
+func (db *Database) Scheme() string {
+ return rawdb.HashScheme
}
-// hashReader is reader of hashDatabase which implements the Reader interface.
-type hashReader struct {
- db *Database
+// Reader retrieves a node reader belonging to the given state root.
+func (db *Database) Reader(root common.Hash) *reader {
+ return &reader{db: db}
}
-// newHashReader initializes the hash reader.
-func newHashReader(db *Database) *hashReader {
- return &hashReader{db: db}
+// reader is a state reader of Database which implements the Reader interface.
+type reader struct {
+ db *Database
}
-// Node retrieves the RLP-encoded trie node blob with the given node hash.
+// Node retrieves the trie node with the given node hash.
// No error will be returned if the node is not found.
-func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
+func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
blob, _ := reader.db.Node(hash)
return blob, nil
}
-
-// saveCache saves clean state cache to given directory path
-// using specified CPU cores.
-func (db *Database) saveCache(dir string, threads int) error {
- if db.cleans == nil {
- return nil
- }
- log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)
-
- start := time.Now()
- err := db.cleans.SaveToFileConcurrent(dir, threads)
- if err != nil {
- log.Error("Failed to persist clean trie cache", "error", err)
- return err
- }
- log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
- return nil
-}
-
-// SaveCache atomically saves fast cache data to the given dir using all
-// available CPU cores.
-func (db *Database) SaveCache(dir string) error {
- return db.saveCache(dir, runtime.GOMAXPROCS(0))
-}
-
-// SaveCachePeriodically atomically saves fast cache data to the given dir with
-// the specified interval. All dump operation will only use a single CPU core.
-func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- db.saveCache(dir, 1)
- case <-stopCh:
- return
- }
- }
-}
-
-// Scheme returns the node scheme used in the database. Right now, we only support hash scheme.
-func (db *Database) Scheme() string {
- return rawdb.HashScheme
-}
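With the reworked hashdb API, the write path is: commit the trie, push the nodeset into the node database with an explicit root/parent pair, then flush to disk. A condensed sketch mirroring the trie.Database wrapper calls exercised by the tests earlier in this patch (flush is an illustrative helper):

    // flush moves data trie -> node database -> disk, with explicit lineage.
    func flush(db *Database, tr *Trie, parent common.Hash) error {
        root, nodes, err := tr.Commit(false)
        if err != nil {
            return err
        }
        if nodes != nil {
            if err := db.Update(root, parent, trienode.NewWithNodeSet(nodes)); err != nil {
                return err
            }
        }
        return db.Commit(root, false) // report toggles verbose logging only
    }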
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
new file mode 100644
index 0000000000..63a8493140
--- /dev/null
+++ b/trie/trienode/node.go
@@ -0,0 +1,195 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trienode
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// Node is a wrapper which contains the encoded blob of the trie node and its
+// unique hash identifier. It is general enough that it can be used to represent
+// trie nodes corresponding to different trie implementations.
+type Node struct {
+ Hash common.Hash // Node hash, empty for deleted node
+ Blob []byte // Encoded node blob, nil for the deleted node
+}
+
+// Size returns the total memory size used by this node.
+func (n *Node) Size() int {
+ return len(n.Blob) + common.HashLength
+}
+
+// IsDeleted returns the indicator if the node is marked as deleted.
+func (n *Node) IsDeleted() bool {
+ return n.Hash == (common.Hash{})
+}
+
+// NodeWithPrev wraps the Node with the previous node value attached.
+type NodeWithPrev struct {
+ *Node
+ Prev []byte // Encoded original value, nil means it's non-existent
+}
+
+// Unwrap returns the internal Node object.
+func (n *NodeWithPrev) Unwrap() *Node {
+ return n.Node
+}
+
+// Size returns the total memory size used by this node. It overloads
+// the function in Node by counting the size of previous value as well.
+func (n *NodeWithPrev) Size() int {
+ return n.Node.Size() + len(n.Prev)
+}
+
+// New constructs a node with provided node information.
+func New(hash common.Hash, blob []byte) *Node {
+ return &Node{Hash: hash, Blob: blob}
+}
+
+// NewNodeWithPrev constructs a node with provided node information.
+func NewNodeWithPrev(hash common.Hash, blob []byte, prev []byte) *NodeWithPrev {
+ return &NodeWithPrev{
+ Node: New(hash, blob),
+ Prev: prev,
+ }
+}
+
+// leaf represents a trie leaf node
+type leaf struct {
+ Blob []byte // raw blob of leaf
+ Parent common.Hash // the hash of parent node
+}
+
+// NodeSet contains a set of nodes collected during the commit operation.
+// Each node is keyed by path. It's not thread-safe to use.
+type NodeSet struct {
+ Owner common.Hash
+ Leaves []*leaf
+ Nodes map[string]*NodeWithPrev
+ updates int // the count of updated and inserted nodes
+ deletes int // the count of deleted nodes
+}
+
+// NewNodeSet initializes a node set. The owner is zero for the account trie and
+// the owning account address hash for storage tries.
+func NewNodeSet(owner common.Hash) *NodeSet {
+ return &NodeSet{
+ Owner: owner,
+ Nodes: make(map[string]*NodeWithPrev),
+ }
+}
+
+// ForEachWithOrder iterates the nodes with the order from bottom to top,
+// right to left; nodes with the longest path are iterated first.
+func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
+ var paths sort.StringSlice
+ for path := range set.Nodes {
+ paths = append(paths, path)
+ }
+ // Bottom-up, longest path first
+ sort.Sort(sort.Reverse(paths))
+ for _, path := range paths {
+ callback(path, set.Nodes[path].Unwrap())
+ }
+}
+
+// AddNode adds the provided node into set.
+func (set *NodeSet) AddNode(path []byte, n *NodeWithPrev) {
+ if n.IsDeleted() {
+ set.deletes += 1
+ } else {
+ set.updates += 1
+ }
+ set.Nodes[string(path)] = n
+}
+
+// AddLeaf adds the provided leaf node into set.
+func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
+ set.Leaves = append(set.Leaves, &leaf{Blob: blob, Parent: parent})
+}
+
+// Size returns the number of dirty nodes in set.
+func (set *NodeSet) Size() (int, int) {
+ return set.updates, set.deletes
+}
+
+// Hashes returns the hashes of all updated nodes.
+func (set *NodeSet) Hashes() []common.Hash {
+ var ret []common.Hash
+ for _, node := range set.Nodes {
+ ret = append(ret, node.Hash)
+ }
+ return ret
+}
+
+// Summary returns a string-representation of the NodeSet.
+func (set *NodeSet) Summary() string {
+ var out = new(strings.Builder)
+ fmt.Fprintf(out, "nodeset owner: %v\n", set.Owner)
+ if set.Nodes != nil {
+ for path, n := range set.Nodes {
+ // Deletion
+ if n.IsDeleted() {
+ fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.Prev)
+ continue
+ }
+ // Insertion
+ if len(n.Prev) == 0 {
+ fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash)
+ continue
+ }
+ // Update
+ fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev)
+ }
+ }
+ for _, n := range set.Leaves {
+ fmt.Fprintf(out, "[leaf]: %v\n", n)
+ }
+ return out.String()
+}
+
+// MergedNodeSet represents a merged node set for a group of tries.
+type MergedNodeSet struct {
+ Sets map[common.Hash]*NodeSet
+}
+
+// NewMergedNodeSet initializes an empty merged set.
+func NewMergedNodeSet() *MergedNodeSet {
+ return &MergedNodeSet{Sets: make(map[common.Hash]*NodeSet)}
+}
+
+// NewWithNodeSet constructs a merged nodeset with the provided single set.
+func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
+ merged := NewMergedNodeSet()
+ merged.Merge(set)
+ return merged
+}
+
+// Merge merges the provided dirty nodes of a trie into the set. It is assumed
+// that no duplicate set belonging to the same trie is merged twice.
+func (set *MergedNodeSet) Merge(other *NodeSet) error {
+ _, present := set.Sets[other.Owner]
+ if present {
+ return fmt.Errorf("duplicate trie for owner %#x", other.Owner)
+ }
+ set.Sets[other.Owner] = other
+ return nil
+}
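Since trienode is a brand-new package, a brief usage sketch may help; every identifier below comes from the file above, while the hash/blob/prev values are placeholders:

    // buildSet shows collecting the dirty nodes of one trie, then merging
    // the per-owner set for commit (values are placeholders).
    func buildSet(hash common.Hash, blob, prev, leafBlob []byte, parentHash common.Hash) {
        set := trienode.NewNodeSet(common.Hash{}) // zero owner = account trie
        set.AddNode([]byte{0x01}, trienode.NewNodeWithPrev(hash, blob, prev))
        set.AddLeaf(parentHash, leafBlob)

        set.ForEachWithOrder(func(path string, n *trienode.Node) {
            // longest paths first: children are visited before their parents
        })
        updates, deletes := set.Size()
        merged := trienode.NewWithNodeSet(set)
        _, _, _ = updates, deletes, merged
    }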
From 1287e483885090071865048a030b9ab6b12d4147 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Thu, 3 Oct 2024 15:01:46 +0700
Subject: [PATCH 20/41] trie, core: track state changes in statedb (#589)
* trie: triestate/Set to track changes
* core/state: track state changes
journal.go: changes in resetObjectChange
- add account in resetObjectChange (ref https://github.com/ethereum/go-ethereum/pull/27339)
- add prevAccount and prevStorage (ref https://github.com/ethereum/go-ethereum/pull/27376)
- add prevAccountOrigin and prevStorageOrigin to track changes
state_object.go: add origin to track the original StateAccount before changes
statedb.go:
- add accountsOrigin and storagesOrigin, same functions as above
- stateObjectsDestruct now tracks the previous state before destruction
- add functions for handling the destruction of old states
* all: apply changes to tests
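In outline, the patch keeps the mutated value and the original value side by side for everything touched in a block; a nil origin entry means the item did not exist before. A schematic sketch of the four journals (field names follow statedb.go below; the literal form is illustrative):

    // Per-block mutation journal kept by StateDB, keyed by address hash.
    accounts := map[common.Hash][]byte{}                 // mutated accounts, slim-RLP encoded
    storages := map[common.Hash]map[common.Hash][]byte{} // mutated slots, trimmed-RLP values
    accountsOrigin := map[common.Hash][]byte{}           // pre-block accounts, nil = nonexistent
    storagesOrigin := map[common.Hash]map[common.Hash][]byte{} // pre-block slots, nil = nonexistent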
---
core/state/dump.go | 2 +-
core/state/journal.go | 23 +-
core/state/metrics.go | 7 +
core/state/snapshot/generate.go | 2 +-
core/state/snapshot/generate_test.go | 2 +-
core/state/state_object.go | 106 ++++---
core/state/state_test.go | 14 +-
core/state/statedb.go | 400 +++++++++++++++++++------
core/state/statedb_fuzz_test.go | 362 ++++++++++++++++++++++
core/state/statedb_test.go | 2 +-
core/types/state_account.go | 26 ++
eth/protocols/snap/sync_test.go | 8 +-
light/postprocess.go | 4 +-
tests/fuzzers/stacktrie/trie_fuzzer.go | 2 +-
tests/fuzzers/trie/trie-fuzzer.go | 2 +-
trie/{database_wrap.go => database.go} | 9 +-
trie/iterator_test.go | 18 +-
trie/sync_test.go | 6 +-
trie/tracer_test.go | 18 +-
trie/trie_test.go | 20 +-
trie/trienode/node.go | 24 +-
trie/triestate/state.go | 28 ++
22 files changed, 900 insertions(+), 185 deletions(-)
create mode 100644 core/state/statedb_fuzz_test.go
rename trie/{database_wrap.go => database.go} (97%)
create mode 100644 trie/triestate/state.go
diff --git a/core/state/dump.go b/core/state/dump.go
index bfcc035435..320809492d 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -162,7 +162,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
account.SecureKey = it.Key
}
addr := common.BytesToAddress(addrBytes)
- obj := newObject(s, addr, data)
+ obj := newObject(s, addr, &data)
if !conf.SkipCode {
account.Code = obj.Code(s.db)
}
diff --git a/core/state/journal.go b/core/state/journal.go
index 20f18fb981..44ae7faf67 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -90,8 +90,18 @@ type (
account *common.Address
}
resetObjectChange struct {
+ account *common.Address
prev *stateObject
prevdestruct bool
+
+ // tracking previous states of accounts and storages in snapshot, before each transaction
+ prevAccount []byte
+ prevStorage map[common.Hash][]byte
+
+ // tracking previous states of accounts and storages in trie, before each commit
+ prevAccountOriginExist bool
+ prevAccountOrigin []byte
+ prevStorageOrigin map[common.Hash][]byte
}
selfDestructChange struct {
account *common.Address
@@ -157,12 +167,21 @@ func (ch createObjectChange) dirtied() *common.Address {
func (ch resetObjectChange) revert(s *StateDB) {
s.setStateObject(ch.prev)
if !ch.prevdestruct && s.snap != nil {
- delete(s.snapDestructs, ch.prev.addrHash)
+ delete(s.stateObjectsDestruct, ch.prev.address)
+ }
+ if ch.prevAccountOriginExist {
+ s.accountsOrigin[ch.prev.addrHash] = ch.prevAccountOrigin
+ }
+ if ch.prevAccount != nil {
+ s.accounts[ch.prev.addrHash] = ch.prevAccount
+ }
+ if ch.prevStorage != nil {
+ s.storages[ch.prev.addrHash] = ch.prevStorage
}
}
func (ch resetObjectChange) dirtied() *common.Address {
- return nil
+ return ch.account
}
func (ch selfDestructChange) revert(s *StateDB) {
diff --git a/core/state/metrics.go b/core/state/metrics.go
index e702ef3a81..64c651461e 100644
--- a/core/state/metrics.go
+++ b/core/state/metrics.go
@@ -27,4 +27,11 @@ var (
storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil)
accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil)
storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil)
+
+ slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil)
+ slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil)
+ slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil)
+ slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
+ slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
+ slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil)
)
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 065402d52f..cc5864a10c 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -440,7 +440,7 @@ func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string,
}
root, nodes, _ := snapTrie.Commit(false)
if nodes != nil {
- snapTrieDb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ snapTrieDb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
}
snapTrieDb.Commit(root, false)
}
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 51138bbc19..65ec9ee66f 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -198,7 +198,7 @@ func (t *testHelper) Commit() common.Hash {
if nodes != nil {
t.nodes.Merge(nodes)
}
- t.triedb.Update(root, types.EmptyRootHash, t.nodes)
+ t.triedb.Update(root, types.EmptyRootHash, t.nodes, nil)
t.triedb.Commit(root, false)
return root
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 3adcff01e9..0abc03d7fc 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -62,13 +62,14 @@ func (s Storage) Copy() Storage {
//
// The usage pattern is as follows:
// First you need to obtain a state object.
-// Account values can be accessed and modified through the object.
-// Finally, call CommitTrie to write the modified storage trie into a database.
+// Account values as well as storages can be accessed and modified through the object.
+// Finally, call commit to return the changes made in the storage trie and update the account data.
type stateObject struct {
- address common.Address
- addrHash common.Hash // hash of ethereum address of the account
- data types.StateAccount
+ address common.Address // address of the account
+ addrHash common.Hash // hash of ethereum address of the account
+ data types.StateAccount // Account data with all mutations applied in the scope of block
db *StateDB
+ origin *types.StateAccount // Account original data without any change applied, nil means it was not existent before
// DB error.
// State objects are used by the consensus core and VM which are
@@ -81,17 +82,22 @@ type stateObject struct {
trie Trie // storage trie, which becomes non-nil on first access
code Code // contract bytecode, which gets set when code is loaded
- originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction
+ originStorage Storage // Storage cache of original entries to dedup rewrites
pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block
- dirtyStorage Storage // Storage entries that have been modified in the current transaction execution
+ dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction
fakeStorage Storage // Fake storage which constructed by caller for debugging purpose.
// Cache flags.
- // When an object is marked self-destructed it will be delete from the trie
- // during the "update" phase of the state transition.
- dirtyCode bool // true if the code was updated
+ dirtyCode bool // true if the code was updated
+
+ // Flag whether the account was marked as selfDestructed. The selfDestructed account
+ // is still accessible in the scope of same transaction.
selfDestructed bool
- deleted bool
+
+	// Flag whether the account was marked as deleted. A selfDestructed account,
+	// or an account that is considered empty, will be marked as deleted at
+	// the end of the transaction and will no longer be accessible.
+ deleted bool
// Flag whether the object was created in the current transaction
created bool
@@ -103,21 +109,19 @@ func (s *stateObject) empty() bool {
}
// newObject creates a state object.
-func newObject(db *StateDB, address common.Address, data types.StateAccount) *stateObject {
- if data.Balance == nil {
- data.Balance = new(big.Int)
- }
- if data.CodeHash == nil {
- data.CodeHash = emptyCodeHash
- }
- if data.Root == (common.Hash{}) {
- data.Root = emptyRoot
+func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject {
+	// origin is not supposed to be changed directly; it is only reassigned with
+	// the current state when committing, so it's safe to use the pointer here.
+ origin := acct
+ if acct == nil {
+ acct = types.NewEmptyStateAccount()
}
return &stateObject{
db: db,
address: address,
addrHash: crypto.Keccak256Hash(address[:]),
- data: data,
+ origin: origin,
+ data: *acct,
originStorage: make(Storage),
pendingStorage: make(Storage),
dirtyStorage: make(Storage),
@@ -227,7 +231,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
// 1) resurrect happened, and new slot values were set -- those should
// have been handles via pendingStorage above.
// 2) we don't have new values, and can deliver empty response back
- if _, destructed := s.db.snapDestructs[s.addrHash]; destructed {
+ if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed {
return common.Hash{}
}
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
@@ -334,7 +338,10 @@ func (s *stateObject) updateTrie(db Database) Trie {
defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
}
// The snapshot storage map for the object
- var storage map[common.Hash][]byte
+ var (
+ storage map[common.Hash][]byte
+ origin map[common.Hash][]byte
+ )
// Insert all the pending updates into the trie
tr := s.getTrie(db)
hasher := s.db.hasher
@@ -345,6 +352,7 @@ func (s *stateObject) updateTrie(db Database) Trie {
if value == s.originStorage[key] {
continue
}
+ prev := s.originStorage[key]
s.originStorage[key] = value
var v []byte
@@ -357,17 +365,35 @@ func (s *stateObject) updateTrie(db Database) Trie {
s.setError(tr.TryUpdate(key[:], v))
s.db.StorageUpdated += 1
}
- // If state snapshotting is active, cache the data til commit
- if s.db.snap != nil {
- if storage == nil {
- // Retrieve the old storage map, if available, create a new one otherwise
- if storage = s.db.snapStorage[s.addrHash]; storage == nil {
- storage = make(map[common.Hash][]byte)
- s.db.snapStorage[s.addrHash] = storage
- }
+ // Cache the mutated storage slots until commit
+ if storage == nil {
+ // Retrieve the old storage map, if available, create a new one otherwise
+ if storage = s.db.storages[s.addrHash]; storage == nil {
+ storage = make(map[common.Hash][]byte)
+ s.db.storages[s.addrHash] = storage
}
- storage[crypto.HashData(hasher, key[:])] = v // v will be nil if it's deleted
}
+ khash := crypto.HashData(hasher, key[:])
+ storage[khash] = v // v will be nil if it's deleted
+ // Cache the original value of mutated storage slots
+ if origin == nil {
+ if origin = s.db.storagesOrigin[s.addrHash]; origin == nil {
+ origin = make(map[common.Hash][]byte)
+ s.db.storagesOrigin[s.addrHash] = origin
+ }
+ }
+ // Track the original value of slot only if it's mutated first time
+ if _, ok := origin[khash]; !ok {
+ if prev == (common.Hash{}) {
+ origin[khash] = nil // nil if it was not present previously
+ } else {
+ // Encoding []byte cannot fail, ok to ignore the error.
+ b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:]))
+ origin[khash] = b
+ }
+ }
+
+ // Cache the items for preloading
usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
}
if s.db.prefetcher != nil {
@@ -392,11 +418,11 @@ func (s *stateObject) updateRoot(db Database) {
s.data.Root = s.trie.Hash()
}
-// commitTrie submits the storage changes into the storage trie and re-computes
-// the root. Besides, all trie changes will be collected in a nodeset and returned.
-func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) {
+// commit returns the changes made in storage trie and updates the account data.
+func (s *stateObject) commit(db Database) (*trienode.NodeSet, error) {
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
+ s.origin = s.data.Copy() // Update original account data after commit
return nil, nil
}
if s.dbErr != nil {
@@ -410,6 +436,8 @@ func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) {
if err == nil {
s.data.Root = root
}
+ // Update original account data after commit
+ s.origin = s.data.Copy()
return nodes, err
}
@@ -449,7 +477,13 @@ func (s *stateObject) setBalance(amount *big.Int) {
}
func (s *stateObject) deepCopy(db *StateDB) *stateObject {
- stateObject := newObject(db, s.address, s.data)
+ stateObject := &stateObject{
+ db: db,
+ address: s.address,
+ addrHash: s.addrHash,
+ origin: s.origin,
+ data: s.data,
+ }
if s.trie != nil {
stateObject.trie = db.db.CopyTrie(s.trie)
}
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 8f19a5ff2c..afcc0cf70d 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -28,21 +28,21 @@ import (
"github.com/ethereum/go-ethereum/trie"
)
-type stateTest struct {
+type stateEnv struct {
db ethdb.Database
state *StateDB
}
-func newStateTest() *stateTest {
+func newStateEnv() *stateEnv {
db := rawdb.NewMemoryDatabase()
sdb, _ := New(common.Hash{}, NewDatabase(db), nil)
- return &stateTest{db: db, state: sdb}
+ return &stateEnv{db: db, state: sdb}
}
func TestDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
sdb, _ := New(common.Hash{}, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
- s := &stateTest{db: db, state: sdb}
+ s := &stateEnv{db: db, state: sdb}
// generate a few entries
obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01}))
@@ -92,7 +92,7 @@ func TestDump(t *testing.T) {
}
func TestNull(t *testing.T) {
- s := newStateTest()
+ s := newStateEnv()
address := common.HexToAddress("0x823140710bf13990e4500136726d8b55")
s.state.CreateAccount(address)
//value := common.FromHex("0x823140710bf13990e4500136726d8b55")
@@ -114,7 +114,7 @@ func TestSnapshot(t *testing.T) {
var storageaddr common.Hash
data1 := common.BytesToHash([]byte{42})
data2 := common.BytesToHash([]byte{43})
- s := newStateTest()
+ s := newStateEnv()
// snapshot the genesis state
genesis := s.state.Snapshot()
@@ -145,7 +145,7 @@ func TestSnapshot(t *testing.T) {
}
func TestSnapshotEmpty(t *testing.T) {
- s := newStateTest()
+ s := newStateEnv()
s.state.RevertToSnapshot(s.state.Snapshot())
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index c15613de40..c2566c56e3 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -35,6 +35,7 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
)
type revision struct {
@@ -73,16 +74,21 @@ type StateDB struct {
// It will be updated when the Commit is called.
originalRoot common.Hash
- snaps *snapshot.Tree
- snap snapshot.Snapshot
- snapDestructs map[common.Hash]struct{}
- snapAccounts map[common.Hash][]byte
- snapStorage map[common.Hash]map[common.Hash][]byte
+ snaps *snapshot.Tree
+ snap snapshot.Snapshot
+
+ // These maps hold the state changes (including the corresponding
+ // original value) that occurred in this **block**.
+ accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding
+ storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format
+ accountsOrigin map[common.Hash][]byte // The original value of mutated accounts in 'slim RLP' encoding
+ storagesOrigin map[common.Hash]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format
// This map holds 'live' objects, which will get modified while processing a state transition.
- stateObjects map[common.Address]*stateObject
- stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
- stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ stateObjects map[common.Address]*stateObject
+ stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie
+ stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution
+ stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value
// DB error.
// State objects are used by the consensus core and VM which are
@@ -140,26 +146,27 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
return nil, err
}
sdb := &StateDB{
- db: db,
- trie: tr,
- originalRoot: root,
- snaps: snaps,
- stateObjects: make(map[common.Address]*stateObject),
- stateObjectsPending: make(map[common.Address]struct{}),
- stateObjectsDirty: make(map[common.Address]struct{}),
- logs: make(map[common.Hash][]*types.Log),
- preimages: make(map[common.Hash][]byte),
- journal: newJournal(),
- accessList: newAccessList(),
- transientStorage: newTransientStorage(),
- hasher: crypto.NewKeccakState(),
+ db: db,
+ trie: tr,
+ originalRoot: root,
+ snaps: snaps,
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ accountsOrigin: make(map[common.Hash][]byte),
+ storagesOrigin: make(map[common.Hash]map[common.Hash][]byte),
+ stateObjects: make(map[common.Address]*stateObject),
+ stateObjectsPending: make(map[common.Address]struct{}),
+ stateObjectsDirty: make(map[common.Address]struct{}),
+ stateObjectsDestruct: make(map[common.Address]*types.StateAccount),
+ logs: make(map[common.Hash][]*types.Log),
+ preimages: make(map[common.Hash][]byte),
+ journal: newJournal(),
+ accessList: newAccessList(),
+ transientStorage: newTransientStorage(),
+ hasher: crypto.NewKeccakState(),
}
if sdb.snaps != nil {
- if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
- sdb.snapDestructs = make(map[common.Hash]struct{})
- sdb.snapAccounts = make(map[common.Hash][]byte)
- sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
- }
+ sdb.snap = sdb.snaps.Snapshot(root)
}
return sdb, nil
}
@@ -431,6 +438,14 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
// SetStorage replaces the entire storage for the specified account with given
// storage. This function should only be used for debugging.
func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
+ // SetStorage needs to wipe existing storage. We achieve this by pretending
+ // that the account self-destructed earlier in this block, by flagging
+ // it in stateObjectsDestruct. The effect of doing so is that storage lookups
+ // will not hit disk, since it is assumed that the disk-data is belonging
+ // to a previous incarnation of the object.
+ if _, ok := s.stateObjectsDestruct[addr]; !ok {
+ s.stateObjectsDestruct[addr] = nil
+ }
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
stateObject.SetStorage(storage)
@@ -514,12 +529,20 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
}
- // If state snapshotting is active, cache the data til commit. Note, this
- // update mechanism is not symmetric to the deletion, because whereas it is
- // enough to track account updates at commit time, deletions need tracking
- // at transaction boundary level to ensure we capture state clearing.
- if s.snap != nil {
- s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ // Cache the data until commit. Note, this update mechanism is not symmetric
+ // to the deletion, because whereas it is enough to track account updates
+ // at commit time, deletions need tracking at transaction boundary level to
+ // ensure we capture state clearing.
+ s.accounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ // Track the original value of mutated account, nil means it was not present.
+ // Skip if it has been tracked (because updateStateObject may be called
+ // multiple times in a block).
+ if _, ok := s.accountsOrigin[obj.addrHash]; !ok {
+ if obj.origin == nil {
+ s.accountsOrigin[obj.addrHash] = nil
+ } else {
+ s.accountsOrigin[obj.addrHash] = snapshot.SlimAccountRLP(obj.origin.Nonce, obj.origin.Balance, obj.origin.Root, obj.origin.CodeHash)
+ }
}
}
@@ -603,7 +626,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
}
}
// Insert into the live set
- obj := newObject(s, addr, *data)
+ obj := newObject(s, addr, data)
s.setStateObject(obj)
return obj
}
@@ -624,20 +647,38 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject {
// createObject creates a new state object. If there is an existing account with
// the given address, it is overwritten and returned as the second return value.
func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) {
+ // The original account should be marked as destructed and all cached
+ // account and storage data should be cleared as well. Note, it must
+ // be done here, otherwise the destruction event of original one will
+ // be lost.
prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that!
var prevdestruct bool
- if s.snap != nil && prev != nil {
- _, prevdestruct = s.snapDestructs[prev.addrHash]
+ if prev != nil {
+ _, prevdestruct = s.stateObjectsDestruct[prev.address]
if !prevdestruct {
- s.snapDestructs[prev.addrHash] = struct{}{}
+ s.stateObjectsDestruct[prev.address] = prev.origin
}
}
- newobj = newObject(s, addr, types.StateAccount{})
+ newobj = newObject(s, addr, nil)
if prev == nil {
s.journal.append(createObjectChange{account: &addr})
} else {
- s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct})
+ prevAccount, ok := s.accountsOrigin[prev.addrHash]
+ s.journal.append(resetObjectChange{
+ account: &addr,
+ prev: prev,
+ prevdestruct: prevdestruct,
+ prevAccount: s.accounts[prev.addrHash],
+ prevStorage: s.storages[prev.addrHash],
+ prevAccountOriginExist: ok,
+ prevAccountOrigin: prevAccount,
+ prevStorageOrigin: s.storagesOrigin[prev.addrHash],
+ })
+ delete(s.accounts, prev.addrHash)
+ delete(s.storages, prev.addrHash)
+ delete(s.accountsOrigin, prev.addrHash)
+ delete(s.storagesOrigin, prev.addrHash)
}
newobj.created = true
@@ -712,18 +753,30 @@ func (s *StateDB) Blacklisted(contractAddr *common.Address, addr *common.Address
func (s *StateDB) Copy() *StateDB {
// Copy all the basic fields, initialize the memory ones
state := &StateDB{
- db: s.db,
- trie: s.db.CopyTrie(s.trie),
- originalRoot: s.originalRoot,
- stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
- stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
- stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
- refund: s.refund,
- logs: make(map[common.Hash][]*types.Log, len(s.logs)),
- logSize: s.logSize,
- preimages: make(map[common.Hash][]byte, len(s.preimages)),
- journal: newJournal(),
- hasher: crypto.NewKeccakState(),
+ db: s.db,
+ trie: s.db.CopyTrie(s.trie),
+ originalRoot: s.originalRoot,
+ accounts: copySet(s.accounts),
+ storages: copy2DSet(s.storages),
+ accountsOrigin: copySet(s.accountsOrigin),
+ storagesOrigin: copy2DSet(s.storagesOrigin),
+ stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)),
+ stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)),
+ stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)),
+ stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)),
+ refund: s.refund,
+ logs: make(map[common.Hash][]*types.Log, len(s.logs)),
+ logSize: s.logSize,
+ preimages: make(map[common.Hash][]byte, len(s.preimages)),
+ journal: newJournal(),
+ hasher: crypto.NewKeccakState(),
+
+ // In order for the block producer to be able to use and make additions
+ // to the snapshot tree, we need to copy that as well. Otherwise, any
+ // block mined by ourselves will cause gaps in the tree, and force the
+ // miner to operate trie-backed only.
+ snaps: s.snaps,
+ snap: s.snap,
}
// Copy the dirty states, logs, and preimages
for addr := range s.journal.dirties {
@@ -756,6 +809,12 @@ func (s *StateDB) Copy() *StateDB {
}
state.stateObjectsDirty[addr] = struct{}{}
}
+
+ // Deep copy the destruction markers.
+ for addr, value := range s.stateObjectsDestruct {
+ state.stateObjectsDestruct[addr] = value
+ }
+
for hash, logs := range s.logs {
cpy := make([]*types.Log, len(logs))
for i, l := range logs {
@@ -782,31 +841,7 @@ func (s *StateDB) Copy() *StateDB {
if s.prefetcher != nil {
state.prefetcher = s.prefetcher.copy()
}
- if s.snaps != nil {
- // In order for the miner to be able to use and make additions
- // to the snapshot tree, we need to copy that aswell.
- // Otherwise, any block mined by ourselves will cause gaps in the tree,
- // and force the miner to operate trie-backed only
- state.snaps = s.snaps
- state.snap = s.snap
- // deep copy needed
- state.snapDestructs = make(map[common.Hash]struct{})
- for k, v := range s.snapDestructs {
- state.snapDestructs[k] = v
- }
- state.snapAccounts = make(map[common.Hash][]byte)
- for k, v := range s.snapAccounts {
- state.snapAccounts[k] = v
- }
- state.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
- for k, v := range s.snapStorage {
- temp := make(map[common.Hash][]byte)
- for kk, vv := range v {
- temp[kk] = vv
- }
- state.snapStorage[k] = temp
- }
- }
+
return state
}
@@ -858,15 +893,20 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) {
obj.deleted = true
- // If state snapshotting is active, also mark the destruction there.
+ // We need to maintain account deletions explicitly (will remain
+ // set indefinitely). Note only the first occurred self-destruct
+ // event is tracked.
+ if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
+ s.stateObjectsDestruct[obj.address] = obj.origin
+ }
+
// Note, we can't do this only at the end of a block because multiple
// transactions within the same block might self destruct and then
// ressurrect an account; but the snapshotter needs both events.
- if s.snap != nil {
- s.snapDestructs[obj.addrHash] = struct{}{} // We need to maintain account deletions explicitly (will remain set indefinitely)
- delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a ressurrect)
- delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a ressurrect)
- }
+ delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
+ delete(s.accountsOrigin, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.storagesOrigin, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
} else {
obj.finalise(true) // Prefetch slots in the background
}
@@ -949,6 +989,134 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
return s.trie.Hash()
}
+// deleteStorage iterates the storage trie belongs to the account and mark all
+// slots inside as deleted.
+func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
+ start := time.Now()
+ tr, err := s.db.OpenStorageTrie(s.originalRoot, addrHash, root)
+ if err != nil {
+ return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
+ }
+ it := tr.NodeIterator(nil)
+ var (
+ set = trienode.NewNodeSet(addrHash)
+ slots = make(map[common.Hash][]byte)
+ stateSize common.StorageSize
+ nodeSize common.StorageSize
+ )
+ for it.Next(true) {
+ // arbitrary stateSize limit, make it configurable
+ if stateSize+nodeSize > 512*1024*1024 {
+ log.Info("Skip large storage deletion", "address", addr.Hex(), "states", stateSize, "nodes", nodeSize)
+ if metrics.EnabledExpensive {
+ slotDeletionSkip.Inc(1)
+ }
+ return true, nil, nil, nil
+ }
+ if it.Leaf() {
+ slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
+ stateSize += common.StorageSize(common.HashLength + len(it.LeafBlob()))
+ continue
+ }
+ if it.Hash() == (common.Hash{}) {
+ continue
+ }
+ nodeSize += common.StorageSize(len(it.Path()) + len(it.NodeBlob()))
+ set.AddNode(it.Path(), trienode.NewNodeWithPrev(common.Hash{}, nil, it.NodeBlob()))
+ }
+ if err := it.Error(); err != nil {
+ return false, nil, nil, err
+ }
+ if metrics.EnabledExpensive {
+ if int64(len(slots)) > slotDeletionMaxCount.Value() {
+ slotDeletionMaxCount.Update(int64(len(slots)))
+ }
+ if int64(stateSize+nodeSize) > slotDeletionMaxSize.Value() {
+ slotDeletionMaxSize.Update(int64(stateSize + nodeSize))
+ }
+ slotDeletionTimer.UpdateSince(start)
+ slotDeletionCount.Mark(int64(len(slots)))
+ slotDeletionSize.Mark(int64(stateSize + nodeSize))
+ }
+ return false, slots, set, nil
+}
+
+// handleDestruction processes all destruction markers and deletes the account
+// and associated storage slots if necessary. There are four possible situations
+// here:
+//
+// - the account was not existent and be marked as destructed
+//
+// - the account was not existent and be marked as destructed,
+// however, it's resurrected later in the same block.
+//
+// - the account was existent and be marked as destructed
+//
+// - the account was existent and be marked as destructed,
+// however it's resurrected later in the same block.
+//
+// In case (a), nothing needs be deleted, nil to nil transition can be ignored.
+//
+// In case (b), nothing needs be deleted, nil is used as the original value for
+// newly created account and storages
+//
+// In case (c), **original** account along with its storages should be deleted,
+// with their values be tracked as original value.
+//
+// In case (d), **original** account along with its storages should be deleted,
+// with their values be tracked as original value.
+func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Hash]struct{}, error) {
+ incomplete := make(map[common.Hash]struct{})
+ for addr, prev := range s.stateObjectsDestruct {
+ // The original account was non-existing, and it's marked as destructed
+ // in the scope of block. It can be case (a) or (b).
+ // - for (a), skip it without doing anything.
+ // - for (b), track account's original value as nil. It may overwrite
+ // the data cached in s.accountsOrigin set by 'updateStateObject'.
+ addrHash := crypto.Keccak256Hash(addr[:])
+ if prev == nil {
+ if _, ok := s.accounts[addrHash]; ok {
+ s.accountsOrigin[addrHash] = nil // case (b)
+ }
+ continue
+ }
+ // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'.
+ s.accountsOrigin[addrHash] = snapshot.SlimAccountRLP(prev.Nonce, prev.Balance, prev.Root, prev.CodeHash) // case (c) or (d)
+
+ // Short circuit if the storage was empty.
+ if prev.Root == types.EmptyRootHash {
+ continue
+ }
+ // Remove storage slots belong to the account.
+ aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
+ if err != nil {
+ return nil, fmt.Errorf("failed to delete storage, err: %w", err)
+ }
+ // The storage is too huge to handle, skip it but mark as incomplete.
+ // For case (d), the account is resurrected might with a few slots
+ // created. In this case, wipe the entire storage state diff because
+ // of aborted deletion.
+ if aborted {
+ incomplete[addrHash] = struct{}{}
+ delete(s.storagesOrigin, addrHash)
+ continue
+ }
+ if s.storagesOrigin[addrHash] == nil {
+ s.storagesOrigin[addrHash] = slots
+ } else {
+ // It can overwrite the data in s.storagesOrigin[addrHash] set by
+ // 'object.updateTrie'.
+ for key, val := range slots {
+ s.storagesOrigin[addrHash][key] = val
+ }
+ }
+ if err := nodes.Merge(set); err != nil {
+ return nil, err
+ }
+ }
+ return incomplete, nil
+}
+
// SetTxContext sets the current transaction hash and index which are
// used when the EVM emits new state logs. It should be invoked before
// transaction execution.
@@ -982,6 +1150,12 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
nodes = trienode.NewMergedNodeSet()
)
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
+ // Handle all state deletions first
+ incomplete, err := s.handleDestruction(nodes)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ // Handle all state updates afterwards
for addr := range s.stateObjectsDirty {
if obj := s.stateObjects[addr]; !obj.deleted {
// Write any contract code associated with the state object
@@ -990,7 +1164,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
obj.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie
- nodeSet, err := obj.commitTrie(s.db)
+ nodeSet, err := obj.commit(s.db)
if err != nil {
return common.Hash{}, err
}
@@ -1006,15 +1180,6 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
}
}
- // If the contract is destructed, the storage is still left in the
- // database as dangling data. Theoretically it's should be wiped from
- // database as well, but in hash-based-scheme it's extremely hard to
- // determine that if the trie nodes are also referenced by other storage,
- // and in path-based-scheme some technical challenges are still unsolved.
- // Although it won't affect the correctness but please fix it TODO(rjl493456442).
- if len(s.stateObjectsDirty) > 0 {
- s.stateObjectsDirty = make(map[common.Address]struct{})
- }
if codeWriter.ValueSize() > 0 {
if err := codeWriter.Write(); err != nil {
log.Crit("Failed to commit dirty codes", "error", err)
@@ -1056,7 +1221,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
start := time.Now()
// Only update if there's a state transition (skip empty Clique blocks)
if parent := s.snap.Root(); parent != root {
- if err := s.snaps.Update(root, parent, s.snapDestructs, s.snapAccounts, s.snapStorage); err != nil {
+ if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", parent, "to", root, "err", err)
}
// Keep 128 diff layers in the memory, persistent layer is 129th.
@@ -1067,7 +1232,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
}
}
- s.snap, s.snapDestructs, s.snapAccounts, s.snapStorage = nil, nil, nil, nil
+ s.snap = nil
if metrics.EnabledExpensive {
s.SnapshotCommits += time.Since(start)
}
@@ -1083,7 +1248,12 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
if root != origin {
start := time.Now()
- if err := s.db.TrieDB().Update(root, origin, nodes); err != nil {
+ set := &triestate.Set{
+ Accounts: s.accountsOrigin,
+ Storages: s.storagesOrigin,
+ Incomplete: incomplete,
+ }
+ if err := s.db.TrieDB().Update(root, origin, nodes, set); err != nil {
return common.Hash{}, err
}
s.originalRoot = root
@@ -1091,6 +1261,13 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
s.TrieDBCommits += time.Since(start)
}
}
+ // Clear all internal flags at the end of commit operation.
+ s.accounts = make(map[common.Hash][]byte)
+ s.storages = make(map[common.Hash]map[common.Hash][]byte)
+ s.accountsOrigin = make(map[common.Hash][]byte)
+ s.storagesOrigin = make(map[common.Hash]map[common.Hash][]byte)
+ s.stateObjectsDirty = make(map[common.Address]struct{})
+ s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount)
return root, nil
}
@@ -1197,3 +1374,38 @@ func (s *StateDB) DirtyAccounts(hash common.Hash, number uint64) []*types.DirtyS
return dirtyAccounts
}
+
+// convertAccountSet converts a provided account set from address keyed to hash keyed.
+func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} {
+ ret := make(map[common.Hash]struct{}, len(set))
+ for addr := range set {
+ obj, exist := s.stateObjects[addr]
+ if !exist {
+ ret[crypto.Keccak256Hash(addr[:])] = struct{}{}
+ } else {
+ ret[obj.addrHash] = struct{}{}
+ }
+ }
+ return ret
+}
+
+// copySet returns a deep-copied set.
+func copySet[k comparable](set map[k][]byte) map[k][]byte {
+ copied := make(map[k][]byte, len(set))
+ for key, val := range set {
+ copied[key] = common.CopyBytes(val)
+ }
+ return copied
+}
+
+// copy2DSet returns a two-dimensional deep-copied set.
+func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte {
+ copied := make(map[k]map[common.Hash][]byte, len(set))
+ for addr, subset := range set {
+ copied[addr] = make(map[common.Hash][]byte, len(subset))
+ for key, val := range subset {
+ copied[addr][key] = common.CopyBytes(val)
+ }
+ }
+ return copied
+}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
new file mode 100644
index 0000000000..0f627b2d69
--- /dev/null
+++ b/core/state/statedb_fuzz_test.go
@@ -0,0 +1,362 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see
+
+package state
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "math/rand"
+ "reflect"
+ "strings"
+ "testing"
+ "testing/quick"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+// A stateTest checks that the state changes are correctly captured. Instances
+// of this test with pseudorandom content are created by Generate.
+//
+// The test works as follows:
+//
+// A list of states are created by applying actions. The state changes between
+// each state instance are tracked and be verified.
+type stateTest struct {
+ addrs []common.Address // all account addresses
+ actions [][]testAction // modifications to the state, grouped by block
+ chunk int // The number of actions per chunk
+ err error // failure details are reported through this field
+}
+
+// newStateTestAction creates a random action that changes state.
+func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction {
+ actions := []testAction{
+ {
+ name: "SetBalance",
+ fn: func(a testAction, s *StateDB) {
+ s.SetBalance(addr, big.NewInt(a.args[0]))
+ },
+ args: make([]int64, 1),
+ },
+ {
+ name: "SetNonce",
+ fn: func(a testAction, s *StateDB) {
+ s.SetNonce(addr, uint64(a.args[0]))
+ },
+ args: make([]int64, 1),
+ },
+ {
+ name: "SetState",
+ fn: func(a testAction, s *StateDB) {
+ var key, val common.Hash
+ binary.BigEndian.PutUint16(key[:], uint16(a.args[0]))
+ binary.BigEndian.PutUint16(val[:], uint16(a.args[1]))
+ s.SetState(addr, key, val)
+ },
+ args: make([]int64, 2),
+ },
+ {
+ name: "SetCode",
+ fn: func(a testAction, s *StateDB) {
+ code := make([]byte, 16)
+ binary.BigEndian.PutUint64(code, uint64(a.args[0]))
+ binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
+ s.SetCode(addr, code)
+ },
+ args: make([]int64, 2),
+ },
+ {
+ name: "CreateAccount",
+ fn: func(a testAction, s *StateDB) {
+ s.CreateAccount(addr)
+ },
+ },
+ {
+ name: "Suicide",
+ fn: func(a testAction, s *StateDB) {
+ s.SelfDestruct(addr)
+ },
+ },
+ }
+ var nonRandom = index != -1
+ if index == -1 {
+ index = r.Intn(len(actions))
+ }
+ action := actions[index]
+ var names []string
+ if !action.noAddr {
+ names = append(names, addr.Hex())
+ }
+ for i := range action.args {
+ if nonRandom {
+ action.args[i] = rand.Int63n(10000) + 1 // set balance to non-zero
+ } else {
+ action.args[i] = rand.Int63n(10000)
+ }
+ names = append(names, fmt.Sprint(action.args[i]))
+ }
+ action.name += " " + strings.Join(names, ", ")
+ return action
+}
+
+// Generate returns a new snapshot test of the given size. All randomness is
+// derived from r.
+func (*stateTest) Generate(r *rand.Rand, size int) reflect.Value {
+ addrs := make([]common.Address, 5)
+ for i := range addrs {
+ addrs[i][0] = byte(i)
+ }
+ actions := make([][]testAction, rand.Intn(5)+1)
+
+ for i := 0; i < len(actions); i++ {
+ actions[i] = make([]testAction, size)
+ for j := range actions[i] {
+ if j == 0 {
+ // Always include a set balance action to make sure
+ // the state changes are not empty.
+ actions[i][j] = newStateTestAction(common.HexToAddress("0xdeadbeef"), r, 0)
+ continue
+ }
+ actions[i][j] = newStateTestAction(addrs[r.Intn(len(addrs))], r, -1)
+ }
+ }
+ chunk := int(math.Sqrt(float64(size)))
+ if size > 0 && chunk == 0 {
+ chunk = 1
+ }
+ return reflect.ValueOf(&stateTest{
+ addrs: addrs,
+ actions: actions,
+ chunk: chunk,
+ })
+}
+
+func (test *stateTest) String() string {
+ out := new(bytes.Buffer)
+ for i, actions := range test.actions {
+ fmt.Fprintf(out, "---- block %d ----\n", i)
+ for j, action := range actions {
+ if j%test.chunk == 0 {
+ fmt.Fprintf(out, "---- transaction %d ----\n", j/test.chunk)
+ }
+ fmt.Fprintf(out, "%4d: %s\n", j%test.chunk, action.name)
+ }
+ }
+ return out.String()
+}
+
+func (test *stateTest) run() bool {
+ var (
+ roots []common.Hash
+ accountList []map[common.Hash][]byte
+ storageList []map[common.Hash]map[common.Hash][]byte
+ onCommit = func(states *triestate.Set) {
+ accountList = append(accountList, copySet(states.Accounts))
+ storageList = append(storageList, copy2DSet(states.Storages))
+ }
+ disk = rawdb.NewMemoryDatabase()
+ tdb = trie.NewDatabaseWithConfig(disk, &trie.Config{OnCommit: onCommit})
+ sdb = NewDatabaseWithNodeDB(disk, tdb)
+ byzantium = rand.Intn(2) == 0
+ )
+ for i, actions := range test.actions {
+ root := types.EmptyRootHash
+ if i != 0 {
+ root = roots[len(roots)-1]
+ }
+ state, err := New(root, sdb, nil)
+ if err != nil {
+ panic(err)
+ }
+ for i, action := range actions {
+ if i%test.chunk == 0 && i != 0 {
+ if byzantium {
+ state.Finalise(true) // call finalise at the transaction boundary
+ } else {
+ state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
+ }
+ }
+ action.fn(action, state)
+ }
+ if byzantium {
+ state.Finalise(true) // call finalise at the transaction boundary
+ } else {
+ state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
+ }
+ nroot, err := state.Commit(true) // call commit at the block boundary
+ if err != nil {
+ panic(err)
+ }
+ if nroot == root {
+ return true // filter out non-change state transition
+ }
+ roots = append(roots, nroot)
+ }
+ for i := 0; i < len(test.actions); i++ {
+ root := types.EmptyRootHash
+ if i != 0 {
+ root = roots[i-1]
+ }
+ test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i])
+ if test.err != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// verifyAccountCreation this function is called once the state diff says that
+// specific account was not present. A serial of checks will be performed to
+// ensure the state diff is correct, includes:
+//
+// - the account was indeed not present in trie
+// - the account is present in new trie, nil->nil is regarded as invalid
+// - the slots transition is correct
+func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addrHash common.Hash, slots map[common.Hash][]byte) error {
+ // Verify account change
+ oBlob := otr.Get(addrHash.Bytes())
+ nBlob := ntr.Get(addrHash.Bytes())
+ if len(oBlob) != 0 {
+ return fmt.Errorf("unexpected account in old trie, %x", addrHash)
+ }
+ if len(nBlob) == 0 {
+ return fmt.Errorf("missing account in new trie, %x", addrHash)
+ }
+
+ // Verify storage changes
+ var nAcct types.StateAccount
+ if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
+ return err
+ }
+ // Account has no slot, empty slot set is expected
+ if nAcct.Root == types.EmptyRootHash {
+ if len(slots) != 0 {
+ return fmt.Errorf("unexpected slot changes %x", addrHash)
+ }
+ return nil
+ }
+ // Account has slots, ensure all new slots are contained
+ st, err := trie.New(trie.StorageTrieID(next, addrHash, nAcct.Root), db)
+ if err != nil {
+ return err
+ }
+ for key, val := range slots {
+ st.Update(key.Bytes(), val)
+ }
+ if st.Hash() != types.EmptyRootHash {
+ return errors.New("invalid slot changes")
+ }
+ return nil
+}
+
+// verifyAccountUpdate this function is called once the state diff says that
+// specific account was present. A serial of checks will be performed to
+// ensure the state diff is correct, includes:
+//
+// - the account was indeed present in trie
+// - the account in old trie matches the provided value
+// - the slots transition is correct
+func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addrHash common.Hash, origin []byte, slots map[common.Hash][]byte) error {
+ // Verify account change
+ oBlob := otr.Get(addrHash.Bytes())
+ nBlob := ntr.Get(addrHash.Bytes())
+ if len(oBlob) == 0 {
+ return fmt.Errorf("missing account in old trie, %x", addrHash)
+ }
+ full, err := snapshot.FullAccountRLP(origin)
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(full, oBlob) {
+ return fmt.Errorf("account value is not matched, %x", addrHash)
+ }
+
+ // Decode accounts
+ var (
+ oAcct types.StateAccount
+ nAcct types.StateAccount
+ nRoot common.Hash
+ )
+ if err := rlp.DecodeBytes(oBlob, &oAcct); err != nil {
+ return err
+ }
+ if len(nBlob) == 0 {
+ nRoot = types.EmptyRootHash
+ } else {
+ if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil {
+ return err
+ }
+ nRoot = nAcct.Root
+ }
+
+ // Verify storage
+ st, err := trie.New(trie.StorageTrieID(next, addrHash, nRoot), db)
+ if err != nil {
+ return err
+ }
+ for key, val := range slots {
+ st.Update(key.Bytes(), val)
+ }
+ if st.Hash() != oAcct.Root {
+ return errors.New("invalid slot changes")
+ }
+ return nil
+}
+
+func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Hash][]byte, storagesOrigin map[common.Hash]map[common.Hash][]byte) error {
+ otr, err := trie.New(trie.StateTrieID(root), db)
+ if err != nil {
+ return err
+ }
+ ntr, err := trie.New(trie.StateTrieID(next), db)
+ if err != nil {
+ return err
+ }
+ for addrHash, account := range accountsOrigin {
+ var err error
+ if len(account) == 0 {
+ err = test.verifyAccountCreation(next, db, otr, ntr, addrHash, storagesOrigin[addrHash])
+ } else {
+ err = test.verifyAccountUpdate(next, db, otr, ntr, addrHash, accountsOrigin[addrHash], storagesOrigin[addrHash])
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func TestStateChanges(t *testing.T) {
+ config := &quick.Config{MaxCount: 1000}
+ err := quick.Check((*stateTest).run, config)
+ if cerr, ok := err.(*quick.CheckError); ok {
+ test := cerr.In[0].(*stateTest)
+ t.Errorf("%v:\n%s", test.err, test)
+ } else if err != nil {
+ t.Error(err)
+ }
+}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index f11a11731e..eac8cbe06d 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -481,7 +481,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
}
func TestTouchDelete(t *testing.T) {
- s := newStateTest()
+ s := newStateEnv()
s.state.GetOrNewStateObject(common.Address{})
root, _ := s.state.Commit(false)
s.state, _ = New(root, s.state.db, s.state.snaps)
diff --git a/core/types/state_account.go b/core/types/state_account.go
index a80a048f16..5853fb58b5 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -20,8 +20,11 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
)
+var emptyCodeHash = crypto.Keccak256(nil)
+
// StateAccount is the Ethereum consensus representation of accounts.
// These objects are stored in the main account trie.
type StateAccount struct {
@@ -31,6 +34,29 @@ type StateAccount struct {
CodeHash []byte
}
+// NewEmptyStateAccount constructs an empty state account.
+func NewEmptyStateAccount() *StateAccount {
+ return &StateAccount{
+ Balance: new(big.Int),
+ Root: EmptyRootHash,
+ CodeHash: emptyCodeHash,
+ }
+}
+
+// Copy returns a deep-copied state account object.
+func (acct *StateAccount) Copy() *StateAccount {
+ var balance *big.Int
+ if acct.Balance != nil {
+ balance = new(big.Int).Set(acct.Balance)
+ }
+ return &StateAccount{
+ Nonce: acct.Nonce,
+ Balance: balance,
+ Root: acct.Root,
+ CodeHash: common.CopyBytes(acct.CodeHash),
+ }
+}
+
type DirtyStateAccount struct {
Address common.Address `json:"address"`
Nonce uint64 `json:"nonce"`
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 0a0f3b9d76..966bec7a2e 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -1382,7 +1382,7 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes, _ := accTrie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1443,7 +1443,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes, _ := accTrie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1491,7 +1491,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
nodes.Merge(set)
// Commit gathered dirty nodes into database
- db.Update(root, types.EmptyRootHash, nodes)
+ db.Update(root, types.EmptyRootHash, nodes, nil)
// Re-create tries with new root
accTrie, _ = trie.New(trie.StateTrieID(root), db)
@@ -1553,7 +1553,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
nodes.Merge(set)
// Commit gathered dirty nodes into database
- db.Update(root, types.EmptyRootHash, nodes)
+ db.Update(root, types.EmptyRootHash, nodes, nil)
// Re-create tries with new root
accTrie, err := trie.New(trie.StateTrieID(root), db)
diff --git a/light/postprocess.go b/light/postprocess.go
index 0f3dce0f17..7957d98807 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -226,7 +226,7 @@ func (c *ChtIndexerBackend) Commit() error {
}
// Commite trie changes into trie database in case it's not nil.
if nodes != nil {
- if err := c.triedb.Update(root, c.originRoot, trienode.NewWithNodeSet(nodes)); err != nil {
+ if err := c.triedb.Update(root, c.originRoot, trienode.NewWithNodeSet(nodes), nil); err != nil {
return err
}
}
@@ -474,7 +474,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
}
if nodes != nil {
- if err := b.triedb.Update(root, b.originRoot, trienode.NewWithNodeSet(nodes)); err != nil {
+ if err := b.triedb.Update(root, b.originRoot, trienode.NewWithNodeSet(nodes), nil); err != nil {
return err
}
}
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index f1f8e94c3d..e45b172c76 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -189,7 +189,7 @@ func (f *fuzzer) fuzz() int {
panic(err)
}
if nodes != nil {
- dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
}
// Flush memdb -> disk (sponge)
dbA.Commit(rootA, false)
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 12165d5f54..1b67d81e6b 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -173,7 +173,7 @@ func runRandTest(rt randTest) error {
return err
}
if nodes != nil {
- if err := triedb.Update(hash, origin, trienode.NewWithNodeSet(nodes)); err != nil {
+ if err := triedb.Update(hash, origin, trienode.NewWithNodeSet(nodes), nil); err != nil {
return err
}
}
diff --git a/trie/database_wrap.go b/trie/database.go
similarity index 97%
rename from trie/database_wrap.go
rename to trie/database.go
index 43b9615f77..96d201aabe 100644
--- a/trie/database_wrap.go
+++ b/trie/database.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
)
// Config defines all necessary options for database.
@@ -34,6 +35,9 @@ type Config struct {
Cache int // Memory allowance (MB) to use for caching trie nodes in memory
Journal string // Journal of clean cache to survive node restarts
Preimages bool // Flag whether the preimage of trie key is recorded
+
+ // Testing hooks
+ OnCommit func(states *triestate.Set) // Hook invoked when commit is performed
}
// backend defines the methods needed to access/update trie nodes in different
@@ -130,7 +134,10 @@ func (db *Database) Reader(blockRoot common.Hash) Reader {
// given set in order to update state from the specified parent to the specified
// root. The held pre-images accumulated up to this point will be flushed in case
// the size exceeds the threshold.
-func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ if db.config != nil && db.config.OnCommit != nil {
+ db.config.OnCommit(states)
+ }
if db.preimages != nil {
db.preimages.commit(false)
}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index f01c154d26..a7d9c96b1f 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -66,7 +66,7 @@ func TestIterator(t *testing.T) {
if err != nil {
t.Fatalf("Failed to commit trie %v", err)
}
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
found := make(map[string]string)
it := NewIterator(trie.NodeIterator(nil))
@@ -249,7 +249,7 @@ func TestDifferenceIterator(t *testing.T) {
triea.Update([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
+ dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -258,7 +258,7 @@ func TestDifferenceIterator(t *testing.T) {
trieb.Update([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
+ dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
@@ -291,7 +291,7 @@ func TestUnionIterator(t *testing.T) {
triea.Update([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
+ dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -300,7 +300,7 @@ func TestUnionIterator(t *testing.T) {
trieb.Update([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
+ dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
@@ -362,7 +362,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
tr.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := tr.Commit(false)
- tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
tdb.Commit(root, false)
}
@@ -478,7 +478,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
break
}
}
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
triedb.Commit(root, false)
}
@@ -599,7 +599,7 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
trie.Update(key, val)
}
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
// Return the generated trie
return triedb, trie, logDb
}
@@ -639,7 +639,7 @@ func testIteratorNodeBlob(t *testing.T, scheme string) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
triedb.Commit(root, false)
var found = make(map[common.Hash][]byte)
diff --git a/trie/sync_test.go b/trie/sync_test.go
index d1d26de5eb..f2f9d1e6d1 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -61,7 +61,7 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *SecureTrie, map[st
if err != nil {
panic(fmt.Errorf("failed to commit trie: %v", err))
}
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
+ if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
if err := triedb.Commit(root, false); err != nil {
@@ -713,7 +713,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
diff[string(key)] = val
}
root, nodes, _ := srcTrie.Commit(false)
- if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
+ if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(err)
}
if err := srcDb.Commit(root, false); err != nil {
@@ -738,7 +738,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
reverted[k] = val
}
root, nodes, _ = srcTrie.Commit(false)
- if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
+ if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(err)
}
if err := srcDb.Commit(root, false); err != nil {
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index 2421d88202..5ca3c528b6 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -71,7 +71,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
seen := setKeys(iterNodes(db, root))
if !compareSet(insertSet, seen) {
@@ -137,7 +137,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -152,7 +152,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(val.k), randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -170,7 +170,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update(key, randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -185,7 +185,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(key), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -200,7 +200,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(val.k), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -219,7 +219,7 @@ func TestAccessListLeak(t *testing.T) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
var cases = []struct {
op func(tr *Trie)
@@ -269,7 +269,7 @@ func TestTinyTree(t *testing.T) {
trie.Update([]byte(val.k), randBytes(32))
}
root, set, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set), nil)
parent := root
trie, _ = New(TrieID(root), db)
@@ -278,7 +278,7 @@ func TestTinyTree(t *testing.T) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, set, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(set))
+ db.Update(root, parent, trienode.NewWithNodeSet(set), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, set); err != nil {
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 0223466222..447374a1af 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -96,7 +96,7 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
triedb.Commit(root, true)
}
@@ -214,7 +214,7 @@ func TestGet(t *testing.T) {
return
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
}
}
@@ -289,7 +289,7 @@ func TestReplication(t *testing.T) {
if err != nil {
t.Fatalf("commit error: %v", err)
}
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
// create a new trie on top of the database and check that lookups work.
trie2, err := New(TrieID(root), db)
@@ -310,7 +310,7 @@ func TestReplication(t *testing.T) {
}
// recreate the trie after commit
if nodes != nil {
- db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
}
trie2, err = New(TrieID(hash), db)
if err != nil {
@@ -527,7 +527,7 @@ func runRandTest(rt randTest) bool {
return false
}
if nodes != nil {
- triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, origin, trienode.NewWithNodeSet(nodes), nil)
}
newtr, err := New(TrieID(root), triedb)
if err != nil {
@@ -821,7 +821,7 @@ func TestCommitSequence(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
@@ -862,7 +862,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
@@ -902,7 +902,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
// And flush stacktrie -> disk
@@ -951,7 +951,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
stTrie.TryUpdate(key, []byte{0x1})
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
// And flush stacktrie -> disk
@@ -1123,7 +1123,7 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
}
h := trie.Hash()
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
b.StartTimer()
triedb.Dereference(h)
b.StopTimer()
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index 63a8493140..ddadbbf371 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -121,6 +121,26 @@ func (set *NodeSet) AddNode(path []byte, n *NodeWithPrev) {
set.Nodes[string(path)] = n
}
+// Merge adds a set of nodes into the set.
+func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*NodeWithPrev) error {
+ if set.Owner != owner {
+ return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner)
+ }
+ for path, node := range nodes {
+ prev, ok := set.Nodes[path]
+ if ok {
+ // overwrite happens, revoke the counter
+ if prev.IsDeleted() {
+ set.deletes -= 1
+ } else {
+ set.updates -= 1
+ }
+ }
+ set.AddNode([]byte(path), node)
+ }
+ return nil
+}
+
// AddLeaf adds the provided leaf node into set.
func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
set.Leaves = append(set.Leaves, &leaf{Blob: blob, Parent: parent})
@@ -186,9 +206,9 @@ func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
// Merge merges the provided dirty nodes of a trie into the set. The assumption
// is held that no duplicated set belonging to the same trie will be merged twice.
func (set *MergedNodeSet) Merge(other *NodeSet) error {
- _, present := set.Sets[other.Owner]
+ subset, present := set.Sets[other.Owner]
if present {
- return fmt.Errorf("duplicate trie for owner %#x", other.Owner)
+ return subset.Merge(other.Owner, other.Nodes)
}
set.Sets[other.Owner] = other
return nil
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
new file mode 100644
index 0000000000..e5d0b87cb7
--- /dev/null
+++ b/trie/triestate/state.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package triestate
+
+import "github.com/ethereum/go-ethereum/common"
+
+// Set represents a collection of mutated states during a state transition.
+// The value refers to the original content of state before the transition
+// is made. Nil means that the state was not present previously.
+type Set struct {
+ Accounts map[common.Hash][]byte // Mutated account set, nil means the account was not present
+ Storages map[common.Hash]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
+	Incomplete map[common.Hash]struct{}              // Indicator whether the storage is incomplete due to large deletion
+}
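For orientation, a hedged sketch of how a caller might populate a Set; accountA, accountB, slotKey, prevAccountRLP and prevSlotValue are hypothetical placeholders, not names from this patch:

    // accountA was mutated: record its original RLP encoding and the
    // original value of one slot. accountB did not exist before: nil.
    set := &triestate.Set{
        Accounts: map[common.Hash][]byte{
            accountA: prevAccountRLP,
            accountB: nil,
        },
        Storages: map[common.Hash]map[common.Hash][]byte{
            accountA: {slotKey: prevSlotValue},
        },
        Incomplete: map[common.Hash]struct{}{}, // no large deletions here
    }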
From bcfeb7e466ec802ceef03ad24ab33d7ce9cb8ee0 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Fri, 4 Oct 2024 17:21:53 +0700
Subject: [PATCH 21/41] all: remove trie cache journal (#595)
---
cmd/devp2p/internal/ethtest/suite_test.go | 2 --
cmd/ronin/config.go | 4 +++
cmd/ronin/main.go | 4 +--
cmd/ronin/snapshot.go | 6 ++--
cmd/utils/flags.go | 18 -----------
cmd/utils/flags_legacy.go | 14 ++++++++
core/blockchain.go | 22 -------------
core/state/pruner/pruner.go | 39 ++++++++---------------
eth/backend.go | 4 +--
eth/ethconfig/config.go | 20 ++++++------
eth/ethconfig/gen_config.go | 12 -------
graphql/graphql_test.go | 12 +++----
trie/database.go | 29 +----------------
13 files changed, 52 insertions(+), 134 deletions(-)
diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
index 50380b989c..55e386c4aa 100644
--- a/cmd/devp2p/internal/ethtest/suite_test.go
+++ b/cmd/devp2p/internal/ethtest/suite_test.go
@@ -91,8 +91,6 @@ func setupGeth(stack *node.Node) error {
Genesis: &chain.genesis,
NetworkId: chain.genesis.Config.ChainID.Uint64(), // 19763
DatabaseCache: 10,
- TrieCleanCache: 10,
- TrieCleanCacheJournal: "",
TrieCleanCacheRejournal: 60 * time.Minute,
TrieDirtyCache: 16,
TrieTimeout: 60 * time.Minute,
diff --git a/cmd/ronin/config.go b/cmd/ronin/config.go
index cae2e3bc30..d3bf084fbb 100644
--- a/cmd/ronin/config.go
+++ b/cmd/ronin/config.go
@@ -287,6 +287,10 @@ func deprecated(field string) bool {
return true
case "ethconfig.Config.EWASMInterpreter":
return true
+ case "ethconfig.Config.TrieCleanCacheJournal":
+ return true
+ case "ethconfig.Config.TrieCleanCacheRejournal":
+ return true
default:
return false
}
diff --git a/cmd/ronin/main.go b/cmd/ronin/main.go
index 5144bc14a9..3e94098ab3 100644
--- a/cmd/ronin/main.go
+++ b/cmd/ronin/main.go
@@ -118,8 +118,8 @@ var (
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
- utils.CacheTrieJournalFlag,
- utils.CacheTrieRejournalFlag,
+ utils.CacheTrieJournalFlag, // deprecated
+ utils.CacheTrieRejournalFlag, // deprecated
utils.CacheGCFlag,
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
diff --git a/cmd/ronin/snapshot.go b/cmd/ronin/snapshot.go
index ce9b343b90..b61f32969f 100644
--- a/cmd/ronin/snapshot.go
+++ b/cmd/ronin/snapshot.go
@@ -66,7 +66,6 @@ var (
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
- utils.CacheTrieJournalFlag,
utils.BloomFilterSizeFlag,
},
Description: `
@@ -187,11 +186,12 @@ block is used.
)
func pruneState(ctx *cli.Context) error {
- stack, config := makeConfigNode(ctx)
+ stack, _ := makeConfigNode(ctx)
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, false)
- pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.Uint64(utils.BloomFilterSizeFlag.Name))
+ pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""),
+ ctx.Uint64(utils.BloomFilterSizeFlag.Name))
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)
return err
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 976c8191ce..806384adce 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -479,18 +479,6 @@ var (
Value: 15,
Category: flags.PerfCategory,
}
- CacheTrieJournalFlag = &cli.StringFlag{
- Name: "cache.trie.journal",
- Usage: "Disk journal directory for trie cache to survive node restarts",
- Value: ethconfig.Defaults.TrieCleanCacheJournal,
- Category: flags.PerfCategory,
- }
- CacheTrieRejournalFlag = &cli.DurationFlag{
- Name: "cache.trie.rejournal",
- Usage: "Time interval to regenerate the trie cache journal",
- Value: ethconfig.Defaults.TrieCleanCacheRejournal,
- Category: flags.PerfCategory,
- }
CacheGCFlag = &cli.IntFlag{
Name: "cache.gc",
Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
@@ -1922,12 +1910,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
}
- if ctx.IsSet(CacheTrieJournalFlag.Name) {
- cfg.TrieCleanCacheJournal = ctx.String(CacheTrieJournalFlag.Name)
- }
- if ctx.IsSet(CacheTrieRejournalFlag.Name) {
- cfg.TrieCleanCacheRejournal = ctx.Duration(CacheTrieRejournalFlag.Name)
- }
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
cfg.TrieDirtyCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
}
diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go
index cafa07892d..63b6d64879 100644
--- a/cmd/utils/flags_legacy.go
+++ b/cmd/utils/flags_legacy.go
@@ -20,6 +20,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/eth/ethconfig"
+ "github.com/ethereum/go-ethereum/internal/flags"
"github.com/urfave/cli/v2"
)
@@ -35,6 +36,8 @@ var ShowDeprecated = &cli.Command{
var DeprecatedFlags = []cli.Flag{
LegacyMinerGasTargetFlag,
NoUSBFlag,
+ CacheTrieJournalFlag,
+ CacheTrieRejournalFlag,
}
var (
@@ -49,6 +52,17 @@ var (
Usage: "Target gas floor for mined blocks (deprecated)",
Value: ethconfig.Defaults.Miner.GasFloor,
}
+ // (Deprecated Oct 2024, shown in aliased flags section)
+ CacheTrieJournalFlag = &cli.StringFlag{
+ Name: "cache.trie.journal",
+ Usage: "Disk journal directory for trie cache to survive node restarts",
+ Category: flags.PerfCategory,
+ }
+ CacheTrieRejournalFlag = &cli.DurationFlag{
+ Name: "cache.trie.rejournal",
+ Usage: "Time interval to regenerate the trie cache journal",
+ Category: flags.PerfCategory,
+ }
)
// showDeprecated displays deprecated flags that will soon be removed from the codebase.
diff --git a/core/blockchain.go b/core/blockchain.go
index 588ab667f9..2b7e82e2f7 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -132,8 +132,6 @@ const (
// that's resident in a blockchain.
type CacheConfig struct {
TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
- TrieCleanJournal string // Disk journal for saving clean cache entries.
- TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically
TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks
TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
@@ -269,7 +267,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
db,
&trie.Config{
Cache: cacheConfig.TrieCleanLimit,
- Journal: cacheConfig.TrieCleanJournal,
Preimages: cacheConfig.Preimages,
})
// Setup the genesis block, commit the provided genesis specification
@@ -289,7 +286,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
triegc: prque.New(nil),
stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
- Journal: cacheConfig.TrieCleanJournal,
Preimages: cacheConfig.Preimages,
}),
quit: make(chan struct{}),
@@ -456,19 +452,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
go bc.maintainTxIndex(txIndexBlock)
}
- // If periodic cache journal is required, spin it up.
- if bc.cacheConfig.TrieCleanRejournal > 0 {
- if bc.cacheConfig.TrieCleanRejournal < time.Minute {
- log.Warn("Sanitizing invalid trie cache journal time", "provided", bc.cacheConfig.TrieCleanRejournal, "updated", time.Minute)
- bc.cacheConfig.TrieCleanRejournal = time.Minute
- }
- bc.wg.Add(1)
- go func() {
- defer bc.wg.Done()
- bc.triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
- }()
- }
-
// load the latest dirty accounts stored from last stop to cache
bc.loadLatestDirtyAccounts()
// Rewind the chain in case of an incompatible config upgrade.
@@ -1037,11 +1020,6 @@ func (bc *BlockChain) Stop() {
log.Error("Dangling trie nodes after full cleanup")
}
}
- // Ensure all live cached entries be saved into disk, so that we can skip
- // cache warmup when node restarts.
- if bc.cacheConfig.TrieCleanJournal != "" {
- bc.triedb.SaveCache(bc.cacheConfig.TrieCleanJournal)
- }
log.Info("Blockchain stopped")
}
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index c46432ee68..f46eb66990 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -75,16 +75,15 @@ var (
// periodically in order to release the disk usage and improve the
// disk read performance to some extent.
type Pruner struct {
- db ethdb.Database
- stateBloom *stateBloom
- datadir string
- trieCachePath string
- headHeader *types.Header
- snaptree *snapshot.Tree
+ db ethdb.Database
+ stateBloom *stateBloom
+ datadir string
+ headHeader *types.Header
+ snaptree *snapshot.Tree
}
// NewPruner creates the pruner instance.
-func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) {
+func NewPruner(db ethdb.Database, datadir string, bloomSize uint64) (*Pruner, error) {
headBlock := rawdb.ReadHeadBlock(db)
if headBlock == nil {
return nil, errors.New("Failed to load head block")
@@ -103,12 +102,11 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6
return nil, err
}
return &Pruner{
- db: db,
- stateBloom: stateBloom,
- datadir: datadir,
- trieCachePath: trieCachePath,
- headHeader: headBlock.Header(),
- snaptree: snaptree,
+ db: db,
+ stateBloom: stateBloom,
+ datadir: datadir,
+ headHeader: headBlock.Header(),
+ snaptree: snaptree,
}, nil
}
@@ -241,7 +239,7 @@ func (p *Pruner) Prune(root common.Hash) error {
return err
}
if stateBloomRoot != (common.Hash{}) {
- return RecoverPruning(p.datadir, p.db, p.trieCachePath)
+ return RecoverPruning(p.datadir, p.db)
}
// If the target state root is not specified, use the HEAD-127 as the
// target. The reason for picking it is:
@@ -299,11 +297,6 @@ func (p *Pruner) Prune(root common.Hash) error {
log.Info("Selecting user-specified state as the pruning target", "root", root)
}
}
- // Before start the pruning, delete the clean trie cache first.
- // It's necessary otherwise in the next restart we will hit the
- // deleted state root in the "clean cache" so that the incomplete
- // state is picked for usage.
- deleteCleanTrieCache(p.trieCachePath)
// All the state roots of the middle layer should be forcibly pruned,
// otherwise the dangling state will be left.
@@ -342,7 +335,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// pruning can be resumed. What's more if the bloom filter is constructed, the
// pruning **has to be resumed**. Otherwise a lot of dangling nodes may be left
// in the disk.
-func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error {
+func RecoverPruning(datadir string, db ethdb.Database) error {
stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir)
if err != nil {
return err
@@ -372,12 +365,6 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
}
log.Info("Loaded state bloom filter", "path", stateBloomPath)
- // Before start the pruning, delete the clean trie cache first.
- // It's necessary otherwise in the next restart we will hit the
- // deleted state root in the "clean cache" so that the incomplete
- // state is picked for usage.
- deleteCleanTrieCache(trieCachePath)
-
// All the state roots of the middle layers should be forcibly pruned,
// otherwise the dangling state will be left.
var (
diff --git a/eth/backend.go b/eth/backend.go
index 32e3f17331..3850b94458 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -141,7 +141,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
return nil, err
}
- if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil {
+ if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil {
log.Error("Failed to recover state", "error", err)
}
eth := &Ethereum{
@@ -192,8 +192,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
cacheConfig = &core.CacheConfig{
TrieCleanLimit: config.TrieCleanCache,
- TrieCleanJournal: stack.ResolvePath(config.TrieCleanCacheJournal),
- TrieCleanRejournal: config.TrieCleanCacheRejournal,
TrieCleanNoPrefetch: config.NoPrefetch,
TrieDirtyLimit: config.TrieDirtyCache,
TrieDirtyDisabled: config.NoPruning,
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 5e64069127..ac257d8a68 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -76,17 +76,15 @@ var Defaults = Config{
DatasetsOnDisk: 2,
DatasetsLockMmap: false,
},
- NetworkId: 1,
- TxLookupLimit: 2350000,
- LightPeers: 100,
- UltraLightFraction: 75,
- DatabaseCache: 512,
- TrieCleanCache: 154,
- TrieCleanCacheJournal: "triecache",
- TrieCleanCacheRejournal: 60 * time.Minute,
- TrieDirtyCache: 256,
- TrieTimeout: 60 * time.Minute,
- SnapshotCache: 102,
+ NetworkId: 1,
+ TxLookupLimit: 2350000,
+ LightPeers: 100,
+ UltraLightFraction: 75,
+ DatabaseCache: 512,
+ TrieCleanCache: 154,
+ TrieDirtyCache: 256,
+ TrieTimeout: 60 * time.Minute,
+ SnapshotCache: 102,
Miner: miner.Config{
GasCeil: 8000000,
GasPrice: big.NewInt(params.GWei),
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 6e1302bc88..0af7eeced0 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -43,8 +43,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
DatabaseCache int
DatabaseFreezer string
TrieCleanCache int
- TrieCleanCacheJournal string `toml:",omitempty"`
- TrieCleanCacheRejournal time.Duration `toml:",omitempty"`
TrieDirtyCache int
TrieTimeout time.Duration
SnapshotCache int
@@ -87,8 +85,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.DatabaseCache = c.DatabaseCache
enc.DatabaseFreezer = c.DatabaseFreezer
enc.TrieCleanCache = c.TrieCleanCache
- enc.TrieCleanCacheJournal = c.TrieCleanCacheJournal
- enc.TrieCleanCacheRejournal = c.TrieCleanCacheRejournal
enc.TrieDirtyCache = c.TrieDirtyCache
enc.TrieTimeout = c.TrieTimeout
enc.SnapshotCache = c.SnapshotCache
@@ -135,8 +131,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
DatabaseCache *int
DatabaseFreezer *string
TrieCleanCache *int
- TrieCleanCacheJournal *string `toml:",omitempty"`
- TrieCleanCacheRejournal *time.Duration `toml:",omitempty"`
TrieDirtyCache *int
TrieTimeout *time.Duration
SnapshotCache *int
@@ -230,12 +224,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.TrieCleanCache != nil {
c.TrieCleanCache = *dec.TrieCleanCache
}
- if dec.TrieCleanCacheJournal != nil {
- c.TrieCleanCacheJournal = *dec.TrieCleanCacheJournal
- }
- if dec.TrieCleanCacheRejournal != nil {
- c.TrieCleanCacheRejournal = *dec.TrieCleanCacheRejournal
- }
if dec.TrieDirtyCache != nil {
c.TrieDirtyCache = *dec.TrieDirtyCache
}
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index 895e797535..767a37ae6f 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -245,13 +245,11 @@ func createGQLService(t *testing.T, stack *node.Node) {
Ethash: ethash.Config{
PowMode: ethash.ModeFake,
},
- NetworkId: 1337,
- TrieCleanCache: 5,
- TrieCleanCacheJournal: "triecache",
- TrieCleanCacheRejournal: 60 * time.Minute,
- TrieDirtyCache: 5,
- TrieTimeout: 60 * time.Minute,
- SnapshotCache: 5,
+ NetworkId: 1337,
+ TrieCleanCache: 5,
+ TrieDirtyCache: 5,
+ TrieTimeout: 60 * time.Minute,
+ SnapshotCache: 5,
}
ethBackend, err := eth.New(stack, ethConf)
if err != nil {
diff --git a/trie/database.go b/trie/database.go
index 96d201aabe..c970383ad2 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -18,7 +18,6 @@ package trie
import (
"errors"
- "runtime"
"time"
"github.com/VictoriaMetrics/fastcache"
@@ -91,11 +90,7 @@ type Database struct {
func prepare(diskdb ethdb.Database, config *Config) *Database {
var cleans *fastcache.Cache
if config != nil && config.Cache > 0 {
- if config.Journal == "" {
- cleans = fastcache.New(config.Cache * 1024 * 1024)
- } else {
- cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
- }
+ cleans = fastcache.New(config.Cache * 1024 * 1024)
}
var preimages *preimageStore
if config != nil && config.Preimages {
@@ -219,28 +214,6 @@ func (db *Database) saveCache(dir string, threads int) error {
return nil
}
-// SaveCache atomically saves fast cache data to the given dir using all
-// available CPU cores.
-func (db *Database) SaveCache(dir string) error {
- return db.saveCache(dir, runtime.GOMAXPROCS(0))
-}
-
-// SaveCachePeriodically atomically saves fast cache data to the given dir with
-// the specified interval. All dump operation will only use a single CPU core.
-func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- db.saveCache(dir, 1)
- case <-stopCh:
- return
- }
- }
-}
-
// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold. The held pre-images accumulated
// up to this point will be flushed in case the size exceeds the threshold.
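With the journal gone, the clean cache is always rebuilt in memory after a restart. A minimal sketch of the resulting configuration, assuming trie.NewDatabaseWithConfig and the Config fields that remain after this patch:

    package main

    import (
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        // The clean cache is sized in MB only; there is no Journal field
        // left to load from or dump to across restarts.
        db := trie.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{
            Cache:     154,  // clean cache allowance in MB
            Preimages: true, // optionally record trie-key preimages
        })
        _ = db
    }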
From 8759d466815a36072298b4c59e579f4642acd5fb Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Fri, 4 Oct 2024 17:25:39 +0700
Subject: [PATCH 22/41] core, trie: Expose block number to statedb (#593)
* core/state: clean up: db already exists in stateObject
* core, trie: statedb also commits the block number
---
cmd/evm/internal/t8ntool/execution.go | 4 +--
cmd/evm/runner.go | 7 ++---
core/blockchain.go | 2 +-
core/blockchain_test.go | 2 +-
core/chain_makers.go | 2 +-
core/genesis.go | 4 +--
core/state/dump.go | 4 +--
core/state/snapshot/generate.go | 2 +-
core/state/snapshot/generate_test.go | 2 +-
core/state/state_object.go | 36 +++++++++++++-------------
core/state/state_test.go | 10 +++----
core/state/statedb.go | 25 ++++++++++--------
core/state/statedb_fuzz_test.go | 2 +-
core/state/statedb_test.go | 18 ++++++-------
core/state/sync_test.go | 4 +--
core/txpool/blobpool/blobpool_test.go | 12 ++++-----
eth/api_test.go | 4 +--
eth/protocols/snap/sync_test.go | 8 +++---
eth/state_accessor.go | 2 +-
light/postprocess.go | 4 +--
tests/fuzzers/stacktrie/trie_fuzzer.go | 2 +-
tests/fuzzers/trie/trie-fuzzer.go | 2 +-
tests/state_test_util.go | 4 +--
trie/database.go | 2 +-
trie/iterator_test.go | 18 ++++++-------
trie/sync_test.go | 6 ++---
trie/tracer_test.go | 18 ++++++-------
trie/trie_test.go | 20 +++++++-------
28 files changed, 115 insertions(+), 111 deletions(-)
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index 8781ccc2a3..ea59c48427 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -242,7 +242,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
statedb.AddBalance(pre.Env.Coinbase, minerReward)
}
// Commit block
- root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber))
+ root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
if err != nil {
fmt.Fprintf(os.Stderr, "Could not commit state: %v", err)
return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
@@ -273,7 +273,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(false)
+ root, _ := statedb.Commit(0, false)
statedb, _ = state.New(root, sdb, nil)
return statedb
}
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index be9d37b031..67aaa03ea9 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -20,8 +20,6 @@ import (
"bytes"
"encoding/json"
"fmt"
- "github.com/ethereum/go-ethereum/eth/tracers/logger"
- "github.com/ethereum/go-ethereum/internal/flags"
"io/ioutil"
"math/big"
"os"
@@ -30,6 +28,9 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/eth/tracers/logger"
+ "github.com/ethereum/go-ethereum/internal/flags"
+
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
@@ -269,7 +270,7 @@ func runCmd(ctx *cli.Context) error {
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
- statedb.Commit(true)
+ statedb.Commit(0, true)
statedb.IntermediateRoot(true)
fmt.Println(string(statedb.Dump(nil)))
}
diff --git a/core/blockchain.go b/core/blockchain.go
index 2b7e82e2f7..32ee4c65fe 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -1554,7 +1554,7 @@ func (bc *BlockChain) writeBlockWithState(
}
// Commit all cached state changes into underlying memory database.
dirtyAccounts := state.DirtyAccounts(block.Hash(), block.NumberU64())
- root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
+ root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
return NonStatTy, err
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index ed4e3a6994..cdf4a8c956 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -180,7 +180,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.chainmu.MustLock()
rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
rawdb.WriteBlock(blockchain.db, block)
- statedb.Commit(false)
+ statedb.Commit(block.NumberU64(), false)
blockchain.chainmu.Unlock()
}
return nil
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 0554528a96..d0e20302a8 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -318,7 +318,7 @@ func generateChain(
}
// Write state changes to db
- root, err := statedb.Commit(config.IsEIP158(b.header.Number))
+ root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
diff --git a/core/genesis.go b/core/genesis.go
index 5b248c5bbf..ef3e3014aa 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -97,7 +97,7 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) {
statedb.SetState(addr, key, value)
}
}
- return statedb.Commit(false)
+ return statedb.Commit(0, false)
}
// flush is very similar with deriveHash, but the main difference is
@@ -117,7 +117,7 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database) error {
}
}
// Commit current state, return the root hash.
- root, err := statedb.Commit(false)
+ root, err := statedb.Commit(0, false)
if err != nil {
return err
}
diff --git a/core/state/dump.go b/core/state/dump.go
index 320809492d..caa0061ce7 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -164,11 +164,11 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
addr := common.BytesToAddress(addrBytes)
obj := newObject(s, addr, &data)
if !conf.SkipCode {
- account.Code = obj.Code(s.db)
+ account.Code = obj.Code()
}
if !conf.SkipStorage {
account.Storage = make(map[common.Hash]string)
- storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(nil))
+ storageIt := trie.NewIterator(obj.getTrie().NodeIterator(nil))
for storageIt.Next() {
_, content, _, err := rlp.Split(storageIt.Value)
if err != nil {
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index cc5864a10c..3367e98be7 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -440,7 +440,7 @@ func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string,
}
root, nodes, _ := snapTrie.Commit(false)
if nodes != nil {
- snapTrieDb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ snapTrieDb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
}
snapTrieDb.Commit(root, false)
}
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 65ec9ee66f..68d94dc522 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -198,7 +198,7 @@ func (t *testHelper) Commit() common.Hash {
if nodes != nil {
t.nodes.Merge(nodes)
}
- t.triedb.Update(root, types.EmptyRootHash, t.nodes, nil)
+ t.triedb.Update(root, types.EmptyRootHash, 0, t.nodes, nil)
t.triedb.Commit(root, false)
return root
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 0abc03d7fc..a650ad55fa 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -155,7 +155,7 @@ func (s *stateObject) touch() {
}
}
-func (s *stateObject) getTrie(db Database) Trie {
+func (s *stateObject) getTrie() Trie {
if s.trie == nil {
// Try fetching from prefetcher first
// We don't prefetch empty tries
@@ -166,9 +166,9 @@ func (s *stateObject) getTrie(db Database) Trie {
}
if s.trie == nil {
var err error
- s.trie, err = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
+ s.trie, err = s.db.db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root)
if err != nil {
- s.trie, _ = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, common.Hash{})
+ s.trie, _ = s.db.db.OpenStorageTrie(s.db.originalRoot, s.addrHash, common.Hash{})
s.setError(fmt.Errorf("can't create storage trie: %v", err))
}
}
@@ -177,7 +177,7 @@ func (s *stateObject) getTrie(db Database) Trie {
}
// GetState retrieves a value from the account storage trie.
-func (s *stateObject) GetState(db Database, key common.Hash) common.Hash {
+func (s *stateObject) GetState(key common.Hash) common.Hash {
// If the fake storage is set, only look up the state here (in debugging mode)
if s.fakeStorage != nil {
return s.fakeStorage[key]
@@ -188,11 +188,11 @@ func (s *stateObject) GetState(db Database, key common.Hash) common.Hash {
return value
}
// Otherwise return the entry's original value
- return s.GetCommittedState(db, key)
+ return s.GetCommittedState(key)
}
// GetCommittedState retrieves a value from the committed account storage trie.
-func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash {
+func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
// If the fake storage is set, only look up the state here (in debugging mode)
if s.fakeStorage != nil {
return s.fakeStorage[key]
@@ -247,7 +247,7 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
if metrics.EnabledExpensive {
meter = &s.db.StorageReads
}
- if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
+ if enc, err = s.getTrie().TryGet(key.Bytes()); err != nil {
s.setError(err)
return common.Hash{}
}
@@ -265,14 +265,14 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// SetState updates a value in account storage.
-func (s *stateObject) SetState(db Database, key, value common.Hash) {
+func (s *stateObject) SetState(key, value common.Hash) {
// If the fake storage is set, put the temporary state update here.
if s.fakeStorage != nil {
s.fakeStorage[key] = value
return
}
// If the new value is the same as old, don't set
- prev := s.GetState(db, key)
+ prev := s.GetState(key)
if prev == value {
return
}
@@ -327,7 +327,7 @@ func (s *stateObject) finalise(prefetch bool) {
// updateTrie writes cached storage modifications into the object's storage trie.
// It will return nil if the trie has not been loaded and no changes have been made
-func (s *stateObject) updateTrie(db Database) Trie {
+func (s *stateObject) updateTrie() Trie {
// Make sure all dirty slots are finalized into the pending storage area
s.finalise(false) // Don't prefetch anymore, pull directly if need be
if len(s.pendingStorage) == 0 {
@@ -343,7 +343,7 @@ func (s *stateObject) updateTrie(db Database) Trie {
origin map[common.Hash][]byte
)
// Insert all the pending updates into the trie
- tr := s.getTrie(db)
+ tr := s.getTrie()
hasher := s.db.hasher
usedStorage := make([][]byte, 0, len(s.pendingStorage))
@@ -406,9 +406,9 @@ func (s *stateObject) updateTrie(db Database) Trie {
}
// updateRoot sets the trie root to the current root hash of the storage trie.
-func (s *stateObject) updateRoot(db Database) {
+func (s *stateObject) updateRoot() {
// If nothing changed, don't bother with hashing anything
- if s.updateTrie(db) == nil {
+ if s.updateTrie() == nil {
return
}
// Track the amount of time wasted on hashing the storage trie
@@ -419,9 +419,9 @@ func (s *stateObject) updateRoot(db Database) {
}
// commit returns the changes made in storage trie and updates the account data.
-func (s *stateObject) commit(db Database) (*trienode.NodeSet, error) {
+func (s *stateObject) commit() (*trienode.NodeSet, error) {
// If nothing changed, don't bother with hashing anything
- if s.updateTrie(db) == nil {
+ if s.updateTrie() == nil {
s.origin = s.data.Copy() // Update original account data after commit
return nil, nil
}
@@ -507,14 +507,14 @@ func (s *stateObject) Address() common.Address {
}
// Code returns the contract code associated with this object, if any.
-func (s *stateObject) Code(db Database) []byte {
+func (s *stateObject) Code() []byte {
if s.code != nil {
return s.code
}
if bytes.Equal(s.CodeHash(), emptyCodeHash) {
return nil
}
- code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash()))
+ code, err := s.db.db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash()))
if err != nil {
s.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err))
}
@@ -540,7 +540,7 @@ func (s *stateObject) CodeSize(db Database) int {
}
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) {
- prevcode := s.Code(s.db.db)
+ prevcode := s.Code()
s.db.journal.append(codeChange{
account: &s.address,
prevhash: s.CodeHash(),
diff --git a/core/state/state_test.go b/core/state/state_test.go
index afcc0cf70d..881509f474 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -55,7 +55,7 @@ func TestDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
- s.state.Commit(false)
+ s.state.Commit(0, false)
// check that DumpToCollector contains the state objects that are in trie
got := string(s.state.Dump(nil))
@@ -99,7 +99,7 @@ func TestNull(t *testing.T) {
var value common.Hash
s.state.SetState(address, common.Hash{}, value)
- s.state.Commit(false)
+ s.state.Commit(0, false)
if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
t.Errorf("expected empty current value, got %x", value)
@@ -171,7 +171,7 @@ func TestSnapshot2(t *testing.T) {
so0.deleted = false
state.setStateObject(so0)
- root, _ := state.Commit(false)
+ root, _ := state.Commit(0, false)
state, _ = New(root, state.db, state.snaps)
// and one with deleted == true
@@ -193,8 +193,8 @@ func TestSnapshot2(t *testing.T) {
so0Restored := state.getStateObject(stateobjaddr0)
// Update lazily-loaded values before comparing.
- so0Restored.GetState(state.db, storageaddr)
- so0Restored.Code(state.db)
+ so0Restored.GetState(storageaddr)
+ so0Restored.Code()
// non-deleted is equal (restored)
compareStateObjects(so0Restored, so0, t)
diff --git a/core/state/statedb.go b/core/state/statedb.go
index c2566c56e3..6e97190c3f 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -300,7 +300,7 @@ func (s *StateDB) TxIndex() int {
func (s *StateDB) GetCode(addr common.Address) []byte {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.Code(s.db)
+ return stateObject.Code()
}
return nil
}
@@ -325,7 +325,7 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.GetState(s.db, hash)
+ return stateObject.GetState(hash)
}
return common.Hash{}
}
@@ -357,7 +357,7 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte,
func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- return stateObject.GetCommittedState(s.db, hash)
+ return stateObject.GetCommittedState(hash)
}
return common.Hash{}
}
@@ -375,8 +375,8 @@ func (s *StateDB) StorageTrie(addr common.Address) Trie {
return nil
}
cpy := stateObject.deepCopy(s)
- cpy.updateTrie(s.db)
- return cpy.getTrie(s.db)
+ cpy.updateTrie()
+ return cpy.getTrie()
}
func (s *StateDB) HasSelfDestructed(addr common.Address) bool {
@@ -431,7 +431,7 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) {
func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
stateObject := s.GetOrNewStateObject(addr)
if stateObject != nil {
- stateObject.SetState(s.db, key, value)
+ stateObject.SetState(key, value)
}
}
@@ -712,7 +712,7 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common
if so == nil {
return nil
}
- it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil))
+ it := trie.NewIterator(so.getTrie().NodeIterator(nil))
for it.Next() {
key := common.BytesToHash(db.trie.GetKey(it.Key))
@@ -954,7 +954,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// to pull useful data from disk.
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; !obj.deleted {
- obj.updateRoot(s.db)
+ obj.updateRoot()
}
}
// Now we're about to start to write changes to the trie. The trie is so far
@@ -1134,7 +1134,10 @@ func (s *StateDB) clearJournalAndRefund() {
}
// Commit writes the state to the underlying in-memory trie database.
-func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
+//
+// The associated block number of the state transition is also provided
+// for more chain context.
+func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) {
if s.dbErr != nil {
return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
}
@@ -1164,7 +1167,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
obj.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie
- nodeSet, err := obj.commit(s.db)
+ nodeSet, err := obj.commit()
if err != nil {
return common.Hash{}, err
}
@@ -1253,7 +1256,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
Storages: s.storagesOrigin,
Incomplete: incomplete,
}
- if err := s.db.TrieDB().Update(root, origin, nodes, set); err != nil {
+ if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
return common.Hash{}, err
}
s.originalRoot = root
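Callers now thread the block number of the transition into Commit, which forwards it to the trie database. A sketch of the updated call shape, mirroring the blockchain.go hunk above (statedb, block and chainConfig are assumed in scope):

    // Commit at the block boundary; the block number rides along with the
    // merged node set down to TrieDB().Update.
    root, err := statedb.Commit(block.NumberU64(), chainConfig.IsEIP158(block.Number()))
    if err != nil {
        panic(err)
    }
    _ = root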
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 0f627b2d69..76311a6b11 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -207,7 +207,7 @@ func (test *stateTest) run() bool {
} else {
state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
}
- nroot, err := state.Commit(true) // call commit at the block boundary
+ nroot, err := state.Commit(0, true) // call commit at the block boundary
if err != nil {
panic(err)
}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index eac8cbe06d..aa08ebb926 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -102,7 +102,7 @@ func TestIntermediateLeaks(t *testing.T) {
}
// Commit and cross check the databases.
- transRoot, err := transState.Commit(false)
+ transRoot, err := transState.Commit(0, false)
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
@@ -110,7 +110,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}
- finalRoot, err := finalState.Commit(false)
+ finalRoot, err := finalState.Commit(0, false)
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
@@ -483,7 +483,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
func TestTouchDelete(t *testing.T) {
s := newStateEnv()
s.state.GetOrNewStateObject(common.Address{})
- root, _ := s.state.Commit(false)
+ root, _ := s.state.Commit(0, false)
s.state, _ = New(root, s.state.db, s.state.snaps)
snapshot := s.state.Snapshot()
@@ -556,7 +556,7 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
- copyOne.Commit(false)
+ copyOne.Commit(0, false)
if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42)
}
@@ -641,7 +641,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
- copyTwo.Commit(false)
+ copyTwo.Commit(0, false)
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42)
}
@@ -685,7 +685,7 @@ func TestDeleteCreateRevert(t *testing.T) {
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, big.NewInt(1))
- root, _ := state.Commit(false)
+ root, _ := state.Commit(0, false)
state, _ = New(root, state.db, state.snaps)
// Simulate self-destructing in one transaction, then create-reverting in another
@@ -697,7 +697,7 @@ func TestDeleteCreateRevert(t *testing.T) {
state.RevertToSnapshot(id)
// Commit the entire state and make sure we don't crash and have the correct state
- root, _ = state.Commit(true)
+ root, _ = state.Commit(0, true)
state, _ = New(root, state.db, state.snaps)
if state.getStateObject(addr) != nil {
@@ -722,7 +722,7 @@ func TestMissingTrieNodes(t *testing.T) {
a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, big.NewInt(100))
state.SetCode(a2, []byte{1, 2, 4})
- root, _ = state.Commit(false)
+ root, _ = state.Commit(0, false)
t.Logf("root: %x", root)
// force-flush
state.Database().TrieDB().Cap(0)
@@ -746,7 +746,7 @@ func TestMissingTrieNodes(t *testing.T) {
}
// Modify the state
state.SetBalance(addr, big.NewInt(2))
- root, err := state.Commit(false)
+ root, err := state.Commit(0, false)
if err == nil {
t.Fatalf("expected error, got root :%x", root)
}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index d7334f0639..30742ed3b2 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -65,13 +65,13 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
if i%5 == 0 {
for j := byte(0); j < 5; j++ {
hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})
- obj.SetState(sdb, hash, hash)
+ obj.SetState(hash, hash)
}
}
state.updateStateObject(obj)
accounts = append(accounts, acc)
}
- root, _ := state.Commit(false)
+ root, _ := state.Commit(0, false)
// Return the generated state
return db, sdb, root, accounts
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index 02cb9ec6e9..1ecba877c8 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -559,7 +559,7 @@ func TestOpenDrops(t *testing.T) {
statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), big.NewInt(10000000))
statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), big.NewInt(1000000))
statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), big.NewInt(1000000))
- statedb.Commit(true)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -719,7 +719,7 @@ func TestOpenIndex(t *testing.T) {
// Create a blob pool out of the pre-seeded data
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
statedb.AddBalance(addr, big.NewInt(1_000_000_000))
- statedb.Commit(true)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -820,7 +820,7 @@ func TestOpenHeap(t *testing.T) {
statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
- statedb.Commit(true)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -899,7 +899,7 @@ func TestOpenCap(t *testing.T) {
statedb.AddBalance(addr1, big.NewInt(1_000_000_000))
statedb.AddBalance(addr2, big.NewInt(1_000_000_000))
statedb.AddBalance(addr3, big.NewInt(1_000_000_000))
- statedb.Commit(true)
+ statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
@@ -1321,7 +1321,7 @@ func TestAdd(t *testing.T) {
store.Put(blob)
}
}
- statedb.Commit(true)
+ statedb.Commit(0, true)
store.Close()
// Create a blob pool out of the pre-seeded data
@@ -1394,7 +1394,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
statedb.AddBalance(addr, big.NewInt(1_000_000_000))
pool.add(tx)
}
- statedb.Commit(true)
+ statedb.Commit(0, true)
defer pool.Close()
// Benchmark assembling the pending
diff --git a/eth/api_test.go b/eth/api_test.go
index e1bfa48bc9..86812fce5b 100644
--- a/eth/api_test.go
+++ b/eth/api_test.go
@@ -84,7 +84,7 @@ func TestAccountRange(t *testing.T) {
m[addr] = true
}
}
- state.Commit(true)
+ state.Commit(0, true)
root := state.IntermediateRoot(true)
trie, err := statedb.OpenTrie(root)
@@ -141,7 +141,7 @@ func TestEmptyAccountRange(t *testing.T) {
statedb = state.NewDatabase(rawdb.NewMemoryDatabase())
st, _ = state.New(common.Hash{}, statedb, nil)
)
- st.Commit(true)
+ st.Commit(0, true)
st.IntermediateRoot(true)
results := st.IteratorDump(&state.DumpConfig{
SkipCode: true,
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 966bec7a2e..ecfc76fe9e 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -1382,7 +1382,7 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes, _ := accTrie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1443,7 +1443,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes, _ := accTrie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1491,7 +1491,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
nodes.Merge(set)
// Commit gathered dirty nodes into database
- db.Update(root, types.EmptyRootHash, nodes, nil)
+ db.Update(root, types.EmptyRootHash, 0, nodes, nil)
// Re-create tries with new root
accTrie, _ = trie.New(trie.StateTrieID(root), db)
@@ -1553,7 +1553,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
nodes.Merge(set)
// Commit gathered dirty nodes into database
- db.Update(root, types.EmptyRootHash, nodes, nil)
+ db.Update(root, types.EmptyRootHash, 0, nodes, nil)
// Re-create tries with new root
accTrie, err := trie.New(trie.StateTrieID(root), db)
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index d5d06da672..06bbfdbd46 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -157,7 +157,7 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number()))
+ root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()))
if err != nil {
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err)
diff --git a/light/postprocess.go b/light/postprocess.go
index 7957d98807..f43fdb3cb5 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -226,7 +226,7 @@ func (c *ChtIndexerBackend) Commit() error {
}
// Commit trie changes into the trie database in case it's not nil.
if nodes != nil {
- if err := c.triedb.Update(root, c.originRoot, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := c.triedb.Update(root, c.originRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
return err
}
}
@@ -474,7 +474,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
}
if nodes != nil {
- if err := b.triedb.Update(root, b.originRoot, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := b.triedb.Update(root, b.originRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
return err
}
}
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index e45b172c76..6e728ac2c2 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -189,7 +189,7 @@ func (f *fuzzer) fuzz() int {
panic(err)
}
if nodes != nil {
- dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ dbA.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
}
// Flush memdb -> disk (sponge)
dbA.Commit(rootA, false)
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 1b67d81e6b..dc2e689a56 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -173,7 +173,7 @@ func runRandTest(rt randTest) error {
return err
}
if nodes != nil {
- if err := triedb.Update(hash, origin, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := triedb.Update(hash, origin, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
return err
}
}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index ec858d882d..a923535181 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -237,7 +237,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
statedb.AddBalance(block.Coinbase(), new(big.Int))
// Commit block
- statedb.Commit(config.IsEIP158(block.Number()))
+ statedb.Commit(block.NumberU64(), config.IsEIP158(block.Number()))
// And _now_ get the state root
root := statedb.IntermediateRoot(config.IsEIP158(block.Number()))
return snaps, statedb, root, err
@@ -259,7 +259,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(false)
+ root, _ := statedb.Commit(0, false)
var snaps *snapshot.Tree
if snapshotter {
diff --git a/trie/database.go b/trie/database.go
index c970383ad2..6894164477 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -129,7 +129,7 @@ func (db *Database) Reader(blockRoot common.Hash) Reader {
// given set in order to update state from the specified parent to the specified
// root. The held pre-images accumulated up to this point will be flushed in case
// the size exceeds the threshold.
-func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
if db.config != nil && db.config.OnCommit != nil {
db.config.OnCommit(states)
}
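Correspondingly, every Update call site gains the block number between the parent root and the node set; the tests below simply pass 0. A minimal sketch, assuming root and nodes come from a prior trie.Commit:

    // No meaningful parent state and no state set: block number 0 and a
    // nil triestate.Set, exactly as in the test call sites below.
    if err := db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
        panic(err)
    }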
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index a7d9c96b1f..3527cc0266 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -66,7 +66,7 @@ func TestIterator(t *testing.T) {
if err != nil {
t.Fatalf("Failed to commit trie %v", err)
}
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
found := make(map[string]string)
it := NewIterator(trie.NodeIterator(nil))
@@ -249,7 +249,7 @@ func TestDifferenceIterator(t *testing.T) {
triea.Update([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA), nil)
+ dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -258,7 +258,7 @@ func TestDifferenceIterator(t *testing.T) {
trieb.Update([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB), nil)
+ dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
@@ -291,7 +291,7 @@ func TestUnionIterator(t *testing.T) {
triea.Update([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA), nil)
+ dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -300,7 +300,7 @@ func TestUnionIterator(t *testing.T) {
trieb.Update([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB), nil)
+ dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
@@ -362,7 +362,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
tr.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := tr.Commit(false)
- tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
tdb.Commit(root, false)
}
@@ -478,7 +478,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
break
}
}
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
triedb.Commit(root, false)
}
@@ -599,7 +599,7 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
trie.Update(key, val)
}
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Return the generated trie
return triedb, trie, logDb
}
@@ -639,7 +639,7 @@ func testIteratorNodeBlob(t *testing.T, scheme string) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
triedb.Commit(root, false)
var found = make(map[common.Hash][]byte)
diff --git a/trie/sync_test.go b/trie/sync_test.go
index f2f9d1e6d1..386e1995d7 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -61,7 +61,7 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *SecureTrie, map[st
if err != nil {
panic(fmt.Errorf("failed to commit trie: %v", err))
}
- if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
if err := triedb.Commit(root, false); err != nil {
@@ -713,7 +713,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
diff[string(key)] = val
}
root, nodes, _ := srcTrie.Commit(false)
- if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(err)
}
if err := srcDb.Commit(root, false); err != nil {
@@ -738,7 +738,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
reverted[k] = val
}
root, nodes, _ = srcTrie.Commit(false)
- if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
panic(err)
}
if err := srcDb.Commit(root, false); err != nil {
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index 5ca3c528b6..e44b8826d6 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -71,7 +71,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
seen := setKeys(iterNodes(db, root))
if !compareSet(insertSet, seen) {
@@ -137,7 +137,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -152,7 +152,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(val.k), randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -170,7 +170,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update(key, randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -185,7 +185,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(key), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -200,7 +200,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(val.k), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -219,7 +219,7 @@ func TestAccessListLeak(t *testing.T) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
var cases = []struct {
op func(tr *Trie)
@@ -269,7 +269,7 @@ func TestTinyTree(t *testing.T) {
trie.Update([]byte(val.k), randBytes(32))
}
root, set, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(set), nil)
parent := root
trie, _ = New(TrieID(root), db)
@@ -278,7 +278,7 @@ func TestTinyTree(t *testing.T) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, set, _ = trie.Commit(false)
- db.Update(root, parent, trienode.NewWithNodeSet(set), nil)
+ db.Update(root, parent, 0, trienode.NewWithNodeSet(set), nil)
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, set); err != nil {
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 447374a1af..e87c4dba91 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -96,7 +96,7 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
if !memonly {
triedb.Commit(root, true)
}
@@ -214,7 +214,7 @@ func TestGet(t *testing.T) {
return
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
}
}
@@ -289,7 +289,7 @@ func TestReplication(t *testing.T) {
if err != nil {
t.Fatalf("commit error: %v", err)
}
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// create a new trie on top of the database and check that lookups work.
trie2, err := New(TrieID(root), db)
@@ -310,7 +310,7 @@ func TestReplication(t *testing.T) {
}
// recreate the trie after commit
if nodes != nil {
- db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(hash, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
}
trie2, err = New(TrieID(hash), db)
if err != nil {
@@ -527,7 +527,7 @@ func runRandTest(rt randTest) bool {
return false
}
if nodes != nil {
- triedb.Update(root, origin, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, origin, 0, trienode.NewWithNodeSet(nodes), nil)
}
newtr, err := New(TrieID(root), triedb)
if err != nil {
@@ -821,7 +821,7 @@ func TestCommitSequence(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
@@ -862,7 +862,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
@@ -902,7 +902,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
// And flush stacktrie -> disk
@@ -951,7 +951,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
stTrie.TryUpdate(key, []byte{0x1})
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
// Flush memdb -> disk (sponge)
db.Commit(root, false)
// And flush stacktrie -> disk
@@ -1123,7 +1123,7 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
}
h := trie.Hash()
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
b.StartTimer()
triedb.Dereference(h)
b.StopTimer()
From 211f9ae8cafe67913e487e76588c7b31736cc8b1 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Thu, 10 Oct 2024 22:11:55 +0700
Subject: [PATCH 23/41] final implementing path base (#591)
* all: clean up overall structure, preparing for path-based (#594)
* trie/triedb/pathdb: init pathdb components
* core, trie: track state change with address instead of hash
Reference: https://github.com/ethereum/go-ethereum/commit/817553cc288ff5e3e9602cca2750219322247974
* trie: refactor
* rawdb: implement freezer resettable & state freezer (#596)
* rawdb: implement freezer resettable
* rawdb: implement state freezer
* rawdb: update description
* trie: path based scheme implementing (#598)
* core/state: move account definition to core/types
Reference: https://github.com/ethereum/go-ethereum/pull/27323
* trie: add path base utils
* triedb: implement history and adding some test utils
* trie/triedb/pathdb: implement difflayer and disklayer
* Fix some issues related to history, and add logic checking maxbyte when is zero for retrieving ancient ranges with maxbyte is zero
* trie/triedb/pathdb: implement database.go
* freezer: Add unit test and docs for support freezer reading with no limit size
* trie/triedb/pathdb: add database and difflayer tests
* triedb/pathdb: implement journal and add more comments
---------
Co-authored-by: Huy Ngo
---------
Co-authored-by: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
---
cmd/ronin/snapshot.go | 4 +-
common/types.go | 5 +
core/blockchain.go | 6 +-
core/rawdb/accessors_chain_test.go | 4 +-
core/rawdb/accessors_indexes_test.go | 4 +-
core/rawdb/accessors_state.go | 179 +++++++
core/rawdb/accessors_trie.go | 47 +-
core/rawdb/ancient_scheme.go | 34 ++
core/rawdb/chain_iterator_test.go | 8 +-
core/rawdb/database.go | 8 +-
core/rawdb/freezer.go | 40 +-
core/rawdb/freezer_resettable.go | 241 ++++++++++
core/rawdb/freezer_resettable_test.go | 120 +++++
core/rawdb/freezer_table.go | 43 +-
core/rawdb/freezer_table_test.go | 46 ++
core/rawdb/freezer_test.go | 4 +-
core/rawdb/freezer_utils.go | 16 +
core/rawdb/schema.go | 12 +
core/rawdb/table.go | 4 +-
core/state/journal.go | 5 +-
core/state/snapshot/account.go | 86 ----
core/state/snapshot/conversion.go | 8 +-
core/state/snapshot/difflayer.go | 5 +-
core/state/snapshot/disklayer.go | 5 +-
core/state/snapshot/generate.go | 12 +-
core/state/snapshot/generate_test.go | 112 ++---
core/state/snapshot/snapshot.go | 3 +-
core/state/snapshot/snapshot_test.go | 7 +-
core/state/state_object.go | 4 +-
core/state/statedb.go | 70 ++-
core/state/statedb_fuzz_test.go | 22 +-
core/types/hashes.go | 32 ++
core/types/state_account.go | 65 +++
eth/protocols/snap/protocol.go | 4 +-
eth/protocols/snap/sync.go | 7 +-
ethdb/database.go | 11 +-
internal/testrand/rand.go | 53 +++
trie/committer.go | 7 +-
trie/database.go | 48 +-
trie/database_test.go | 8 +-
trie/testutil/utils.go | 61 +++
trie/tracer.go | 16 +-
trie/trie.go | 19 +-
trie/trie_reader.go | 9 +-
trie/trie_test.go | 34 +-
trie/triedb/hashdb/database.go | 59 ++-
trie/triedb/pathdb/database.go | 401 ++++++++++++++++
trie/triedb/pathdb/database_test.go | 574 ++++++++++++++++++++++
trie/triedb/pathdb/difflayer.go | 178 +++++++
trie/triedb/pathdb/difflayer_test.go | 171 +++++++
trie/triedb/pathdb/disklayer.go | 298 ++++++++++++
trie/triedb/pathdb/errors.go | 51 ++
trie/triedb/pathdb/history.go | 661 ++++++++++++++++++++++++++
trie/triedb/pathdb/history_test.go | 312 ++++++++++++
trie/triedb/pathdb/journal.go | 401 ++++++++++++++++
trie/triedb/pathdb/layertree.go | 214 +++++++++
trie/triedb/pathdb/metrics.go | 50 ++
trie/triedb/pathdb/nodebuffer.go | 276 +++++++++++
trie/triedb/pathdb/testutils.go | 157 ++++++
trie/trienode/node.go | 57 +--
trie/triestate/state.go | 249 +++++++++-
61 files changed, 5204 insertions(+), 443 deletions(-)
create mode 100644 core/rawdb/freezer_resettable.go
create mode 100644 core/rawdb/freezer_resettable_test.go
delete mode 100644 core/state/snapshot/account.go
create mode 100644 core/types/hashes.go
create mode 100644 internal/testrand/rand.go
create mode 100644 trie/testutil/utils.go
create mode 100644 trie/triedb/pathdb/database.go
create mode 100644 trie/triedb/pathdb/database_test.go
create mode 100644 trie/triedb/pathdb/difflayer.go
create mode 100644 trie/triedb/pathdb/difflayer_test.go
create mode 100644 trie/triedb/pathdb/disklayer.go
create mode 100644 trie/triedb/pathdb/errors.go
create mode 100644 trie/triedb/pathdb/history.go
create mode 100644 trie/triedb/pathdb/history_test.go
create mode 100644 trie/triedb/pathdb/journal.go
create mode 100644 trie/triedb/pathdb/layertree.go
create mode 100644 trie/triedb/pathdb/metrics.go
create mode 100644 trie/triedb/pathdb/nodebuffer.go
create mode 100644 trie/triedb/pathdb/testutils.go
diff --git a/cmd/ronin/snapshot.go b/cmd/ronin/snapshot.go
index b61f32969f..e83c4c31ba 100644
--- a/cmd/ronin/snapshot.go
+++ b/cmd/ronin/snapshot.go
@@ -496,14 +496,14 @@ func dumpState(ctx *cli.Context) error {
Root common.Hash `json:"root"`
}{root})
for accIt.Next() {
- account, err := snapshot.FullAccount(accIt.Account())
+ account, err := types.FullAccount(accIt.Account())
if err != nil {
return err
}
da := &state.DumpAccount{
Balance: account.Balance.String(),
Nonce: account.Nonce,
- Root: account.Root,
+ Root: account.Root.Bytes(),
CodeHash: account.CodeHash,
SecureKey: accIt.Hash().Bytes(),
}
diff --git a/common/types.go b/common/types.go
index 88cda0a841..0f53406980 100644
--- a/common/types.go
+++ b/common/types.go
@@ -79,6 +79,11 @@ func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
// Hex converts a hash to a hex string.
func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
+// Cmp compares two hashes.
+func (h Hash) Cmp(other Hash) int {
+ return bytes.Compare(h[:], other[:])
+}
+
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (h Hash) TerminalString() string {
diff --git a/core/blockchain.go b/core/blockchain.go
index 32ee4c65fe..67b8c9fffd 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -763,7 +763,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store.
- if err := bc.db.TruncateHead(num); err != nil {
+ if _, err := bc.db.TruncateHead(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
// Remove the hash <-> number mapping from the active store.
@@ -1194,7 +1194,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// The tx index data could not be written.
// Roll back the ancient store update.
fastBlock := bc.CurrentFastBlock().NumberU64()
- if err := bc.db.TruncateHead(fastBlock + 1); err != nil {
+ if _, err := bc.db.TruncateHead(fastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, err
@@ -1210,7 +1210,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if !updateHead(blockChain[len(blockChain)-1]) {
// We end up here if the header chain has reorg'ed, and the blocks/receipts
// don't match the canonical chain.
- if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
+ if _, err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, errSideChainReceipts
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 7168fa6969..fa0a0b03b0 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -88,7 +88,7 @@ func TestBodyStorage(t *testing.T) {
WriteBody(db, hash, 0, body)
if entry := ReadBody(db, hash, 0); entry == nil {
t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
+ } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
}
if entry := ReadBodyRLP(db, hash, 0); entry == nil {
@@ -142,7 +142,7 @@ func TestBlockStorage(t *testing.T) {
}
if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
t.Fatalf("Stored body not found")
- } else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
+ } else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
}
// Delete the block and verify the execution
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index 4734e986e2..dcb474c180 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -37,7 +37,7 @@ type testHasher struct {
hasher hash.Hash
}
-func newHasher() *testHasher {
+func newTestHasher() *testHasher {
return &testHasher{hasher: sha3.NewLegacyKeccak256()}
}
@@ -99,7 +99,7 @@ func TestLookupStorage(t *testing.T) {
tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
txs := []*types.Transaction{tx1, tx2, tx3}
- block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher())
+ block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher())
// Check that no transactions entries are in a pristine database
for i, tx := range txs {
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 1438aad0ff..894db97e76 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -17,6 +17,8 @@
package rawdb
import (
+ "encoding/binary"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
@@ -67,3 +69,180 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
log.Crit("Failed to delete contract code", "err", err)
}
}
+
+/* Function below support Path base state trie scheme */
+
+// ReadStateId retrieves the state id with the provided state root. (Return pointer can detect that Statid is valid or not, nil is invalid)
+func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 {
+ data, err := db.Get(stateIDKey(root))
+ if err != nil || len(data) == 0 {
+ return nil
+ }
+ number := binary.BigEndian.Uint64(data)
+ return &number
+}
+
+// WriteStateID writes the provided state lookup to database.
+func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) {
+ var buff [8]byte
+ // Convert from uint64 to 8 bytes BigEndian id -> buff
+ binary.BigEndian.PutUint64(buff[:], id)
+ // Store the state id for root
+ if err := db.Put(stateIDKey(root), buff[:]); err != nil {
+ log.Crit("Failed to store state id", "err", err)
+ }
+}
+
+func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) {
+ if err := db.Delete(stateIDKey(root)); err != nil {
+ log.Crit("Failed to delete state id", "err", err)
+ }
+}
+
+// ReadPersistentStateID retrievies the id of persistent state from the database.
+func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 {
+ data, _ := db.Get(persistentStateIDKey)
+
+ if len(data) != 8 { // 8 bytes
+ return 0
+ }
+ return binary.BigEndian.Uint64(data)
+}
+
+// WritePersistentStateID writes the provided id of persistent state to the database.
+func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) {
+ if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil {
+ log.Crit("Failed to store persistent state id", "err", err)
+ }
+}
+
+// Read Trie Journal retrieves in-memory trie nodes of layers saved at
+// the last shutdown.
+func ReadTrieJournal(db ethdb.KeyValueReader) []byte {
+ data, _ := db.Get(trieJournalKey)
+ return data
+}
+
+// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at
+// shutdown.
+func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) {
+ if err := db.Put(trieJournalKey, journal); err != nil {
+ log.Crit("Failed to store tries journal", "err", err)
+ }
+}
+
+// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at
+// the last shutdown.
+func DeleteTrieJournal(db ethdb.KeyValueWriter) {
+ if err := db.Delete(trieJournalKey); err != nil {
+ log.Crit("Failed to remove tries journal", "err", err)
+ }
+}
+
+/* Ancients */
+
+// ReadStateHistoryMeta retrieves the metadata corresponding to the specified
+// state history. Compute the position of state history in freezer by minus
+// one since the id of first state history starts from one(zero for initial
+// state).
+func ReadStateHistoryMeta(db ethdb.AncientReaderOp, id uint64) []byte {
+ blob, err := db.Ancient(stateHistoryMeta, id-1)
+ if err != nil {
+ return nil
+ }
+ return blob
+}
+
+// ReadStateHistoryMetaList retrieves a batch of meta objects with the specified
+// start position and count. Compute the position of state history in freezer by
+// minus one since the id of first state history starts from one(zero for initial
+// state).
+func ReadStateHistoryMetaList(db ethdb.AncientReaderOp, start uint64, count uint64) ([][]byte, error) {
+ return db.AncientRange(stateHistoryMeta, start-1, count, 0)
+}
+
+// ReadStateAccountIndex retrieves the state root corresponding to the specified
+// state history. Compute the position of state history in freezer by minus one
+// since the id of first state history starts from one(zero for initial state).
+func ReadStateAccountIndex(db ethdb.AncientReaderOp, id uint64) []byte {
+ blob, err := db.Ancient(stateHistoryAccountIndex, id-1)
+ if err != nil {
+ return nil
+ }
+ return blob
+}
+
+// ReadStateStorageIndex retrieves the state root corresponding to the specified
+// state history. Compute the position of state history in freezer by minus one
+// since the id of first state history starts from one(zero for initial state).
+func ReadStateStorageIndex(db ethdb.AncientReaderOp, id uint64) []byte {
+ blob, err := db.Ancient(stateHistoryStorageIndex, id-1)
+ if err != nil {
+ return nil
+ }
+ return blob
+}
+
+// ReadStateAccountHistory retrieves the state root corresponding to the specified
+// state history. Compute the position of state history in freezer by minus one
+// since the id of first state history starts from one(zero for initial state).
+func ReadStateAccountHistory(db ethdb.AncientReaderOp, id uint64) []byte {
+ blob, err := db.Ancient(stateHistoryAccountData, id-1)
+ if err != nil {
+ return nil
+ }
+ return blob
+}
+
+// ReadStateStorageHistory retrieves the state root corresponding to the specified
+// state history. Compute the position of state history in freezer by minus one
+// since the id of first state history starts from one(zero for initial state).
+func ReadStateStorageHistory(db ethdb.AncientReaderOp, id uint64) []byte {
+ blob, err := db.Ancient(stateHistoryStorageData, id-1)
+ if err != nil {
+ return nil
+ }
+ return blob
+}
+
+// ReadStateHistory retrieves the state history from database with provided id.
+// Compute the position of state history in freezer by minus one since the id
+// of first state history starts from one(zero for initial state).
+// Returns meta, account and storage (index, data).
+func ReadStateHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []byte, []byte, []byte, error) {
+ meta, err := db.Ancient(stateHistoryMeta, id-1)
+ if err != nil {
+ return nil, nil, nil, nil, nil, err
+ }
+ accountIndex, err := db.Ancient(stateHistoryAccountIndex, id-1)
+ if err != nil {
+ return nil, nil, nil, nil, nil, err
+ }
+ storageIndex, err := db.Ancient(stateHistoryStorageIndex, id-1)
+ if err != nil {
+ return nil, nil, nil, nil, nil, err
+ }
+ accountData, err := db.Ancient(stateHistoryAccountData, id-1)
+ if err != nil {
+ return nil, nil, nil, nil, nil, err
+ }
+ storageData, err := db.Ancient(stateHistoryStorageData, id-1)
+ if err != nil {
+ return nil, nil, nil, nil, nil, err
+ }
+ return meta, accountIndex, storageIndex, accountData, storageData, nil
+}
+
+// WriteStateHistory writes the provided state history to database. Compute the
+// position of state history in freezer by minus one since the id of first state
+// history starts from one(zero for initial state).
+func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIndex []byte, storageIndex []byte, accounts []byte, storages []byte) {
+ db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ op.AppendRaw(stateHistoryMeta, id-1, meta)
+ op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex)
+ op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex)
+ op.AppendRaw(stateHistoryAccountData, id-1, accounts)
+ op.AppendRaw(stateHistoryStorageData, id-1, storages)
+ return nil
+ })
+}
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index e240213025..978220f15c 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -46,33 +46,34 @@ const HashScheme = "hashScheme"
// on extra state diffs to survive deep reorg.
const PathScheme = "pathScheme"
-// nodeHasher used to derive the hash of trie node.
-type nodeHasher struct{ sha crypto.KeccakState }
+// hasher is used to compute the sha256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
var hasherPool = sync.Pool{
- New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
}
-func newNodeHasher() *nodeHasher { return hasherPool.Get().(*nodeHasher) }
-func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) }
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
-func (h *nodeHasher) hashData(data []byte) (n common.Hash) {
- h.sha.Reset()
- h.sha.Write(data)
- h.sha.Read(n[:])
- return n
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
+func (h *hasher) release() {
+ hasherPool.Put(h)
}
// ReadAccountTrieNode retrieves the account trie node and the associated node
-// hash with the specified node path.
+// hash with the specified node path. If it's empty, return empty hash.
func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) {
data, err := db.Get(accountTrieNodeKey(path))
if err != nil {
return nil, common.Hash{}
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return data, hasher.hashData(data)
+ h := newHasher()
+ defer h.release()
+ return data, h.hash(data)
}
// HasAccountTrieNode checks the account trie node presence with the specified
@@ -82,9 +83,9 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash)
if err != nil {
return false
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return hasher.hashData(data) == hash
+ h := newHasher()
+ defer h.release()
+ return h.hash(data) == hash
}
// WriteAccountTrieNode writes the provided account trie node into database.
@@ -108,9 +109,9 @@ func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path
if err != nil {
return nil, common.Hash{}
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return data, hasher.hashData(data)
+ h := newHasher()
+ defer h.release()
+ return data, h.hash(data)
}
// HasStorageTrieNode checks the storage trie node presence with the provided
@@ -120,9 +121,9 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [
if err != nil {
return false
}
- hasher := newNodeHasher()
- defer returnHasherToPool(hasher)
- return hasher.hashData(data) == hash
+ h := newHasher()
+ defer h.release()
+ return h.hash(data) == hash
}
// WriteStorageTrieNode writes the provided storage trie node into database.
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
index f621ba1a3d..b0f507cdd5 100644
--- a/core/rawdb/ancient_scheme.go
+++ b/core/rawdb/ancient_scheme.go
@@ -16,6 +16,8 @@
package rawdb
+import "path/filepath"
+
// The list of table names of chain freezer. (headers, hashes, bodies, difficulties)
const (
@@ -35,6 +37,30 @@ const (
chainFreezerDifficultyTable = "diffs"
)
+const (
+ // stateHistoryTableSize defines the maximum size of freezer data files.
+ stateHistoryTableSize = 2 * 1000 * 1000 * 1000 // 2GB
+
+ // stateHistoryAccountIndex indicates the name of the freezer state history table (Account + Storage).
+ stateHistoryMeta = "history.meta"
+ stateHistoryAccountIndex = "account.index"
+ stateHistoryStorageIndex = "storage.index"
+ stateHistoryAccountData = "account.data"
+ stateHistoryStorageData = "storage.data"
+
+ namespace = "eth/db/state"
+)
+
+// stateHistoryFreezerNoSnappy configures whether compression is disabled for the stateHistory.
+// https://github.com/golang/snappy, Reason for splititng files for looking up in archive mode easily.
+var stateHistoryFreezerNoSnappy = map[string]bool{
+ stateHistoryMeta: true,
+ stateHistoryAccountIndex: false,
+ stateHistoryStorageIndex: false,
+ stateHistoryAccountData: false,
+ stateHistoryStorageData: false,
+}
+
// chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables.
// Hashes and difficulties don't compress well.
var chainFreezerNoSnappy = map[string]bool{
@@ -48,7 +74,15 @@ var chainFreezerNoSnappy = map[string]bool{
// The list of identifiers of ancient stores. It can split more in the futures.
var (
chainFreezerName = "chain" // the folder name of chain segment ancient store.
+ stateFreezerName = "state" // the folder name of reverse diff ancient store.
)
// freezers the collections of all builtin freezers.
var freezers = []string{chainFreezerName}
+
+// NewStateHistoryFreezer initializes the freezer for state history.
+func NewStateHistoryFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) {
+ return NewResettableFreezer(
+ filepath.Join(ancientDir, stateFreezerName), namespace, readOnly,
+ stateHistoryTableSize, stateHistoryFreezerNoSnappy)
+}
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go
index 45cc6323e0..b6180f8252 100644
--- a/core/rawdb/chain_iterator_test.go
+++ b/core/rawdb/chain_iterator_test.go
@@ -34,7 +34,7 @@ func TestChainIterator(t *testing.T) {
var block *types.Block
var txs []*types.Transaction
to := common.BytesToAddress([]byte{0x11})
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
for i := uint64(1); i <= 10; i++ {
@@ -60,7 +60,7 @@ func TestChainIterator(t *testing.T) {
})
}
txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher())
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
}
@@ -111,7 +111,7 @@ func TestIndexTransactions(t *testing.T) {
to := common.BytesToAddress([]byte{0x11})
// Write empty genesis block
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher())
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
@@ -138,7 +138,7 @@ func TestIndexTransactions(t *testing.T) {
})
}
txs = append(txs, tx)
- block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher())
+ block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher())
WriteBlock(chainDb, block)
WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 6a08932208..cf0cc15096 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -129,13 +129,13 @@ func (db *nofreezedb) Sync() error {
}
// TruncateHead returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateHead(items uint64) error {
- return errNotSupported
+func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) {
+ return 0, errNotSupported
}
// TruncateTail returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateTail(items uint64) error {
- return errNotSupported
+func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) {
+ return 0, errNotSupported
}
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 1a96aff6d7..df768eb693 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -194,9 +194,11 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) {
// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
// It will return
-// - at most 'max' items,
-// - at least 1 item (even if exceeding the maxByteSize), but will otherwise
-// return as many items as fit into maxByteSize.
+// - at most 'count' items,
+// - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize),
+// but will otherwise return as many items as fit into maxByteSize.
+// - if maxBytes is not specified, 'count' items will be returned if they are present.Retru
+// - if maxBytes is not specified, 'count' items will be returned if they are present.Retru
func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
if table := f.tables[kind]; table != nil {
return table.RetrieveItems(start, count, maxBytes)
@@ -270,46 +272,50 @@ func (f *Freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
}
// TruncateHead discards any recent data above the provided threshold number, only keep the first items ancient data.
-func (f *Freezer) TruncateHead(items uint64) error {
+// Return the old head number.
+func (f *Freezer) TruncateHead(items uint64) (uint64, error) {
if f.readonly {
- return errReadOnly
+ return 0, errReadOnly
}
f.writeLock.Lock()
defer f.writeLock.Unlock()
// If the current frozen number is less than the requested items for frozen, do nothing.
- if f.frozen.Load() <= items {
- return nil
+ previousItems := f.frozen.Load()
+ if previousItems <= items {
+ return previousItems, nil
}
for _, table := range f.tables {
if err := table.truncateHead(items); err != nil {
- return err
+ return 0, err
}
}
f.frozen.Store(items)
- return nil
+ return previousItems, nil
}
-// TruncateTail discards any recent data below the provided threshold number, only keep the last items ancient data.
-func (f *Freezer) TruncateTail(tail uint64) error {
+// TruncateTail discards any recent data below the provided threshold number, only keep the last items ancient data, return the old tail number.
+func (f *Freezer) TruncateTail(tail uint64) (uint64, error) {
if f.readonly {
- return errReadOnly
+ return 0, errReadOnly
}
f.writeLock.Lock()
defer f.writeLock.Unlock()
// If the current tail number is greater than the requested tail, seem out of range for truncating, do nothing.
- if f.tail.Load() >= tail {
- return nil
+ old := f.tail.Load()
+
+ if old >= tail {
+ return old, nil
}
for _, table := range f.tables {
if err := table.truncateTail(tail); err != nil {
- return err
+ return 0, err
}
}
f.tail.Store(tail)
- return nil
+ return old, nil
}
// Sync flushes all data tables to disk.
@@ -345,7 +351,7 @@ func (f *Freezer) repair() error {
}
}
- // Truncate all tables to the common head and tail.
+ // Truncate all tables to the common head and tail. Returns the previous head number.
for _, table := range f.tables {
if err := table.truncateHead(head); err != nil {
return err
diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go
new file mode 100644
index 0000000000..45d32b0f88
--- /dev/null
+++ b/core/rawdb/freezer_resettable.go
@@ -0,0 +1,241 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// In PBSS, this freezer is used to reverse diff
+// The idea for implementing this package is to provide a freezer which supported resettable in case we need to rollback to the genesis
+// Normally, TruncateTail is irreversible. This implementing will depend on "os.Rename" & "os.RemoveAll" to delete and recreate a new one from scratch.
+
+const tmpSuffix = ".tmp"
+
+// freezerOpenFunc is the function used to open/create a freezer.
+type freezerOpenFunc = func() (*Freezer, error)
+
+// ResettableFreezer is a wrapper of the freezer which makes the
+// freezer resettable.
+type ResettableFreezer struct {
+ freezer *Freezer
+ opener freezerOpenFunc
+ datadir string
+ lock sync.RWMutex
+}
+
+// NewResettableFreezer creates a resettable freezer, note freezer is
+// only resettable if the passed file directory is exclusively occupied
+// by the freezer. And also the user-configurable ancient root directory
+// is **not** supported for reset since it might be a mount and rename
+// will cause a copy of hundreds of gigabyte into local directory. It
+// needs some other file based solutions.
+//
+// The reset function will delete directory atomically and re-create the
+// freezer from scratch.
+// namespace is the prefix for metrics which is not stored in freezer
+func NewResettableFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*ResettableFreezer, error) {
+ // Clean up if we figureout .tmp inside data directory
+ if err := cleanup(datadir); err != nil {
+ return nil, err
+ }
+ opener := func() (*Freezer, error) {
+ return NewFreezer(datadir, namespace, readonly, maxTableSize, tables)
+ }
+ freezer, err := opener()
+ if err != nil {
+ return nil, err
+ }
+ return &ResettableFreezer{
+ freezer: freezer,
+ opener: opener,
+ datadir: datadir,
+ }, nil
+}
+
+// Reset deletes the file directory exclusively occupied by the freezer and
+// recreate the freezer from scratch. The atomicity of directory deletion
+// is guaranteed by the rename operation,
+func (f *ResettableFreezer) Reset() error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Close the freezer before deleting the directory
+ if err := f.freezer.Close(); err != nil {
+ return err
+ }
+
+ tmp := tmpName(f.datadir)
+ if err := os.Rename(f.datadir, tmp); err != nil {
+ return err
+ }
+
+ // the leftover directory will be cleaned up in next startup in case crash happens after rename. See in cleanup function.
+ if err := os.RemoveAll(tmp); err != nil {
+ return err
+ }
+ freezer, err := f.opener()
+ if err != nil {
+ return err
+ }
+ f.freezer = freezer
+ return nil
+}
+
+// Close terminates the chain freezer, unmapping all the data files.
+func (f *ResettableFreezer) Close() error {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.Close()
+}
+
+// HasAncient returns an indicator whether the specified ancient data exists
+// in the freezer
+func (f *ResettableFreezer) HasAncient(kind string, number uint64) (bool, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.HasAncient(kind, number)
+}
+
+// Ancient retrieves an ancient binary blob from the append-only immutable files.
+func (f *ResettableFreezer) Ancient(kind string, number uint64) ([]byte, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.Ancient(kind, number)
+}
+
+// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
+// It will return
+// - at most 'max' items,
+// - at least 1 item (even if exceeding the maxByteSize), but will otherwise
+// return as many items as fit into maxByteSize
+func (f *ResettableFreezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.AncientRange(kind, start, count, maxBytes)
+}
+
+// Ancients returns the length of the frozen items.
+func (f *ResettableFreezer) Ancients() (uint64, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.Ancients()
+}
+
+// Tail returns the number of first stored item in the freezer.
+func (f *ResettableFreezer) Tail() (uint64, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.Tail()
+}
+
+// AncientSize returns the ancient size of the specified category.
+func (f *ResettableFreezer) AncientSize(kind string) (uint64, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.AncientSize(kind)
+}
+
+// ReadAncients runs the given read operation while ensuring that no writes take place
+// on the underlying freezer.
+func (f *ResettableFreezer) ReadAncients(fn func(ethdb.AncientReaderOp) error) (err error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.ReadAncients(fn)
+}
+
+// ModifyAncients runs the given write operation.
+func (f *ResettableFreezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.ModifyAncients(fn)
+}
+
+// TruncateHead discards any recent data above the provided threshold number.
+func (f *ResettableFreezer) TruncateHead(items uint64) (uint64, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.TruncateHead(items)
+}
+
+// TruncateTail discards any recent data below the provided threshold number.
+func (f *ResettableFreezer) TruncateTail(tail uint64) (uint64, error) {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.TruncateTail(tail)
+}
+
+// Sync flushes all data tables to disk.
+func (f *ResettableFreezer) Sync() error {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+
+ return f.freezer.Sync()
+}
+
+func cleanup(pathToDelete string) error {
+ parentDir := filepath.Dir(pathToDelete)
+
+ // In case Parent directory does not exist, return nil, no need to cleanup.
+ if _, err := os.Lstat(parentDir); os.IsNotExist(err) {
+ return nil
+ }
+ dir, err := os.Open(parentDir)
+ if err != nil {
+ return err
+ }
+ // Read all the names of files and directories in the parent directory with single slice.
+ names, err := dir.Readdirnames(0)
+ if err != nil {
+ return err
+ }
+ if cerr := dir.Close(); cerr != nil {
+ return cerr
+ }
+
+ for _, name := range names {
+ if name == filepath.Base(pathToDelete)+tmpSuffix {
+ // Figure out then delete the tmp directory which is renamed in Reset Method.
+ log.Info("Cleaning up the freezer Reset directory", "pathToDelete", pathToDelete, "total files inside", len(names))
+ return os.RemoveAll(filepath.Join(parentDir, name))
+ }
+ }
+ return nil
+
+}
+
+// /home/user/documents -> /home/user/documents.tmp (Directory)
+// /home/user/documents/file.txt -> /home/user/documents/file.txt.tmp (File)
+func tmpName(path string) string {
+ return filepath.Join(filepath.Dir(path), filepath.Base(path)+tmpSuffix)
+}
diff --git a/core/rawdb/freezer_resettable_test.go b/core/rawdb/freezer_resettable_test.go
new file mode 100644
index 0000000000..bd7129ae84
--- /dev/null
+++ b/core/rawdb/freezer_resettable_test.go
@@ -0,0 +1,120 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+func TestResetFreezer(t *testing.T) {
+ items := []struct {
+ id uint64
+ blob []byte
+ }{
+ {0, bytes.Repeat([]byte{0}, 2048)},
+ {1, bytes.Repeat([]byte{1}, 2048)},
+ {2, bytes.Repeat([]byte{2}, 2048)},
+ {3, bytes.Repeat([]byte{3}, 2048)},
+ }
+ temp := t.TempDir()
+ f, _ := NewResettableFreezer(temp, "", false, 2048, freezerTestTableDef)
+ defer f.Close()
+
+ f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for _, item := range items {
+ op.AppendRaw("test", item.id, item.blob)
+ }
+ return nil
+ })
+ // Expected can get
+ for _, item := range items {
+ blob, _ := f.Ancient("test", item.id)
+ if !bytes.Equal(blob, item.blob) {
+ t.Fatalf("Failed to get the correct blob")
+ }
+ }
+ if _, err := os.Lstat(temp); os.IsNotExist(err) {
+ t.Fatal("Expected datadir should exist")
+ }
+ // Reset freezer, Expect all data is removed, and the directory is still there.
+ f.Reset()
+ count, _ := f.Ancients()
+ if count != 0 {
+ t.Fatal("Failed to reset freezer")
+ }
+ for _, item := range items {
+ blob, _ := f.Ancient("test", item.id)
+ if len(blob) != 0 {
+ t.Fatal("Unexpected blob")
+ }
+ }
+ if _, err := os.Lstat(temp); os.IsNotExist(err) {
+ t.Fatal("Expected datadir should exist")
+ }
+ // Fill the freezer
+ f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for _, item := range items {
+ op.AppendRaw("test", item.id, item.blob)
+ }
+ return nil
+ })
+ for _, item := range items {
+ blob, _ := f.Ancient("test", item.id)
+ if !bytes.Equal(blob, item.blob) {
+ t.Fatal("Unexpected blob")
+ }
+ }
+}
+
+func TestFreezerCleanUpWhenInit(t *testing.T) {
+ items := []struct {
+ id uint64
+ blob []byte
+ }{
+ {0, bytes.Repeat([]byte{0}, 2048)},
+ {1, bytes.Repeat([]byte{1}, 2048)},
+ {2, bytes.Repeat([]byte{2}, 2048)},
+ {3, bytes.Repeat([]byte{3}, 2048)},
+ }
+ // Generate a temporary directory for the freezer
+ datadir := t.TempDir()
+ // Expect nothing here.
+ f, _ := NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef)
+ // Write some data to the freezer
+ f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
+ for _, item := range items {
+ op.AppendRaw("test", item.id, item.blob)
+ }
+ return nil
+ })
+ f.Close()
+ fmt.Println(tmpName(datadir))
+ os.Rename(datadir, tmpName(datadir))
+ // Open the freezer again, trigger cleanup operation
+ f, _ = NewResettableFreezer(datadir, "", false, 2048, freezerTestTableDef)
+ f.Close()
+
+ // Expected datadir.tmp should be removed
+ if _, err := os.Lstat(tmpName(datadir)); !os.IsNotExist(err) {
+ t.Fatal("Failed to cleanup leftover directory")
+ }
+}
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index 81e8a3155f..fd26882d5e 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -422,6 +422,7 @@ func (t *freezerTable) truncateTail(items uint64) error {
defer t.lock.Unlock()
// The truncateTarget is below the current tail, return nil, no need to truncate
+
if t.itemHidden.Load() >= items {
return nil
}
@@ -678,7 +679,7 @@ func (t *freezerTable) RetrieveItems(start, count, maxBytes uint64) ([][]byte, e
if !t.noCompression {
decompressedSize, _ = snappy.DecodedLen(item)
}
- if i > 0 && uint64(outputSize+decompressedSize) > maxBytes {
+ if i > 0 && maxBytes != 0 && uint64(outputSize+decompressedSize) > maxBytes {
break
}
if !t.noCompression {
@@ -696,14 +697,16 @@ func (t *freezerTable) RetrieveItems(start, count, maxBytes uint64) ([][]byte, e
}
// retrieveItems reads up to 'count' items from the table. It reads at least
-// one item, but otherwise avoids reading more than maxBytes bytes.
-// It returns the (potentially compressed) data, and the sizes.
+// one item, but otherwise avoids reading more than maxBytes bytes. Freezer
+// will ignore the size limitation and continuously allocate memory to store
+// data if maxBytes is 0. It returns the (potentially compressed) data, and
+// the sizes.
func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []int, error) {
t.lock.RLock()
defer t.lock.RUnlock()
// Ensure the table and the item is accessible
- if t.index == nil || t.head == nil {
+ if t.index == nil || t.head == nil || t.meta == nil {
return nil, nil, errClosed
}
items := t.items.Load() // max number
@@ -713,28 +716,31 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
if items <= start || hidden > start || count == 0 {
return nil, nil, errOutOfBounds
}
+
if start+count > items {
count = items - start
}
- var (
- output = make([]byte, maxBytes) // Buffer to read data into
- outputSize int // Used size of that buffer
- )
+
+ var output []byte // Buffer to read data into
+
+ if maxBytes != 0 {
+ output = make([]byte, 0, maxBytes)
+ } else {
+ output = make([]byte, 0, 1024) // initial buffer cap
+ }
+
// readData is a helper method to read a single data item from disk.
readData := func(fileId, start uint32, length int) error {
// In case a small limit is used, and the elements are large, may need to
// realloc the read-buffer when reading the first (and only) item.
- if len(output) < length {
- output = make([]byte, length)
- }
+ output = grow(output, length)
dataFile, exist := t.files[fileId]
if !exist {
return fmt.Errorf("missing data file %d", fileId)
}
- if _, err := dataFile.ReadAt(output[outputSize:outputSize+length], int64(start)); err != nil {
- return err
+ if _, err := dataFile.ReadAt(output[len(output)-length:], int64(start)); err != nil {
+ return fmt.Errorf("%w, fileid: %d, start: %d, length: %d", err, fileId, start, length)
}
- outputSize += length
return nil
}
// Read all the indexes in one go
@@ -742,6 +748,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
if err != nil {
return nil, nil, err
}
+
var (
sizes []int // The sizes for each element
totalSize = 0 // The total size of all data read so far
@@ -765,7 +772,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
}
readStart = 0
}
- if i > 0 && uint64(totalSize+size) > maxBytes {
+ if i > 0 && uint64(totalSize+size) > maxBytes && maxBytes != 0 {
// About to break out due to byte limit being exceeded. We don't
// read this last item, but we need to do the deferred reads now.
if unreadSize > 0 {
@@ -779,7 +786,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
unreadSize += size
totalSize += size
sizes = append(sizes, size)
- if i == len(indices)-2 || uint64(totalSize) > maxBytes {
+ if i == len(indices)-2 || (uint64(totalSize) > maxBytes && maxBytes != 0) {
// Last item, need to do the read now
if err := readData(secondIndex.filenum, readStart, unreadSize); err != nil {
return nil, nil, err
@@ -787,7 +794,9 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
break
}
}
- return output[:outputSize], sizes, nil
+ // Update metrics.
+ t.readMeter.Mark(int64(totalSize))
+ return output, sizes, nil
}
// has returns an indicator whether the specified number data
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index edbfa15687..074144a9e2 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -829,3 +829,49 @@ func TestSequentialReadByteLimit(t *testing.T) {
}
}
}
+
+// TestSequentialReadNoByteLimit tests the batch-read if maxBytes is not specified.
+// Freezer should return the requested items regardless the size limitation.
+func TestSequentialReadNoByteLimit(t *testing.T) {
+ rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
+ fname := fmt.Sprintf("batchread-3-%d", rand.Uint64())
+ { // Fill table
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Write 10 bytes 30 times,
+ // Splitting it at every 100 bytes (10 items)
+ writeChunks(t, f, 30, 10)
+ f.Close()
+ }
+ for i, tc := range []struct {
+ items uint64
+ want int
+ }{
+ {1, 1},
+ {30, 30},
+ {31, 30},
+ } {
+ {
+ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ items, err := f.RetrieveItems(0, tc.items, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if have, want := len(items), tc.want; have != want {
+ t.Fatalf("test %d: want %d items, have %d ", i, want, have)
+ }
+ for ii, have := range items {
+ want := getChunk(10, ii)
+ if !bytes.Equal(want, have) {
+ t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
+ }
+ }
+ f.Close()
+ }
+ }
+}
diff --git a/core/rawdb/freezer_test.go b/core/rawdb/freezer_test.go
index 418e4ae5b1..9749e58616 100644
--- a/core/rawdb/freezer_test.go
+++ b/core/rawdb/freezer_test.go
@@ -196,7 +196,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
for i := 0; i < 1000; i++ {
// First reset and write 100 items.
- if err := f.TruncateHead(0); err != nil {
+ if _, err := f.TruncateHead(0); err != nil {
t.Fatal("truncate failed:", err)
}
_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@@ -231,7 +231,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
wg.Done()
}()
go func() {
- truncateErr = f.TruncateHead(10)
+ _, truncateErr = f.TruncateHead(10)
wg.Done()
}()
go func() {
diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go
index 4354f94986..6a18a5a016 100644
--- a/core/rawdb/freezer_utils.go
+++ b/core/rawdb/freezer_utils.go
@@ -123,3 +123,19 @@ func truncateFreezerFile(file *os.File, size int64) error {
}
return nil
}
+
+// grow prepares the slice space for new item, and doubles the slice capacity
+// if space is not enough.
+func grow(buf []byte, n int) []byte {
+ if cap(buf)-len(buf) < n {
+ newcap := 2 * cap(buf)
+ if newcap-len(buf) < n {
+ newcap = len(buf) + n
+ }
+ nbuf := make([]byte, len(buf), newcap)
+ copy(nbuf, buf)
+ buf = nbuf
+ }
+ buf = buf[:len(buf)+n]
+ return buf
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 439e47df9f..6c2afc1d1c 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -40,6 +40,9 @@ var (
// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
headFastBlockKey = []byte("LastFast")
+ // persistentStateIDKey tracks the id of latest stored state(for path-based only)
+ persistentStateIDKey = []byte("LastStateID")
+
// lastPivotKey tracks the last pivot block used by fast sync (to reenable on sethead).
lastPivotKey = []byte("LastPivot")
@@ -64,6 +67,9 @@ var (
// snapshotSyncStatusKey tracks the snapshot sync status across restarts.
snapshotSyncStatusKey = []byte("SnapshotSyncStatus")
+ // trieJournalKey tracks the in-memory trie node layers across restarts.
+ trieJournalKey = []byte("TrieJournal")
+
// txIndexTailKey tracks the oldest block whose transactions have been indexed.
txIndexTailKey = []byte("TransactionIndexTail")
@@ -107,6 +113,7 @@ var (
// Path-based storage scheme of merkle patricia trie.
trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
+ stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
@@ -296,3 +303,8 @@ func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength])
return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:]
}
+
+// stateIDKey = stateIDPrefix + root (32 bytes)
+func stateIDKey(root common.Hash) []byte {
+ return append(stateIDPrefix, root.Bytes()...)
+}
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 2672f4ea8d..73ec8416a1 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -97,12 +97,12 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err e
// TruncateHead is a noop passthrough that just forwards the request to the underlying
// database.
-func (t *table) TruncateHead(items uint64) error {
+func (t *table) TruncateHead(items uint64) (uint64, error) {
return t.db.TruncateHead(items)
}
// TruncateTail is a noop passthrough that just forwards the request to the underlying
-func (t *table) TruncateTail(items uint64) error {
+func (t *table) TruncateTail(items uint64) (uint64, error) {
return t.db.TruncateTail(items)
}
diff --git a/core/state/journal.go b/core/state/journal.go
index 44ae7faf67..99ac806c1c 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -170,7 +170,10 @@ func (ch resetObjectChange) revert(s *StateDB) {
delete(s.stateObjectsDestruct, ch.prev.address)
}
if ch.prevAccountOriginExist {
- s.accountsOrigin[ch.prev.addrHash] = ch.prevAccountOrigin
+ s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin
+ }
+ if ch.prevStorageOrigin != nil {
+ s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin
}
if ch.prevAccount != nil {
s.accounts[ch.prev.addrHash] = ch.prevAccount
diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go
deleted file mode 100644
index b92e942950..0000000000
--- a/core/state/snapshot/account.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package snapshot
-
-import (
- "bytes"
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/rlp"
-)
-
-// Account is a modified version of a state.Account, where the root is replaced
-// with a byte slice. This format can be used to represent full-consensus format
-// or slim-snapshot format which replaces the empty root and code hash as nil
-// byte slice.
-type Account struct {
- Nonce uint64
- Balance *big.Int
- Root []byte
- CodeHash []byte
-}
-
-// SlimAccount converts a state.Account content into a slim snapshot account
-func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account {
- slim := Account{
- Nonce: nonce,
- Balance: balance,
- }
- if root != emptyRoot {
- slim.Root = root[:]
- }
- if !bytes.Equal(codehash, emptyCode[:]) {
- slim.CodeHash = codehash
- }
- return slim
-}
-
-// SlimAccountRLP converts a state.Account content into a slim snapshot
-// version RLP encoded.
-func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte {
- data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash))
- if err != nil {
- panic(err)
- }
- return data
-}
-
-// FullAccount decodes the data on the 'slim RLP' format and return
-// the consensus format account.
-func FullAccount(data []byte) (Account, error) {
- var account Account
- if err := rlp.DecodeBytes(data, &account); err != nil {
- return Account{}, err
- }
- if len(account.Root) == 0 {
- account.Root = emptyRoot[:]
- }
- if len(account.CodeHash) == 0 {
- account.CodeHash = emptyCode[:]
- }
- return account, nil
-}
-
-// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
-func FullAccountRLP(data []byte) ([]byte, error) {
- account, err := FullAccount(data)
- if err != nil {
- return nil, err
- }
- return rlp.EncodeToBytes(account)
-}
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index abf541aef6..b567579525 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -17,7 +17,6 @@
package snapshot
import (
- "bytes"
"encoding/binary"
"errors"
"fmt"
@@ -28,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -300,7 +300,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
fullData []byte
)
if leafCallback == nil {
- fullData, err = FullAccountRLP(it.(AccountIterator).Account())
+ fullData, err = types.FullAccountRLP(it.(AccountIterator).Account())
if err != nil {
return stop(err)
}
@@ -312,7 +312,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
return stop(err)
}
// Fetch the next account and process it concurrently
- account, err := FullAccount(it.(AccountIterator).Account())
+ account, err := types.FullAccount(it.(AccountIterator).Account())
if err != nil {
return stop(err)
}
@@ -322,7 +322,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
results <- err
return
}
- if !bytes.Equal(account.Root, subroot.Bytes()) {
+ if account.Root != subroot {
results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot)
return
}
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index 2d69c33355..2409ae1422 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -27,6 +27,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
bloomfilter "github.com/holiman/bloomfilter/v2"
)
@@ -269,7 +270,7 @@ func (dl *diffLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
-func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
+func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) {
data, err := dl.AccountRLP(hash)
if err != nil {
return nil, err
@@ -277,7 +278,7 @@ func (dl *diffLayer) Account(hash common.Hash) (*Account, error) {
if len(data) == 0 { // can be both nil and []byte{}
return nil, nil
}
- account := new(Account)
+ account := new(types.SlimAccount)
if err := rlp.DecodeBytes(data, account); err != nil {
panic(err)
}
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index 7cbf6e293d..513f0f5aba 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -23,6 +23,7 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
@@ -65,7 +66,7 @@ func (dl *diskLayer) Stale() bool {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
-func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
+func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) {
data, err := dl.AccountRLP(hash)
if err != nil {
return nil, err
@@ -73,7 +74,7 @@ func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
if len(data) == 0 { // can be both nil and []byte{}
return nil, nil
}
- account := new(Account)
+ account := new(types.SlimAccount)
if err := rlp.DecodeBytes(data, account); err != nil {
panic(err)
}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 3367e98be7..a9ea6be54d 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -21,7 +21,6 @@ import (
"encoding/binary"
"errors"
"fmt"
- "math/big"
"time"
"github.com/VictoriaMetrics/fastcache"
@@ -616,12 +615,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
return nil
}
// Retrieve the current account and flatten it into the internal format
- var acc struct {
- Nonce uint64
- Balance *big.Int
- Root common.Hash
- CodeHash []byte
- }
+ var acc types.StateAccount
if err := rlp.DecodeBytes(val, &acc); err != nil {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
@@ -637,7 +631,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
}
snapRecoveredAccountMeter.Mark(1)
} else {
- data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
+ data := types.SlimAccountRLP(acc)
dataLen = len(data)
rawdb.WriteAccountSnapshot(batch, accountHash, data)
snapGeneratedAccountMeter.Mark(1)
@@ -722,7 +716,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {
// Global loop for regenerating the entire state trie + all layered storage tries.
for {
- exhausted, last, err := dl.generateRange(trie.StateTrieID(dl.root), rawdb.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, FullAccountRLP)
+ exhausted, last, err := dl.generateRange(trie.StateTrieID(dl.root), rawdb.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, types.FullAccountRLP)
// The procedure is aborted, either by external signal or internal error
if err != nil {
if abort == nil { // aborted by internal error, wait the signal
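
The rewritten hunks above swap the ad-hoc anonymous account struct for types.StateAccount and funnel re-encoding through types.SlimAccountRLP. A minimal round-trip sketch of that decode-and-slim step, using the API introduced later in this patch:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Encode a consensus-format account, as stored in an account trie leaf.
	val, _ := rlp.EncodeToBytes(&types.StateAccount{
		Nonce:    1,
		Balance:  big.NewInt(100),
		Root:     types.EmptyRootHash,
		CodeHash: crypto.Keccak256(nil),
	})

	// Decode it straight into a StateAccount and flatten to the slim form,
	// mirroring the snapshot generator's onAccount path.
	var acc types.StateAccount
	if err := rlp.DecodeBytes(val, &acc); err != nil {
		panic(err)
	}
	fmt.Printf("slim: %x\n", types.SlimAccountRLP(acc))
}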
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 68d94dc522..08d83c46f6 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -51,9 +51,9 @@ func TestGeneration(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: emptyRoot, CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
@@ -85,16 +85,16 @@ func TestGenerateExistentState(t *testing.T) {
var helper = newHelper()
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
- helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
- helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: emptyRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: emptyRoot, CodeHash: emptyCode.Bytes()})
stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
- helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
root, snap := helper.CommitAndGenerate()
@@ -155,18 +155,18 @@ func newHelper() *testHelper {
}
}
-func (t *testHelper) addTrieAccount(acckey string, acc *Account) {
+func (t *testHelper) addTrieAccount(acckey string, acc *types.StateAccount) {
val, _ := rlp.EncodeToBytes(acc)
t.accTrie.Update([]byte(acckey), val)
}
-func (t *testHelper) addSnapAccount(acckey string, acc *Account) {
+func (t *testHelper) addSnapAccount(acckey string, acc *types.StateAccount) {
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte(acckey))
rawdb.WriteAccountSnapshot(t.diskdb, key, val)
}
-func (t *testHelper) addAccount(acckey string, acc *Account) {
+func (t *testHelper) addAccount(acckey string, acc *types.StateAccount) {
t.addTrieAccount(acckey, acc)
t.addSnapAccount(acckey, acc)
}
@@ -178,19 +178,19 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string)
}
}
-func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) []byte {
+func (t *testHelper) makeStorageTrie(stateRoot, owner common.Hash, keys []string, vals []string, commit bool) common.Hash {
stTrie, _ := trie.NewSecure(trie.StorageTrieID(stateRoot, owner, common.Hash{}), t.triedb)
for i, k := range keys {
stTrie.Update([]byte(k), []byte(vals[i]))
}
if !commit {
- return stTrie.Hash().Bytes()
+ return stTrie.Hash()
}
root, nodes, _ := stTrie.Commit(false)
if nodes != nil {
t.nodes.Merge(nodes)
}
- return root.Bytes()
+ return root
}
func (t *testHelper) Commit() common.Hash {
@@ -231,28 +231,28 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
helper := newHelper()
// Account one, empty root but non-empty database
- helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"})
// Account two, non empty root but empty database
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
// Miss slots
{
// Account three, non empty root but misses slots in the beginning
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"})
// Account four, non empty root but misses slots in the middle
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"})
// Account five, non empty root but misses slots in the end
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"})
}
@@ -260,22 +260,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
{
// Account six, non empty root but wrong slots in the beginning
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"})
// Account seven, non empty root but wrong slots in the middle
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"})
// Account eight, non empty root but wrong slots in the end
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-8", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"})
// Account 9, non empty root but rotated slots
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-9", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"})
}
@@ -283,17 +283,17 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
{
// Account 10, non empty root but extra slots in the beginning
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-10", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-10", []string{"key-0", "key-1", "key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"})
// Account 11, non empty root but extra slots in the middle
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-11", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"})
// Account 12, non empty root but extra slots in the end
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount("acc-12", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"})
}
@@ -333,25 +333,25 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
// Missing accounts, only in the trie
{
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Beginning
- helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Middle
- helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // End
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Beginning
+ helper.addTrieAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // Middle
+ helper.addTrieAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // End
}
// Wrong accounts
{
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
- helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")})
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
- helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()})
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()})
}
// Extra accounts, only in the snap
{
- helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyRoot.Bytes()}) // before the beginning
- helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle
- helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyRoot.Bytes()}) // after the end
+ helper.addSnapAccount("acc-0", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyRoot.Bytes()}) // before the beginning
+ helper.addSnapAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: common.Hex2Bytes("0x1234")}) // Middle
+ helper.addSnapAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyRoot.Bytes()}) // after the end
}
root, snap := helper.CommitAndGenerate()
@@ -380,9 +380,9 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
// without any storage slots to keep the test smaller.
helper := newHelper()
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: emptyRoot, CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: emptyRoot, CodeHash: emptyCode.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4
root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
@@ -415,14 +415,14 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
helper := newHelper()
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: emptyRoot, CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
root := helper.Commit()
// Delete a storage trie root and ensure the generator chokes
- helper.diskdb.Delete(stRoot)
+ helper.diskdb.Delete(stRoot.Bytes())
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
@@ -448,10 +448,10 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
helper := newHelper()
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
- helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
- helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
+ helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
+ helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: emptyRoot, CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
stRoot = helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
- helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
+ helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
root := helper.Commit()
// Delete a storage trie leaf and ensure the generator chokes
@@ -482,7 +482,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
@@ -502,7 +502,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3", "val-4", "val-5"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte("acc-2"))
rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val)
@@ -553,7 +553,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
[]string{"val-1", "val-2", "val-3"},
true,
)
- acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.Update([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
@@ -567,8 +567,8 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
{
// 100 accounts exist only in snapshot
for i := 0; i < 1000; i++ {
- //acc := &Account{Balance: big.NewInt(int64(i)), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
- acc := &Account{Balance: big.NewInt(int64(i)), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+ //acc := &types.StateAccount{Balance: big.NewInt(int64(i)), Root: stTrie.Hash().Bytes(), CodeHash: emptyCode.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(int64(i)), Root: emptyRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
key := hashData([]byte(fmt.Sprintf("acc-%d", i)))
rawdb.WriteAccountSnapshot(helper.diskdb, key, val)
@@ -605,7 +605,7 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
}
helper := newHelper()
{
- acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
helper.accTrie.Update(common.HexToHash("0x07").Bytes(), val)
@@ -642,7 +642,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
}
helper := newHelper()
{
- acc := &Account{Balance: big.NewInt(1), Root: emptyRoot.Bytes(), CodeHash: emptyCode.Bytes()}
+ acc := &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
helper.accTrie.Update(common.HexToHash("0x03").Bytes(), val)
@@ -681,7 +681,7 @@ func TestGenerateFromEmptySnap(t *testing.T) {
for i := 0; i < 400; i++ {
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount(fmt.Sprintf("acc-%d", i),
- &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
}
root, snap := helper.CommitAndGenerate()
t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4
@@ -718,7 +718,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) {
for i := 0; i < 8; i++ {
accKey := fmt.Sprintf("acc-%d", i)
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(accKey)), stKeys, stVals, true)
- helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: emptyCode.Bytes()})
+ helper.addAccount(accKey, &types.StateAccount{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: emptyCode.Bytes()})
var moddedKeys []string
var moddedVals []string
for ii := 0; ii < 8; ii++ {
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 1fcb40a354..267a2ffd83 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -103,7 +104,7 @@ type Snapshot interface {
// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
- Account(hash common.Hash) (*Account, error)
+ Account(hash common.Hash) (*types.SlimAccount, error)
// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go
index 87c46629d9..0daf4dfd78 100644
--- a/core/state/snapshot/snapshot_test.go
+++ b/core/state/snapshot/snapshot_test.go
@@ -27,6 +27,7 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -42,10 +43,10 @@ func randomHash() common.Hash {
// randomAccount generates a random account and returns it RLP encoded.
func randomAccount() []byte {
root := randomHash()
- a := Account{
+ a := &types.StateAccount{
Balance: big.NewInt(rand.Int63()),
Nonce: rand.Uint64(),
- Root: root[:],
+ Root: root,
CodeHash: emptyCode[:],
}
data, _ := rlp.EncodeToBytes(a)
@@ -463,7 +464,7 @@ func TestReadStateDuringFlattening(t *testing.T) {
snap := snaps.Snapshot(common.HexToHash("0xa3"))
// Register the testing hook to access the state after flattening
- var result = make(chan *Account)
+ var result = make(chan *types.SlimAccount)
snaps.onFlatten = func() {
// Spin up a thread to read the account from the pre-created
// snapshot handler. It's expected to be blocked.
diff --git a/core/state/state_object.go b/core/state/state_object.go
index a650ad55fa..be5452b4aa 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -377,9 +377,9 @@ func (s *stateObject) updateTrie() Trie {
storage[khash] = v // v will be nil if it's deleted
// Cache the original value of mutated storage slots
if origin == nil {
- if origin = s.db.storagesOrigin[s.addrHash]; origin == nil {
+ if origin = s.db.storagesOrigin[s.address]; origin == nil {
origin = make(map[common.Hash][]byte)
- s.db.storagesOrigin[s.addrHash] = origin
+ s.db.storagesOrigin[s.address] = origin
}
}
// Track the original value of slot only if it's mutated first time
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 6e97190c3f..b4d66f0687 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -79,10 +79,10 @@ type StateDB struct {
// These maps hold the state changes (including the corresponding
// original value) that occurred in this **block**.
- accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding
- storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format
- accountsOrigin map[common.Hash][]byte // The original value of mutated accounts in 'slim RLP' encoding
- storagesOrigin map[common.Hash]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format
+ accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding
+ storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format
+ accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding
+ storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format
// This map holds 'live' objects, which will get modified while processing a state transition.
stateObjects map[common.Address]*stateObject
@@ -152,8 +152,8 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
snaps: snaps,
accounts: make(map[common.Hash][]byte),
storages: make(map[common.Hash]map[common.Hash][]byte),
- accountsOrigin: make(map[common.Hash][]byte),
- storagesOrigin: make(map[common.Hash]map[common.Hash][]byte),
+ accountsOrigin: make(map[common.Address][]byte),
+ storagesOrigin: make(map[common.Address]map[common.Hash][]byte),
stateObjects: make(map[common.Address]*stateObject),
stateObjectsPending: make(map[common.Address]struct{}),
stateObjectsDirty: make(map[common.Address]struct{}),
@@ -533,15 +533,15 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
// to the deletion, because whereas it is enough to track account updates
// at commit time, deletions need tracking at transaction boundary level to
// ensure we capture state clearing.
- s.accounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash)
+ s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data)
// Track the original value of mutated account, nil means it was not present.
// Skip if it has been tracked (because updateStateObject may be called
// multiple times in a block).
- if _, ok := s.accountsOrigin[obj.addrHash]; !ok {
+ if _, ok := s.accountsOrigin[obj.address]; !ok {
if obj.origin == nil {
- s.accountsOrigin[obj.addrHash] = nil
+ s.accountsOrigin[obj.address] = nil
} else {
- s.accountsOrigin[obj.addrHash] = snapshot.SlimAccountRLP(obj.origin.Nonce, obj.origin.Balance, obj.origin.Root, obj.origin.CodeHash)
+ s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin)
}
}
}
@@ -587,7 +587,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
}
- var acc *snapshot.Account
+ var acc *types.SlimAccount
if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
if acc == nil {
return nil
@@ -664,7 +664,7 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject)
if prev == nil {
s.journal.append(createObjectChange{account: &addr})
} else {
- prevAccount, ok := s.accountsOrigin[prev.addrHash]
+ prevAccount, ok := s.accountsOrigin[prev.address]
s.journal.append(resetObjectChange{
account: &addr,
prev: prev,
@@ -673,12 +673,12 @@ func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject)
prevStorage: s.storages[prev.addrHash],
prevAccountOriginExist: ok,
prevAccountOrigin: prevAccount,
- prevStorageOrigin: s.storagesOrigin[prev.addrHash],
+ prevStorageOrigin: s.storagesOrigin[prev.address],
})
delete(s.accounts, prev.addrHash)
delete(s.storages, prev.addrHash)
- delete(s.accountsOrigin, prev.addrHash)
- delete(s.storagesOrigin, prev.addrHash)
+ delete(s.accountsOrigin, prev.address)
+ delete(s.storagesOrigin, prev.address)
}
newobj.created = true
@@ -903,10 +903,10 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// Note, we can't do this only at the end of a block because multiple
// transactions within the same block might self-destruct and then
// resurrect an account; but the snapshotter needs both events.
- delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
- delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
- delete(s.accountsOrigin, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
- delete(s.storagesOrigin, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
+ delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect)
+ delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect)
+ delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect)
} else {
obj.finalise(true) // Prefetch slots in the background
}
@@ -1021,8 +1021,8 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
if it.Hash() == (common.Hash{}) {
continue
}
- nodeSize += common.StorageSize(len(it.Path()) + len(it.NodeBlob()))
- set.AddNode(it.Path(), trienode.NewNodeWithPrev(common.Hash{}, nil, it.NodeBlob()))
+ nodeSize += common.StorageSize(len(it.Path()))
+ set.AddNode(it.Path(), trienode.NewDeleted())
}
if err := it.Error(); err != nil {
return false, nil, nil, err
@@ -1065,8 +1065,8 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
//
// In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value.
-func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Hash]struct{}, error) {
- incomplete := make(map[common.Hash]struct{})
+func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) {
+ incomplete := make(map[common.Address]struct{})
for addr, prev := range s.stateObjectsDestruct {
// The original account was non-existing, and it's marked as destructed
// in the scope of block. It can be case (a) or (b).
@@ -1076,12 +1076,12 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.H
addrHash := crypto.Keccak256Hash(addr[:])
if prev == nil {
if _, ok := s.accounts[addrHash]; ok {
- s.accountsOrigin[addrHash] = nil // case (b)
+ s.accountsOrigin[addr] = nil // case (b)
}
continue
}
// It can overwrite the data in s.accountsOrigin set by 'updateStateObject'.
- s.accountsOrigin[addrHash] = snapshot.SlimAccountRLP(prev.Nonce, prev.Balance, prev.Root, prev.CodeHash) // case (c) or (d)
+ s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d)
// Short circuit if the storage was empty.
if prev.Root == types.EmptyRootHash {
@@ -1097,17 +1097,17 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.H
// created. In this case, wipe the entire storage state diff because
// of aborted deletion.
if aborted {
- incomplete[addrHash] = struct{}{}
- delete(s.storagesOrigin, addrHash)
+ incomplete[addr] = struct{}{}
+ delete(s.storagesOrigin, addr)
continue
}
- if s.storagesOrigin[addrHash] == nil {
- s.storagesOrigin[addrHash] = slots
+ if s.storagesOrigin[addr] == nil {
+ s.storagesOrigin[addr] = slots
} else {
// It can overwrite the data in s.storagesOrigin[addr] set by
// 'object.updateTrie'.
for key, val := range slots {
- s.storagesOrigin[addrHash][key] = val
+ s.storagesOrigin[addr][key] = val
}
}
if err := nodes.Merge(set); err != nil {
@@ -1251,11 +1251,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
}
if root != origin {
start := time.Now()
- set := &triestate.Set{
- Accounts: s.accountsOrigin,
- Storages: s.storagesOrigin,
- Incomplete: incomplete,
- }
+ set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)
if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
return common.Hash{}, err
}
@@ -1267,8 +1263,8 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
// Clear all internal flags at the end of commit operation.
s.accounts = make(map[common.Hash][]byte)
s.storages = make(map[common.Hash]map[common.Hash][]byte)
- s.accountsOrigin = make(map[common.Hash][]byte)
- s.storagesOrigin = make(map[common.Hash]map[common.Hash][]byte)
+ s.accountsOrigin = make(map[common.Address][]byte)
+ s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
s.stateObjectsDirty = make(map[common.Address]struct{})
s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount)
return root, nil
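
The net effect of the statedb changes above is that the origin maps are now keyed by plain common.Address instead of the Keccak256 hash of the address, which the path-based scheme requires since an address cannot be recovered from its hash without preimages. An illustrative sketch, not part of the patch:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	addr := common.HexToAddress("0x00000000000000000000000000000000000000aa")

	// New shape: address-keyed, matching the triestate.Set handed to TrieDB().Update.
	accountsOrigin := make(map[common.Address][]byte)
	storagesOrigin := make(map[common.Address]map[common.Hash][]byte)
	accountsOrigin[addr] = nil // nil marks an account that did not exist before
	storagesOrigin[addr] = map[common.Hash][]byte{}

	// The hashed form stays derivable on demand whenever the trie needs it.
	fmt.Println(crypto.Keccak256Hash(addr.Bytes()).Hex())
}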
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 76311a6b11..bea297d0f9 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -31,8 +31,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triestate"
@@ -172,8 +172,8 @@ func (test *stateTest) String() string {
func (test *stateTest) run() bool {
var (
roots []common.Hash
- accountList []map[common.Hash][]byte
- storageList []map[common.Hash]map[common.Hash][]byte
+ accountList []map[common.Address][]byte
+ storageList []map[common.Address]map[common.Hash][]byte
onCommit = func(states *triestate.Set) {
accountList = append(accountList, copySet(states.Accounts))
storageList = append(storageList, copy2DSet(states.Storages))
@@ -236,8 +236,9 @@ func (test *stateTest) run() bool {
// - the account was indeed not present in trie
// - the account is present in new trie, nil->nil is regarded as invalid
// - the slots transition is correct
-func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addrHash common.Hash, slots map[common.Hash][]byte) error {
+func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
// Verify account change
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob := otr.Get(addrHash.Bytes())
nBlob := ntr.Get(addrHash.Bytes())
if len(oBlob) != 0 {
@@ -280,14 +281,15 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database
// - the account was indeed present in trie
// - the account in old trie matches the provided value
// - the slots transition is correct
-func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addrHash common.Hash, origin []byte, slots map[common.Hash][]byte) error {
+func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
// Verify account change
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob := otr.Get(addrHash.Bytes())
nBlob := ntr.Get(addrHash.Bytes())
if len(oBlob) == 0 {
return fmt.Errorf("missing account in old trie, %x", addrHash)
}
- full, err := snapshot.FullAccountRLP(origin)
+ full, err := types.FullAccountRLP(origin)
if err != nil {
return err
}
@@ -327,7 +329,7 @@ func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database,
return nil
}
-func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Hash][]byte, storagesOrigin map[common.Hash]map[common.Hash][]byte) error {
+func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
otr, err := trie.New(trie.StateTrieID(root), db)
if err != nil {
return err
@@ -336,12 +338,12 @@ func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Datab
if err != nil {
return err
}
- for addrHash, account := range accountsOrigin {
+ for addr, account := range accountsOrigin {
var err error
if len(account) == 0 {
- err = test.verifyAccountCreation(next, db, otr, ntr, addrHash, storagesOrigin[addrHash])
+ err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr])
} else {
- err = test.verifyAccountUpdate(next, db, otr, ntr, addrHash, accountsOrigin[addrHash], storagesOrigin[addrHash])
+ err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr])
}
if err != nil {
return err
diff --git a/core/types/hashes.go b/core/types/hashes.go
new file mode 100644
index 0000000000..0ce1835b51
--- /dev/null
+++ b/core/types/hashes.go
@@ -0,0 +1,32 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// TrieRootHash returns the hash itself if it's non-empty, or the predefined
+// EmptyRootHash instead.
+func TrieRootHash(hash common.Hash) common.Hash {
+ if hash == (common.Hash{}) {
+ log.Error("Zero trie root hash!")
+ return EmptyRootHash
+ }
+ return hash
+}
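
A short usage sketch for the helper above:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// A zero hash is normalized to the canonical empty-trie root (and logged).
	fmt.Println(types.TrieRootHash(common.Hash{}) == types.EmptyRootHash) // true

	// Any non-zero hash passes through unchanged.
	h := common.HexToHash("0x01")
	fmt.Println(types.TrieRootHash(h) == h) // true
}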
diff --git a/core/types/state_account.go b/core/types/state_account.go
index 5853fb58b5..95ee1954d2 100644
--- a/core/types/state_account.go
+++ b/core/types/state_account.go
@@ -17,10 +17,12 @@
package types
import (
+ "bytes"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
)
var emptyCodeHash = crypto.Keccak256(nil)
@@ -74,3 +76,66 @@ type DirtyStateAccountsAndBlock struct {
BlockHash common.Hash
DirtyAccounts []*DirtyStateAccount
}
+
+// SlimAccount is a modified version of an Account, where the root is replaced
+// with a byte slice. This format can be used to represent the full consensus
+// format or the slim format, which replaces an empty root and code hash with a
+// nil byte slice.
+type SlimAccount struct {
+ Nonce uint64
+ Balance *big.Int
+ Root []byte // Nil if root equals to types.EmptyRootHash
+ CodeHash []byte // Nil if hash equals to types.EmptyCodeHash
+}
+
+// SlimAccountRLP encodes the state account in 'slim RLP' format.
+func SlimAccountRLP(account StateAccount) []byte {
+ slim := SlimAccount{
+ Nonce: account.Nonce,
+ Balance: account.Balance,
+ }
+ if account.Root != EmptyRootHash {
+ slim.Root = account.Root[:]
+ }
+ if !bytes.Equal(account.CodeHash, emptyCodeHash[:]) {
+ slim.CodeHash = account.CodeHash
+ }
+ data, err := rlp.EncodeToBytes(slim)
+ if err != nil {
+ panic(err)
+ }
+ return data
+}
+
+// FullAccount decodes data in the 'slim RLP' format and returns
+// the consensus-format account.
+func FullAccount(data []byte) (*StateAccount, error) {
+ var slim SlimAccount
+ if err := rlp.DecodeBytes(data, &slim); err != nil {
+ return nil, err
+ }
+ var account StateAccount
+ account.Nonce, account.Balance = slim.Nonce, slim.Balance
+
+ // Interpret the storage root and code hash in slim format.
+ if len(slim.Root) == 0 {
+ account.Root = EmptyRootHash
+ } else {
+ account.Root = common.BytesToHash(slim.Root)
+ }
+ if len(slim.CodeHash) == 0 {
+ account.CodeHash = emptyCodeHash[:]
+ } else {
+ account.CodeHash = slim.CodeHash
+ }
+ return &account, nil
+}
+
+// FullAccountRLP converts data in the 'slim RLP' format into the full RLP format.
+func FullAccountRLP(data []byte) ([]byte, error) {
+ account, err := FullAccount(data)
+ if err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(account)
+}
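
A round-trip sketch of the slim encoding above: the empty root and code hash are elided to nil on the way out, and FullAccount restores the canonical defaults on the way back:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	acc := types.StateAccount{
		Nonce:    7,
		Balance:  big.NewInt(42),
		Root:     types.EmptyRootHash,   // encoded as nil in slim form
		CodeHash: crypto.Keccak256(nil), // encoded as nil in slim form
	}
	slim := types.SlimAccountRLP(acc)

	full, err := types.FullAccount(slim)
	if err != nil {
		panic(err)
	}
	fmt.Println(full.Nonce == acc.Nonce, full.Root == types.EmptyRootHash) // true true
}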
diff --git a/eth/protocols/snap/protocol.go b/eth/protocols/snap/protocol.go
index 5528e9212e..57deb54772 100644
--- a/eth/protocols/snap/protocol.go
+++ b/eth/protocols/snap/protocol.go
@@ -21,7 +21,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/state/snapshot"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -104,7 +104,7 @@ func (p *AccountRangePacket) Unpack() ([]common.Hash, [][]byte, error) {
accounts = make([][]byte, len(p.Accounts))
)
for i, acc := range p.Accounts {
- val, err := snapshot.FullAccountRLP(acc.Body)
+ val, err := types.FullAccountRLP(acc.Body)
if err != nil {
return nil, nil, fmt.Errorf("invalid account %x: %v", acc.Body, err)
}
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index c4b0a25dda..538d6f0b5a 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -33,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
@@ -2250,13 +2249,13 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
if task.needCode[i] || task.needState[i] {
break
}
- slim := snapshot.SlimAccountRLP(res.accounts[i].Nonce, res.accounts[i].Balance, res.accounts[i].Root, res.accounts[i].CodeHash)
+ slim := types.SlimAccountRLP(*res.accounts[i])
rawdb.WriteAccountSnapshot(batch, hash, slim)
// If the task is complete, drop it into the stack trie to generate
// account trie nodes for it
if !task.needHeal[i] {
- full, err := snapshot.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
+ full, err := types.FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
if err != nil {
panic(err) // Really shouldn't ever happen
}
@@ -2875,7 +2874,7 @@ func (s *Syncer) onHealState(paths [][]byte, value []byte) error {
if err := rlp.DecodeBytes(value, &account); err != nil {
return nil
}
- blob := snapshot.SlimAccountRLP(account.Nonce, account.Balance, account.Root, account.CodeHash)
+ blob := types.SlimAccountRLP(account)
rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
s.accountHealed += 1
s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
diff --git a/ethdb/database.go b/ethdb/database.go
index a3c5570b53..52e9f30328 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -79,9 +79,10 @@ type AncientReaderOp interface {
// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
// It will return
- // - at most 'count' items,
- // - at least 1 item (even if exceeding the maxBytes), but will otherwise
- //   return as many items as fit into maxBytes.
+ // - at most 'count' items,
+ // - if maxBytes is specified: at least 1 item (even if exceeding the maxByteSize),
+ // but will otherwise return as many items as fit into maxByteSize.
+ // - if maxBytes is not specified, 'count' items will be returned if they are present
AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error)
// Ancients returns the ancient item numbers in the ancient store.
@@ -118,7 +119,7 @@ type AncientWriter interface {
// TruncateHead discards all but the first n ancient data from the ancient store.
// After the truncation, the latest item can be accessed as item n-1 (counting from 0).
// Tail 0 -> (n-1) New-head xxxx Old-head
- TruncateHead(n uint64) error
+ TruncateHead(n uint64) (uint64, error)
// TruncateTail discards the first n ancient data from the ancient store. The already
// deleted items are ignored. After the truncation, the earliest item can be accessed
@@ -126,7 +127,7 @@ type AncientWriter interface {
// immediately, but only once the accumulated deleted data reach the threshold,
// at which point they are removed all together.
// Old-tail (0) xxxxxxx New-tail (n) -> Head
- TruncateTail(n uint64) error
+ TruncateTail(n uint64) (uint64, error)
// Sync flushes all in-memory ancient store data to disk.
Sync() error
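
A hedged sketch of consuming the widened signatures; any ethdb.AncientStore implementation would do, and the returned value is read here as the pre-truncation item count:

package ancientutil

import (
	"log"

	"github.com/ethereum/go-ethereum/ethdb"
)

// truncateTo trims an ancient store to the window [tail, head).
func truncateTo(store ethdb.AncientStore, head, tail uint64) {
	// Both truncations now report where the store stood before the cut,
	// so callers can log or meter how much data was actually dropped.
	oldHead, err := store.TruncateHead(head)
	if err != nil {
		log.Fatal(err)
	}
	oldTail, err := store.TruncateTail(tail)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("truncated head %d->%d, tail %d->%d", oldHead, head, oldTail, tail)
}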
diff --git a/internal/testrand/rand.go b/internal/testrand/rand.go
new file mode 100644
index 0000000000..690993de05
--- /dev/null
+++ b/internal/testrand/rand.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package testrand
+
+import (
+ crand "crypto/rand"
+ "encoding/binary"
+ mrand "math/rand"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// prng is a pseudo random number generator seeded by strong randomness.
+var prng = initRand()
+
+func initRand() *mrand.Rand {
+ var seed [8]byte
+ crand.Read(seed[:])
+ rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
+ return rnd
+}
+
+// Bytes generates a random byte slice with specified length.
+func Bytes(n int) []byte {
+ r := make([]byte, n)
+ prng.Read(r)
+ return r
+}
+
+// Hash generates a random hash.
+func Hash() common.Hash {
+ return common.BytesToHash(Bytes(common.HashLength))
+}
+
+// Address generates a random address.
+func Address() common.Address {
+ return common.BytesToAddress(Bytes(common.AddressLength))
+}
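
Usage sketch for the new helper package (note that internal packages are only importable from within the go-ethereum module itself):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/internal/testrand"
)

func main() {
	fmt.Printf("blob: %x\n", testrand.Bytes(8)) // 8 random bytes
	fmt.Println("hash:", testrand.Hash())       // random 32-byte hash
	fmt.Println("addr:", testrand.Address())    // random 20-byte address
}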
diff --git a/trie/committer.go b/trie/committer.go
index add2c02efe..72abd9a1fd 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -168,9 +168,9 @@ func (c *committer) store(path []byte, n node) node {
// The node is embedded in its parent, in other words, this node
// will not be stored in the database independently, mark it as
// deleted only if the node was existent in database before.
- prev, ok := c.tracer.accessList[string(path)]
+ _, ok := c.tracer.accessList[string(path)]
if ok {
- c.nodes.AddNode(path, trienode.NewNodeWithPrev(common.Hash{}, nil, prev))
+ c.nodes.AddNode(path, trienode.NewDeleted())
}
return n
}
@@ -179,10 +179,9 @@ func (c *committer) store(path []byte, n node) node {
var (
nhash = common.BytesToHash(hash)
blob, _ = rlp.EncodeToBytes(n)
- node = trienode.NewNodeWithPrev(
+ node = trienode.New(
nhash,
blob,
- c.tracer.accessList[string(path)],
)
)
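
The committer now drops previous-value tracking: live nodes are built with trienode.New(hash, blob) and deletions use the dedicated trienode.NewDeleted() marker. A minimal sketch of the distinction:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

func main() {
	updated := trienode.New(common.HexToHash("0x01"), []byte{0xc0})
	deleted := trienode.NewDeleted() // zero hash, nil blob

	fmt.Println(updated.IsDeleted()) // false
	fmt.Println(deleted.IsDeleted()) // true
}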
diff --git a/trie/database.go b/trie/database.go
index 6894164477..8988859a50 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -18,12 +18,9 @@ package trie
import (
"errors"
- "time"
- "github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triestate"
@@ -56,7 +53,9 @@ type backend interface {
// Update performs a state transition by committing dirty nodes contained
// in the given set in order to update state from the specified parent to
// the specified root.
- Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error
+ // The passed-in maps (nodes, states) will be retained to avoid copying
+ // everything. Therefore, these maps must not be changed afterwards.
+ Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error
// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
@@ -78,20 +77,15 @@ type backend interface {
// types of node backend as an entrypoint. It's responsible for all interactions
// relevant with trie nodes and node preimages.
type Database struct {
- config *Config // Configuration for trie database
- diskdb ethdb.Database // Persistent database to store the snapshot
- cleans *fastcache.Cache // Megabytes permitted using for read caches
- preimages *preimageStore // The store for caching preimages
- backend backend // The backend for managing trie nodes
+ config *Config // Configuration for trie database
+ diskdb ethdb.Database // Persistent database to store the snapshot
+ preimages *preimageStore // The store for caching preimages
+ backend backend // The backend for managing trie nodes
}
// prepare initializes the database with provided configs, but the
// database backend is still left as nil.
func prepare(diskdb ethdb.Database, config *Config) *Database {
- var cleans *fastcache.Cache
- if config != nil && config.Cache > 0 {
- cleans = fastcache.New(config.Cache * 1024 * 1024)
- }
var preimages *preimageStore
if config != nil && config.Preimages {
preimages = newPreimageStore(diskdb)
@@ -99,7 +93,6 @@ func prepare(diskdb ethdb.Database, config *Config) *Database {
return &Database{
config: config,
diskdb: diskdb,
- cleans: cleans,
preimages: preimages,
}
}
@@ -114,8 +107,13 @@ func NewDatabase(diskdb ethdb.Database) *Database {
 // The path-based scheme is not activated yet; the database is always
 // initialized with the legacy hash-based scheme by default.
func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
+ var cleans int
+
+ if config != nil && config.Cache != 0 {
+ cleans = config.Cache * 1024 * 1024
+ }
db := prepare(diskdb, config)
- db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
+ db.backend = hashdb.New(diskdb, cleans, mptResolver{})
return db
}
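
Editor's note (illustration only, not part of the patch): a sketch of the resulting construction path, assuming the trie.Config fields used elsewhere in this patch (Cache in megabytes, Preimages as a flag).

    package main

    import (
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        // The Cache allowance is specified in megabytes; NewDatabaseWithConfig
        // converts it to bytes before handing it to hashdb.New.
        db := trie.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{
            Cache:     256,  // 256 MiB clean cache; 0 disables it
            Preimages: true, // also allocates the preimage store
        })
        _ = db
    }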
@@ -136,7 +134,7 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n
if db.preimages != nil {
db.preimages.commit(false)
}
- return db.backend.Update(root, parent, nodes)
+ return db.backend.Update(root, parent, block, nodes, states)
}
// Commit iterates over all the children of a particular node, writes them out
@@ -196,24 +194,6 @@ func (db *Database) Close() error {
return db.backend.Close()
}
-// saveCache saves clean state cache to given directory path
-// using specified CPU cores.
-func (db *Database) saveCache(dir string, threads int) error {
- if db.cleans == nil {
- return nil
- }
- log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)
-
- start := time.Now()
- err := db.cleans.SaveToFileConcurrent(dir, threads)
- if err != nil {
- log.Error("Failed to persist clean trie cache", "error", err)
- return err
- }
- log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
- return nil
-}
-
// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold. The held pre-images accumulated
// up to this point will be flushed in case the size exceeds the threshold.
diff --git a/trie/database_test.go b/trie/database_test.go
index f81dc135ca..0d4f63e467 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -27,10 +27,10 @@ import (
func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
db := prepare(diskdb, nil)
if scheme == rawdb.HashScheme {
- db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
+ db.backend = hashdb.New(diskdb, 0, mptResolver{})
}
- //} else {
- // db.backend = snap.New(diskdb, db.cleans, nil)
- //}
+ // //} else {
+ // // db.backend = snap.New(diskdb, db.cleans, nil)
+ // //}
return db
}
diff --git a/trie/testutil/utils.go b/trie/testutil/utils.go
new file mode 100644
index 0000000000..a75d0431b0
--- /dev/null
+++ b/trie/testutil/utils.go
@@ -0,0 +1,61 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package testutil
+
+import (
+ crand "crypto/rand"
+ "encoding/binary"
+ mrand "math/rand"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+)
+
+// prng is a pseudo random number generator seeded with strong randomness
+// (crypto/rand) at startup, so each run draws an independent sequence.
+var prng = initRand()
+
+func initRand() *mrand.Rand {
+ var seed [8]byte
+ crand.Read(seed[:])
+ rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:]))))
+ return rnd
+}
+
+// RandBytes generates a random byte slice with specified length.
+func RandBytes(n int) []byte {
+ r := make([]byte, n)
+ prng.Read(r)
+ return r
+}
+
+// RandomHash generates a random blob of data and returns it as a hash.
+func RandomHash() common.Hash {
+ return common.BytesToHash(RandBytes(common.HashLength))
+}
+
+// RandomAddress generates a random blob of data and returns it as an address.
+func RandomAddress() common.Address {
+ return common.BytesToAddress(RandBytes(common.AddressLength))
+}
+
+// RandomNode generates a random node.
+func RandomNode() *trienode.Node {
+ val := RandBytes(100)
+ return trienode.New(crypto.Keccak256Hash(val), val)
+}
diff --git a/trie/tracer.go b/trie/tracer.go
index 796b792afc..7ccd49f6b1 100644
--- a/trie/tracer.go
+++ b/trie/tracer.go
@@ -16,11 +16,6 @@
package trie
-import (
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/trie/trienode"
-)
-
// tracer tracks the changes of trie nodes. During the trie operations,
// some nodes can be deleted from the trie, while these deleted nodes
 // won't be captured by trie.Hasher or trie.Committer. Thus, these deleted
@@ -114,16 +109,19 @@ func (t *tracer) copy() *tracer {
}
}
-// markDeletions puts all tracked deletions into the provided nodeset.
-func (t *tracer) markDeletions(set *trienode.NodeSet) {
+// deletedNodes returns a list of node paths which are deleted from the trie.
+func (t *tracer) deletedNodes() []string {
+ var paths []string
for path := range t.deletes {
 // It's possible a few deleted nodes were embedded
 // in their parents before; such deletions have no
 // effect since nothing was stored independently, filter them out.
- prev, ok := t.accessList[path]
+ _, ok := t.accessList[path]
+
if !ok {
continue
}
- set.AddNode([]byte(path), trienode.NewNodeWithPrev(common.Hash{}, nil, prev))
+ paths = append(paths, path)
}
+ return paths
}
diff --git a/trie/trie.go b/trie/trie.go
index ae00e542e5..51883e7bb8 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -585,11 +585,20 @@ func (t *Trie) Hash() common.Hash {
// be created with new root and updated trie database for following usage
func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
defer t.tracer.reset()
- nodes := trienode.NewNodeSet(t.owner)
- t.tracer.markDeletions(nodes)
+ // (a) The trie was empty and no update happened => return nil
+ // (b) The trie was non-empty and all nodes were dropped => return
+ //     a node set containing all the deleted nodes
if t.root == nil {
- return emptyRoot, nodes, nil
+ paths := t.tracer.deletedNodes()
+ if len(paths) == 0 {
+ return types.EmptyRootHash, nil, nil // case (a)
+ }
+ nodes := trienode.NewNodeSet(t.owner)
+ for _, path := range paths {
+ nodes.AddNode([]byte(path), trienode.NewDeleted())
+ }
+ return types.EmptyRootHash, nodes, nil // case (b)
}
// Derive the hash for all dirty nodes first. We hold the assumption
// in the following procedure that all nodes are hashed.
@@ -604,6 +613,10 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
t.root = hashedNode
return rootHash, nil, nil
}
+ nodes := trienode.NewNodeSet(t.owner)
+ for _, path := range t.tracer.deletedNodes() {
+ nodes.AddNode([]byte(path), trienode.NewDeleted())
+ }
h := newCommitter(nodes, t.tracer, collectLeaf)
newRoot, nodes, err := h.Commit(t.root)
if err != nil {
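
Editor's note (illustration only, not part of the patch): how the two empty-root cases above surface to a caller. This fragment assumes tr is a *trie.Trie whose keys have all been deleted since it was loaded, inside a function returning error.

    root, set, err := tr.Commit(false)
    if err != nil {
        return err
    }
    if root == types.EmptyRootHash {
        if set == nil {
            // case (a): the trie was empty and untouched; nothing to persist
        } else {
            // case (b): every node was removed; the set holds only deletions
            for path, n := range set.Nodes {
                _ = path
                _ = n.IsDeleted() // true for every entry here
            }
        }
    }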
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index 58a9f7ed86..92cc8f54df 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -24,9 +24,12 @@ import (
// Reader wraps the Node and NodeBlob method of a backing trie store.
type Reader interface {
- // Node retrieves the RLP-encoded trie node blob with the provided trie
- // identifier, node path and the corresponding node hash. No error will
- // be returned if the node is not found.
+ // Node retrieves the trie node blob with the provided trie identifier, node path and
+ // the corresponding node hash. No error will be returned if the node is not found.
+ //
+ // When looking up nodes in the account trie, 'owner' is the zero hash. For contract
+ // storage trie nodes, 'owner' is the hash of the account address that contains the
+ // storage.
Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
}
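
Editor's note (illustration only, not part of the patch): the 'owner' convention in practice for any Reader implementation; reader, path, hash and addr are assumed to be in scope.

    // Account trie lookup: the owner is the zero hash.
    blob, err := reader.Node(common.Hash{}, path, hash)

    // Contract storage trie lookup: the owner is the hash of the
    // owning account's address.
    owner := crypto.Keccak256Hash(addr.Bytes())
    blob, err = reader.Node(owner, path, hash)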
diff --git a/trie/trie_test.go b/trie/trie_test.go
index e87c4dba91..062f465066 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -440,35 +440,35 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
if !ok || n.IsDeleted() {
return errors.New("expect new node")
}
- if len(n.Prev) > 0 {
- return errors.New("unexpected origin value")
- }
+ // if len(n.Prev) > 0 {
+ // return errors.New("unexpected origin value")
+ // }
}
// Check deletion set
- for path, blob := range deletes {
+ for path := range deletes {
n, ok := set.Nodes[path]
if !ok || !n.IsDeleted() {
return errors.New("expect deleted node")
}
- if len(n.Prev) == 0 {
- return errors.New("expect origin value")
- }
- if !bytes.Equal(n.Prev, blob) {
- return errors.New("invalid origin value")
- }
+ // if len(n.Prev) == 0 {
+ // return errors.New("expect origin value")
+ // }
+ // if !bytes.Equal(n.Prev, blob) {
+ // return errors.New("invalid origin value")
+ // }
}
// Check update set
- for path, blob := range updates {
+ for path := range updates {
n, ok := set.Nodes[path]
if !ok || n.IsDeleted() {
return errors.New("expect updated node")
}
- if len(n.Prev) == 0 {
- return errors.New("expect origin value")
- }
- if !bytes.Equal(n.Prev, blob) {
- return errors.New("invalid origin value")
- }
+ // if len(n.Prev) == 0 {
+ // return errors.New("expect origin value")
+ // }
+ // if !bytes.Equal(n.Prev, blob) {
+ // return errors.New("invalid origin value")
+ // }
}
return nil
}
diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go
index 096cd632d5..56cd38699a 100644
--- a/trie/triedb/hashdb/database.go
+++ b/trie/triedb/hashdb/database.go
@@ -31,30 +31,31 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
)
var (
- memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
- memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
- memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
- memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)
-
- memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
- memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
- memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
- memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)
-
- memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
- memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
- memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)
-
- memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
- memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
- memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)
-
- memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
- memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
- memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
+ memcacheCleanHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil)
+ memcacheCleanMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil)
+ memcacheCleanReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil)
+ memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", nil)
+
+ memcacheDirtyHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil)
+ memcacheDirtyMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil)
+ memcacheDirtyReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil)
+ memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil)
+
+ memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil)
+ memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil)
+ memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil)
+
+ memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil)
+ memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil)
+ memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil)
+
+ memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil)
+ memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil)
+ memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil)
)
// ChildResolver defines the required method to decode the provided
@@ -126,8 +127,14 @@ type Config struct {
}
// New initializes the hash-based node database.
-func New(diskdb ethdb.Database, cleans *fastcache.Cache, resolver ChildResolver) *Database {
+func New(diskdb ethdb.Database, size int, resolver ChildResolver) *Database {
+ // Initialize the clean cache if the specified cache allowance
+ // is non-zero. Note, the size is in bytes.
+ var cleans *fastcache.Cache
+ if size > 0 {
+ cleans = fastcache.New(size)
+ }
db := &Database{
diskdb: diskdb,
resolver: resolver,
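
Editor's note (illustration only, not part of the patch): with the new signature the caller passes a byte allowance instead of a prebuilt fastcache instance; a hypothetical direct call, where resolver is whatever ChildResolver the caller already has.

    backend := hashdb.New(diskdb, 256*1024*1024, resolver) // 256 MiB clean cache; 0 disables it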
@@ -282,7 +289,7 @@ func (db *Database) Dereference(root common.Hash) {
db.gctime += time.Since(start)
memcacheGCTimeTimer.Update(time.Since(start))
- memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))
log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
@@ -403,7 +410,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
db.flushtime += time.Since(start)
memcacheFlushTimeTimer.Update(time.Since(start))
- memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))
log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
@@ -449,7 +456,7 @@ func (db *Database) Commit(node common.Hash, report bool) error {
// Reset the storage counters and bumped metrics
memcacheCommitTimeTimer.Update(time.Since(start))
- memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
+ memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize))
memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
logger := log.Info
@@ -561,7 +568,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
// account trie with multiple storage tries if necessary.
//
 // root and parent are used by the path-based scheme only
-func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
// Ensure the parent state is present and signal a warning if not.
if parent != types.EmptyRootHash {
if blob, _ := db.Node(parent); len(blob) == 0 {
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
new file mode 100644
index 0000000000..48b4744b40
--- /dev/null
+++ b/trie/triedb/pathdb/database.go
@@ -0,0 +1,401 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+// maxDiffLayers is the maximum diff layers allowed in the layer tree.
+const maxDiffLayers = 128
+
+// layer is the interface implemented by all state layers which includes some
+// public methods and some additional methods for internal usage.
+type layer interface {
+ // Node retrieves the trie node with the node info. An error will be returned
+ // if the read operation exits abnormally. For example, if the layer is already
+ // stale, or the associated state is regarded as corrupted. Notably, no error
+ // will be returned if the requested node is not found in database.
+ Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
+
+ // rootHash returns the root hash for which this layer was made.
+ rootHash() common.Hash
+
+ // stateID returns the associated state id of the layer.
+ stateID() uint64
+
+ // parentLayer returns the parent layer of this layer, or nil if the disk layer was reached.
+ parentLayer() layer
+
+ // update creates a new layer on top of the existing layer diff tree with
+ // the provided dirty trie nodes along with the state change set.
+ //
+ // Note, the maps are retained by the method to avoid copying everything.
+ update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer
+
+ // journal commits an entire diff hierarchy to disk into a single journal entry.
+ // This is meant to be used during shutdown to persist the layer without
+ // flattening everything down (bad for reorgs).
+ journal(w io.Writer) error
+}
+
+// Config contains the settings for database.
+type Config struct {
+ StateLimit uint64 // Number of recent blocks to maintain state history for
+ CleanSize int // Maximum memory allowance (in bytes) for caching clean nodes
+ DirtySize int // Maximum memory allowance (in bytes) for caching dirty nodes
+ ReadOnly bool // Flag whether the database is opened in read only mode.
+}
+
+var (
+ // defaultCleanSize is the default memory allowance of clean cache.
+ defaultCleanSize = 16 * 1024 * 1024
+
+ // defaultBufferSize is the default memory allowance of node buffer
+ // that aggregates the writes from above until it's flushed into the
+ // disk. Do not increase the buffer size arbitrarily, otherwise the
+ // system pause time will increase when the database writes happen.
+ defaultBufferSize = 128 * 1024 * 1024
+)
+
+// Defaults contains default settings for Ethereum mainnet.
+var Defaults = &Config{
+ StateLimit: params.FullImmutabilityThreshold,
+ CleanSize: defaultCleanSize,
+ DirtySize: defaultBufferSize,
+}
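
Editor's note (illustration only, not part of the patch): a sketch of explicit configuration; passing a nil config to New falls back to Defaults.

    db := pathdb.New(diskdb, &pathdb.Config{
        StateLimit: 90000,            // keep state history for the most recent 90000 blocks
        CleanSize:  32 * 1024 * 1024, // 32 MiB clean node cache
        DirtySize:  64 * 1024 * 1024, // 64 MiB node buffer before flushing to disk
    })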
+
+// Database is a multiple-layered structure for maintaining in-memory trie nodes.
+// It consists of one persistent base layer backed by a key-value store, on top
+// of which arbitrarily many in-memory diff layers are stacked. The memory diffs
+// can form a tree with branching, but the disk layer is singleton and common to
+// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can
+// be applied to roll back. The deepest reorg that can be handled depends on the
+// amount of state history tracked on disk.
+//
+// At most one readable and writable database can be opened at the same time in
+// the whole system, which ensures that only one database writer can operate on
+// the disk state. Unexpected open operations can cause the system to panic.
+type Database struct {
+ // readOnly is the flag whether the mutation is allowed to be applied.
+ // It will be set automatically when the database is journaled during
+ // the shutdown to reject all following unexpected mutations.
+ readOnly bool // Indicator if database is opened in read only mode
+ bufferSize int // Memory allowance (in bytes) for caching dirty nodes
+ config *Config // Configuration for database
+ diskdb ethdb.Database // Persistent storage for matured trie nodes
+ tree *layerTree // The group for all known layers
+ freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests
+ lock sync.RWMutex // Lock to prevent mutations from happening at the same time
+}
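
Editor's note (illustration only, not part of the patch): the layer tree described above, with branching diff layers stacked over the singleton disk layer.

    diff C2a    diff C2b     <- competing children, e.g. a reorg branch
         \        /
          diff C1
             |
         disk layer          <- singleton, backed by diskdb (and freezer histories)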
+
+// New attempts to load an already existing layer from a persistent key-value
+// store (with a number of memory layers from a journal). If the journal is not
+// matched with the base persistent layer, all the recorded diff layers are discarded.
+func New(diskdb ethdb.Database, config *Config) *Database {
+ if config == nil {
+ config = Defaults
+ }
+ db := &Database{
+ readOnly: config.ReadOnly,
+ bufferSize: config.DirtySize,
+ config: config,
+ diskdb: diskdb,
+ }
+ // Construct the layer tree by resolving the in-disk singleton state
+ // and in-memory layer journal.
+ db.tree = newLayerTree(db.loadLayers())
+
+ // Open the freezer for state history if the passed database contains an
+ // ancient store. Otherwise, all the relevant functionalities are disabled.
+ //
+ // Because the freezer can only be opened once at the same time, this
+ // mechanism also ensures that at most one **non-readOnly** database
+ // is opened at the same time to prevent accidental mutation.
+ if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly {
+ db.freezer, err = rawdb.NewStateHistoryFreezer(ancient, false)
+ if err != nil {
+ log.Crit("Failed to open state history freezer", "err", err)
+ }
+
+ // Truncate the extra state histories above the current diskLayer
+ // in the freezer in case it's not aligned with the disk layer.
+ pruned, err := truncateFromHead(db.diskdb, db.freezer, db.tree.bottom().stateID())
+ if err != nil {
+ log.Crit("Failed to truncate state history freezer", "err", err)
+ }
+ if pruned > 0 {
+ log.Warn("Truncated extra state histories from freezer", "count", pruned)
+ }
+ }
+ log.Warn("Path-based state scheme is an experimental feature")
+ return db
+}
+
+// Reader retrieves a layer belonging to the given state root.
+func (db *Database) Reader(root common.Hash) (layer, error) {
+ l := db.tree.get(root)
+ if l == nil {
+ return nil, fmt.Errorf("state %#x is not available", root)
+ }
+ return l, nil
+}
+
+// Update adds a new layer into the tree, if that can be linked to an existing
+// old parent. It is disallowed to insert a disk layer (the origin of all). Apart
+// from that, this function flattens the extra diff layers at the bottom into the
+// disk layer to keep only 128 diff layers in memory by default.
+//
+// The passed in maps(nodes, states) will be retained to avoid copying everything.
+// Therefore, these maps must not be changed afterwards.
+func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ // Hold the lock to prevent concurrent mutations.
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil {
+ return err
+ }
+
+ // Keep 128 diff layers in the memory, persistent layer is 129th.
+ // - head layer is paired with HEAD state
+ // - head-1 layer is paired with HEAD-1 state
+ // - ...
+ // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
+ // - head-128 layer(disk layer) is paired with HEAD-128 state
+
+ // If the number of diff layers exceeds 128, the excess diff layers will be flattened down
+ return db.tree.cap(root, maxDiffLayers)
+}
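
Editor's note (illustration only, not part of the patch): the capping behavior from a caller's point of view; makeTransition is a hypothetical helper producing the next root together with its node and state change sets.

    parent := types.EmptyRootHash
    for i := 0; i < 200; i++ {
        root, nodes, states := makeTransition(parent)
        if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
            return err
        }
        parent = root
    }
    // The tree now holds at most maxDiffLayers (128) diff layers; older ones
    // were flattened into the disk layer as part of each Update call.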
+
+// Commit traverses the layer tree downwards from the layer with the provided
+// state root, flattening all the layers below it into the disk layer. It can
+// be used standalone, mostly for test purposes.
+func (db *Database) Commit(root common.Hash, report bool) error {
+ // Hold the lock to prevent concurrent mutations.
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ return db.tree.cap(root, 0)
+}
+
+// Reset rebuilds the database with the specified state as the base.
+//
+// - if target state is empty, clear the stored state and all layers on top
+// - if target state is non-empty, ensure the stored state matches with it
+// and clear all other layers on top.
+func (db *Database) Reset(root common.Hash) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ batch := db.diskdb.NewBatch()
+ root = types.TrieRootHash(root)
+ if root == types.EmptyRootHash {
+ // Empty state is requested as the target, nuke out
+ // the root node and leave all others as dangling.
+ rawdb.DeleteAccountTrieNode(batch, nil)
+ } else {
+ // Ensure the requested state is existent before any
+ // action is applied.
+ _, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ if hash != root {
+ return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root)
+ }
+ }
+ // Mark the disk layer as stale before applying any mutation.
+ db.tree.bottom().markStale()
+
+ // Drop the stale state journal in persistent database and
+ // reset the persistent state id back to zero.
+ rawdb.DeleteTrieJournal(batch)
+ rawdb.WritePersistentStateID(batch, 0)
+ if err := batch.Write(); err != nil {
+ return err
+ }
+
+ // Clean up all state histories in the freezer. Theoretically
+ // all root->id mappings should be removed as well, but since
+ // the mappings can be huge and might take a while to clear,
+ // just leave them on disk to be overwritten later.
+ if db.freezer != nil {
+ if err := db.freezer.Reset(); err != nil {
+ return err
+ }
+ }
+
+ // Re-construct a new disk layer backed by persistent state
+ // with **empty clean cache and node buffer**.
+ dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0))
+ db.tree.reset(dl)
+ log.Info("Rebuilt trie database", "root", root)
+ return nil
+}
+
+// Recover rolls back the database to a specified historical point.
+// The state is supported as the rollback destination only if it is a
+// canonical state and the corresponding trie histories exist.
+func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if rollback operation is not supported.
+ if db.readOnly || db.freezer == nil {
+ return errors.New("state rollback is not supported")
+ }
+
+ // Short circuit if the target state is not recoverable.
+ root = types.TrieRootHash(root)
+ if !db.Recoverable(root) {
+ return errStateUnrecoverable
+ }
+
+ // Apply the state histories upon the disk layer in order.
+ var (
+ start = time.Now()
+ dl = db.tree.bottom()
+ )
+ for dl.rootHash() != root {
+ h, err := readHistory(db.freezer, dl.stateID())
+ if err != nil {
+ return err
+ }
+ dl, err = dl.revert(h, loader)
+ if err != nil {
+ return err
+ }
+ // reset layer with newly created disk layer. It must be
+ // done after each revert operation, otherwise the new
+ // disk layer won't be accessible from outside.
+ db.tree.reset(dl)
+ }
+ rawdb.DeleteTrieJournal(db.diskdb)
+ _, err := truncateFromHead(db.diskdb, db.freezer, dl.stateID())
+ if err != nil {
+ return err
+ }
+ log.Debug("Recovered state", "root", root, "elapsed", common.PrettyDuration(time.Since(start)))
+ return nil
+}
+
+// Recoverable returns an indicator of whether the specified state is recoverable.
+func (db *Database) Recoverable(root common.Hash) bool {
+ // Ensure the requested state is a known state.
+ root = types.TrieRootHash(root)
+ id := rawdb.ReadStateID(db.diskdb, root)
+ if id == nil {
+ return false
+ }
+
+ // Recoverable state must be below the disk layer. Recoverable state
+ // refers only to state that is currently not available, but can be
+ // restored by applying state history.
+ dl := db.tree.bottom()
+ if *id >= dl.stateID() {
+ return false
+ }
+
+ // Ensure the requested state is a canonical state and all state
+ // histories in range [id+1, disklayer.ID] are present and complete.
+ parent := root
+ return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error {
+ if m.parent != parent {
+ return errors.New("unexpected state history")
+ }
+ if len(m.incomplete) > 0 {
+ return errors.New("incomplete state history")
+ }
+ parent = m.root
+ return nil
+ }) == nil
+}
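
Editor's note (illustration only, not part of the patch): a guarded rollback; loader is assumed to be a triestate.TrieLoader, such as the test-only hash loader used later in this patch.

    if db.Recoverable(target) {
        if err := db.Recover(target, loader); err != nil {
            return err
        }
    }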
+
+// Initialized returns an indicator of whether the state data is already
+// initialized in the path-based scheme.
+func (db *Database) Initialized(genesisRoot common.Hash) bool {
+ var inited bool
+ db.tree.forEach(func(layer layer) {
+ if layer.rootHash() != types.EmptyRootHash {
+ inited = true
+ }
+ })
+ return inited
+}
+
+// Size returns the current storage size of the memory cache in front of the
+// persistent database layer.
+func (db *Database) Size() (size common.StorageSize) {
+ db.tree.forEach(func(layer layer) {
+ if diff, ok := layer.(*diffLayer); ok {
+ size += common.StorageSize(diff.memory)
+ }
+ if disk, ok := layer.(*diskLayer); ok {
+ size += disk.size()
+ }
+ })
+ return size
+}
+
+// SetBufferSize sets the node buffer size to the provided value (in bytes).
+func (db *Database) SetBufferSize(size int) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ db.bufferSize = size
+ return db.tree.bottom().setBufferSize(db.bufferSize)
+}
+
+// Scheme returns the node scheme used in the database.
+func (db *Database) Scheme() string {
+ return rawdb.PathScheme
+}
+
+// Close closes the trie database and the held freezer.
+func (db *Database) Close() error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ db.readOnly = true
+ if db.freezer == nil {
+ return nil
+ }
+ return db.freezer.Close()
+}
diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go
new file mode 100644
index 0000000000..89a5042ada
--- /dev/null
+++ b/trie/triedb/pathdb/database_test.go
@@ -0,0 +1,574 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/internal/testrand"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/testutil"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+ "golang.org/x/exp/rand"
+)
+
+func generateAccount(storageRoot common.Hash) types.StateAccount {
+ return types.StateAccount{
+ Nonce: uint64(rand.Intn(100)),
+ Balance: new(big.Int).SetUint64(rand.Uint64()),
+ CodeHash: testrand.Bytes(32),
+ Root: storageRoot,
+ }
+}
+
+func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) {
+ h, err := newTestHasher(addrHash, root, cleans)
+ if err != nil {
+ panic(fmt.Errorf("failed to create hasher, err: %w", err))
+ }
+ for key, val := range dirties {
+ if len(val) == 0 {
+ h.Delete(key.Bytes())
+ } else {
+ h.Update(key.Bytes(), val)
+ }
+ }
+ return h.Commit(false)
+}
+
+const (
+ createAccountOp int = iota
+ modifyAccountOp
+ deleteAccountOp
+ opLen
+)
+
+type genctx struct {
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+ accountOrigin map[common.Address][]byte
+ storageOrigin map[common.Address]map[common.Hash][]byte
+ nodes *trienode.MergedNodeSet
+}
+
+func newCtx() *genctx {
+ return &genctx{
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ accountOrigin: make(map[common.Address][]byte),
+ storageOrigin: make(map[common.Address]map[common.Hash][]byte),
+ nodes: trienode.NewMergedNodeSet(),
+ }
+}
+
+type tester struct {
+ db *Database
+ roots []common.Hash
+ preimages map[common.Hash]common.Address
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+
+ // state snapshots
+ snapAccounts map[common.Hash]map[common.Hash][]byte
+ snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
+}
+
+func newTester(t *testing.T) *tester {
+ var (
+ disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+ db = New(disk, &Config{CleanSize: 256 * 1024, DirtySize: 256 * 1024})
+ obj = &tester{
+ db: db,
+ preimages: make(map[common.Hash]common.Address),
+ accounts: make(map[common.Hash][]byte),
+ storages: make(map[common.Hash]map[common.Hash][]byte),
+ snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
+ snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte),
+ }
+ )
+ for i := 0; i < 2*128; i++ {
+ var parent = types.EmptyRootHash
+ if len(obj.roots) != 0 {
+ parent = obj.roots[len(obj.roots)-1]
+ }
+ root, nodes, states := obj.generate(parent)
+ if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
+ panic(fmt.Errorf("failed to update state changes, err: %w", err))
+ }
+ obj.roots = append(obj.roots, root)
+ }
+ return obj
+}
+
+func (t *tester) release() {
+ t.db.Close()
+ t.db.diskdb.Close()
+}
+
+func (t *tester) randAccount() (common.Address, []byte) {
+ for addrHash, account := range t.accounts {
+ return t.preimages[addrHash], account
+ }
+ return common.Address{}, nil
+}
+
+func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for i := 0; i < 10; i++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ hash := testutil.RandomHash()
+
+ storage[hash] = v
+ origin[hash] = nil
+ }
+ root, set := updateTrie(addrHash, types.EmptyRootHash, storage, nil)
+
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for hash, val := range t.storages[addrHash] {
+ origin[hash] = val
+ storage[hash] = nil
+
+ if len(origin) == 3 {
+ break
+ }
+ }
+ for i := 0; i < 3; i++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ hash := testutil.RandomHash()
+
+ storage[hash] = v
+ origin[hash] = nil
+ }
+ root, set := updateTrie(crypto.Keccak256Hash(addr.Bytes()), root, storage, t.storages[addrHash])
+
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash {
+ var (
+ addrHash = crypto.Keccak256Hash(addr.Bytes())
+ storage = make(map[common.Hash][]byte)
+ origin = make(map[common.Hash][]byte)
+ )
+ for hash, val := range t.storages[addrHash] {
+ origin[hash] = val
+ storage[hash] = nil
+ }
+ root, set := updateTrie(addrHash, root, storage, t.storages[addrHash])
+ if root != types.EmptyRootHash {
+ panic("failed to clear storage trie")
+ }
+ ctx.storages[addrHash] = storage
+ ctx.storageOrigin[addr] = origin
+ ctx.nodes.Merge(set)
+ return root
+}
+
+func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) {
+ var (
+ ctx = newCtx()
+ dirties = make(map[common.Hash]struct{})
+ )
+ for i := 0; i < 20; i++ {
+ switch rand.Intn(opLen) {
+ case createAccountOp:
+ // account creation
+ addr := testutil.RandomAddress()
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := t.accounts[addrHash]; ok {
+ continue
+ }
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ root := t.generateStorage(ctx, addr)
+ ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
+ ctx.accountOrigin[addr] = nil
+ t.preimages[addrHash] = addr
+
+ case modifyAccountOp:
+ // account mutation
+ addr, account := t.randAccount()
+ if addr == (common.Address{}) {
+ continue
+ }
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ acct, _ := types.FullAccount(account)
+ stRoot := t.mutateStorage(ctx, addr, acct.Root)
+ newAccount := types.SlimAccountRLP(generateAccount(stRoot))
+
+ ctx.accounts[addrHash] = newAccount
+ ctx.accountOrigin[addr] = account
+
+ case deleteAccountOp:
+ // account deletion
+ addr, account := t.randAccount()
+ if addr == (common.Address{}) {
+ continue
+ }
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ if _, ok := dirties[addrHash]; ok {
+ continue
+ }
+ dirties[addrHash] = struct{}{}
+
+ acct, _ := types.FullAccount(account)
+ if acct.Root != types.EmptyRootHash {
+ t.clearStorage(ctx, addr, acct.Root)
+ }
+ ctx.accounts[addrHash] = nil
+ ctx.accountOrigin[addr] = account
+ }
+ }
+ root, set := updateTrie(common.Hash{}, parent, ctx.accounts, t.accounts)
+ ctx.nodes.Merge(set)
+
+ // Save state snapshot before commit
+ t.snapAccounts[parent] = copyAccounts(t.accounts)
+ t.snapStorages[parent] = copyStorages(t.storages)
+
+ // Commit all changes to live state set
+ for addrHash, account := range ctx.accounts {
+ if len(account) == 0 {
+ delete(t.accounts, addrHash)
+ } else {
+ t.accounts[addrHash] = account
+ }
+ }
+ for addrHash, slots := range ctx.storages {
+ if _, ok := t.storages[addrHash]; !ok {
+ t.storages[addrHash] = make(map[common.Hash][]byte)
+ }
+ for sHash, slot := range slots {
+ if len(slot) == 0 {
+ delete(t.storages[addrHash], sHash)
+ } else {
+ t.storages[addrHash][sHash] = slot
+ }
+ }
+ }
+ return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil)
+}
+
+// lastHash returns the latest root hash, or empty if nothing is cached.
+func (t *tester) lastHash() common.Hash {
+ if len(t.roots) == 0 {
+ return common.Hash{}
+ }
+ return t.roots[len(t.roots)-1]
+}
+
+func (t *tester) verifyState(root common.Hash) error {
+ reader, err := t.db.Reader(root)
+ if err != nil {
+ return err
+ }
+ _, err = reader.Node(common.Hash{}, nil, root)
+ if err != nil {
+ return errors.New("root node is not available")
+ }
+ for addrHash, account := range t.snapAccounts[root] {
+ blob, err := reader.Node(common.Hash{}, addrHash.Bytes(), crypto.Keccak256Hash(account))
+ if err != nil || !bytes.Equal(blob, account) {
+ return fmt.Errorf("account is mismatched: %w", err)
+ }
+ }
+ for addrHash, slots := range t.snapStorages[root] {
+ for hash, slot := range slots {
+ blob, err := reader.Node(addrHash, hash.Bytes(), crypto.Keccak256Hash(slot))
+ if err != nil || !bytes.Equal(blob, slot) {
+ return fmt.Errorf("slot is mismatched: %w", err)
+ }
+ }
+ }
+ return nil
+}
+
+func (t *tester) verifyHistory() error {
+ bottom := t.bottomIndex()
+ for i, root := range t.roots {
+ // The state history related to the state above the disk layer should not exist.
+ if i > bottom {
+ _, err := readHistory(t.db.freezer, uint64(i+1))
+ if err == nil {
+ return errors.New("unexpected state history")
+ }
+ continue
+ }
+ // The state history related to the state below or equal to the disk layer
+ // should exist.
+ obj, err := readHistory(t.db.freezer, uint64(i+1))
+ if err != nil {
+ return err
+ }
+ parent := types.EmptyRootHash
+ if i != 0 {
+ parent = t.roots[i-1]
+ }
+ if obj.meta.parent != parent {
+ return fmt.Errorf("unexpected parent, want: %x, got: %x", parent, obj.meta.parent)
+ }
+ if obj.meta.root != root {
+ return fmt.Errorf("unexpected root, want: %x, got: %x", root, obj.meta.root)
+ }
+ }
+ return nil
+}
+
+// bottomIndex returns the index of the current disk layer.
+func (t *tester) bottomIndex() int {
+ bottom := t.db.tree.bottom()
+ for i := 0; i < len(t.roots); i++ {
+ if t.roots[i] == bottom.rootHash() {
+ return i
+ }
+ }
+ return -1
+}
+
+func TestDatabaseRollback(t *testing.T) {
+ // Verify state histories
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.verifyHistory(); err != nil {
+ t.Fatalf("Invalid state history, err: %v", err)
+ }
+ // Revert database from top to bottom
+ for i := tester.bottomIndex(); i >= 0; i-- {
+ root := tester.roots[i]
+ parent := types.EmptyRootHash
+ if i > 0 {
+ parent = tester.roots[i-1]
+ }
+ loader := newHashLoader(tester.snapAccounts[root], tester.snapStorages[root])
+ if err := tester.db.Recover(parent, loader); err != nil {
+ t.Fatalf("Failed to revert db, err: %v", err)
+ }
+ tester.verifyState(parent)
+ }
+ if tester.db.tree.len() != 1 {
+ t.Fatal("Only disk layer is expected")
+ }
+}
+
+func TestDatabaseRecoverable(t *testing.T) {
+ var (
+ tester = newTester(t)
+ index = tester.bottomIndex()
+ )
+ defer tester.release()
+
+ var cases = []struct {
+ root common.Hash
+ expect bool
+ }{
+ // Unknown state should be unrecoverable
+ {common.Hash{0x1}, false},
+
+ // Initial state should be recoverable
+ {types.EmptyRootHash, true},
+
+ // Initial state should be recoverable
+ {common.Hash{}, true},
+
+ // Layers below the current disk layer are recoverable
+ {tester.roots[index-1], true},
+
+ // The disk layer itself is not recoverable, since it's
+ // directly accessible.
+ {tester.roots[index], false},
+
+ // Layers above the current disk layer are not recoverable,
+ // since they are directly accessible.
+ {tester.roots[index+1], false},
+ }
+ for i, c := range cases {
+ result := tester.db.Recoverable(c.root)
+ if result != c.expect {
+ t.Fatalf("case: %d, unexpected result, want %t, got %t", i, c.expect, result)
+ }
+ }
+}
+
+func TestReset(t *testing.T) {
+ var (
+ tester = newTester(t)
+ index = tester.bottomIndex()
+ )
+ defer tester.release()
+
+ // Reset database to unknown target, should reject it
+ if err := tester.db.Reset(testutil.RandomHash()); err == nil {
+ t.Fatal("Failed to reject invalid reset")
+ }
+ // Reset database to state persisted in the disk
+ if err := tester.db.Reset(types.EmptyRootHash); err != nil {
+ t.Fatalf("Failed to reset database %v", err)
+ }
+ // Ensure journal is deleted from disk
+ if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
+ t.Fatal("Failed to clean journal")
+ }
+ // Ensure all trie histories are removed
+ for i := 0; i <= index; i++ {
+ _, err := readHistory(tester.db.freezer, uint64(i+1))
+ if err == nil {
+ t.Fatalf("Failed to clean state history, index %d", i+1)
+ }
+ }
+ // Verify layer tree structure, single disk layer is expected
+ if tester.db.tree.len() != 1 {
+ t.Fatalf("Extra layer kept %d", tester.db.tree.len())
+ }
+ if tester.db.tree.bottom().rootHash() != types.EmptyRootHash {
+ t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash())
+ }
+}
+
+func TestCommit(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Commit(tester.lastHash(), false); err != nil {
+ t.Fatalf("Failed to cap database, err: %v", err)
+ }
+ // Verify layer tree structure, single disk layer is expected
+ if tester.db.tree.len() != 1 {
+ t.Fatal("Layer tree structure is invalid")
+ }
+ if tester.db.tree.bottom().rootHash() != tester.lastHash() {
+ t.Fatal("Layer tree structure is invalid")
+ }
+ // Verify states
+ if err := tester.verifyState(tester.lastHash()); err != nil {
+ t.Fatalf("State is invalid, err: %v", err)
+ }
+ // Verify state histories
+ if err := tester.verifyHistory(); err != nil {
+ t.Fatalf("State history is invalid, err: %v", err)
+ }
+}
+
+func TestJournal(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Journal(tester.lastHash()); err != nil {
+ t.Errorf("Failed to journal, err: %v", err)
+ }
+ tester.db.Close()
+ tester.db = New(tester.db.diskdb, nil)
+
+ // Verify states including disk layer and all diff on top.
+ for i := 0; i < len(tester.roots); i++ {
+ if i >= tester.bottomIndex() {
+ if err := tester.verifyState(tester.roots[i]); err != nil {
+ t.Fatalf("Invalid state, err: %v", err)
+ }
+ continue
+ }
+ if err := tester.verifyState(tester.roots[i]); err == nil {
+ t.Fatal("Unexpected state")
+ }
+ }
+}
+
+func TestCorruptedJournal(t *testing.T) {
+ tester := newTester(t)
+ defer tester.release()
+
+ if err := tester.db.Journal(tester.lastHash()); err != nil {
+ t.Errorf("Failed to journal, err: %v", err)
+ }
+ tester.db.Close()
+ _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
+
+ // Mutate the journal in disk, it should be regarded as invalid
+ blob := rawdb.ReadTrieJournal(tester.db.diskdb)
+ blob[0] = 1
+ rawdb.WriteTrieJournal(tester.db.diskdb, blob)
+
+ // Verify states, all not-yet-written states should be discarded
+ tester.db = New(tester.db.diskdb, nil)
+ for i := 0; i < len(tester.roots); i++ {
+ if tester.roots[i] == root {
+ if err := tester.verifyState(root); err != nil {
+ t.Fatalf("Disk state is corrupted, err: %v", err)
+ }
+ continue
+ }
+ if err := tester.verifyState(tester.roots[i]); err == nil {
+ t.Fatal("Unexpected state")
+ }
+ }
+}
+
+// copyAccounts returns a deep-copied account set of the provided one.
+func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte {
+ copied := make(map[common.Hash][]byte, len(set))
+ for key, val := range set {
+ copied[key] = common.CopyBytes(val)
+ }
+ return copied
+}
+
+// copyStorages returns a deep-copied storage set of the provided one.
+func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
+ copied := make(map[common.Hash]map[common.Hash][]byte, len(set))
+ for addrHash, subset := range set {
+ copied[addrHash] = make(map[common.Hash][]byte, len(subset))
+ for key, val := range subset {
+ copied[addrHash][key] = common.CopyBytes(val)
+ }
+ }
+ return copied
+}
diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go
new file mode 100644
index 0000000000..4cd4a8f376
--- /dev/null
+++ b/trie/triedb/pathdb/difflayer.go
@@ -0,0 +1,178 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+// diffLayer represents a collection of modifications made to the in-memory tries
+// along with associated state changes after running a block on top.
+//
+// The goal of a diff layer is to act as a journal, tracking recent modifications
+// made to the state that have not yet graduated into a semi-immutable state.
+type diffLayer struct {
+ // Immutables
+ root common.Hash // Root hash to which this layer diff belongs to
+ id uint64 // Corresponding state id
+ block uint64 // Associated block number
+ nodes map[common.Hash]map[string]*trienode.Node // Cached trie nodes indexed by owner and path
+ states *triestate.Set // Associated state change set for building history
+ memory uint64 // Approximate guess as to how much memory we use
+
+ // Parent layer modified by this one, never nil, **can be changed**
+ parent layer
+
+ lock sync.RWMutex // Lock used to protect parent
+}
+
+// newDiffLayer creates a new diff layer on top of an existing layer.
+func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ var (
+ size int64
+ count int
+ )
+ dl := &diffLayer{
+ root: root,
+ id: id,
+ block: block,
+ nodes: nodes,
+ states: states,
+ parent: parent,
+ }
+ for _, subset := range nodes {
+ for path, n := range subset {
+ dl.memory += uint64(n.Size() + len(path))
+ size += int64(len(n.Blob) + len(path))
+ }
+ count += len(subset)
+ }
+ if states != nil {
+ dl.memory += uint64(states.Size())
+ }
+ dirtyWriteMeter.Mark(size)
+ diffLayerNodesMeter.Mark(int64(count))
+ diffLayerBytesMeter.Mark(int64(dl.memory))
+ log.Debug("Created new diff layer", "id", id, "block", block, "nodes", count, "size", common.StorageSize(dl.memory))
+ return dl
+}
+
+// rootHash implements the layer interface, returning the root hash of
+// corresponding state.
+func (dl *diffLayer) rootHash() common.Hash {
+ return dl.root
+}
+
+// stateID implements the layer interface, returning the state id of the layer.
+func (dl *diffLayer) stateID() uint64 {
+ return dl.id
+}
+
+// parentLayer implements the layer interface, returning the parent layer
+// of the diff layer.
+func (dl *diffLayer) parentLayer() layer {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.parent
+}
+
+// node retrieves the node with the provided node information. It's the
+// internal version of Node, additionally tracking the depth of the accessed
+// layer. No error will be returned if the node is not found.
+func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, depth int) ([]byte, error) {
+ // hold the lock to prevent the parent layer from being changed
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // If the trie node is known locally, return it
+ subset, ok := dl.nodes[owner]
+ if ok {
+ n, ok := subset[string(path)]
+ if ok {
+ // If the trie node is not hash matched, or marked as removed,
+ // bubble up an error here. It shouldn't happen at all.
+ if n.Hash != hash {
+ dirtyFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+ return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path)
+ }
+ dirtyHitMeter.Mark(1)
+ dirtyNodeHitDepthHist.Update(int64(depth))
+ dirtyReadMeter.Mark(int64(len(n.Blob)))
+ return n.Blob, nil
+ }
+ }
+
+ // Trie node unknown to this layer, resolve it from the parent with an increased depth
+ if diff, ok := dl.parent.(*diffLayer); ok {
+ return diff.node(owner, path, hash, depth+1)
+ }
+
+ // Parent is a disk layer, resolve from there
+ return dl.parent.Node(owner, path, hash)
+}
+
+// Node implements the layer interface, retrieving the trie node blob with the
+// provided node information. No error will be returned if the node is not found.
+func (dl *diffLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ return dl.node(owner, path, hash, 0)
+}
+
+// update implements the layer interface, creating a new layer on top of the
+// existing layer tree with the specified data items.
+func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ return newDiffLayer(dl, root, id, block, nodes, states)
+}
+
+// persist flushes the diff layer and all its parent layers into the disk layer.
+func (dl *diffLayer) persist(force bool) (layer, error) {
+ if parent, ok := dl.parentLayer().(*diffLayer); ok {
+ // Hold the lock to prevent any read operation until the new
+ // parent is linked correctly.
+ dl.lock.Lock()
+
+ // The merging of diff layers starts at the bottom-most layer,
+ // therefore we recurse down here, flattening on the way up
+ // (diffToDisk).
+ result, err := parent.persist(force)
+ if err != nil {
+ dl.lock.Unlock()
+ return nil, err
+ }
+
+ dl.parent = result
+ dl.lock.Unlock()
+ }
+ return diffToDisk(dl, force)
+}
+
+// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
+// it. The method will panic if called on a non-bottom-most diff layer.
+func diffToDisk(layer *diffLayer, force bool) (layer, error) {
+ disk, ok := layer.parentLayer().(*diskLayer)
+ if !ok {
+ panic(fmt.Sprintf("unknown layer type: %T", layer.parentLayer()))
+ }
+ return disk.commit(layer, force)
+}
diff --git a/trie/triedb/pathdb/difflayer_test.go b/trie/triedb/pathdb/difflayer_test.go
new file mode 100644
index 0000000000..513f9685de
--- /dev/null
+++ b/trie/triedb/pathdb/difflayer_test.go
@@ -0,0 +1,171 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/trie/testutil"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+)
+
+func emptyLayer() *diskLayer {
+ return &diskLayer{
+ db: New(rawdb.NewMemoryDatabase(), nil),
+ buffer: newNodeBuffer(defaultBufferSize, nil, 0),
+ }
+}
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// BenchmarkSearch128Layers
+// BenchmarkSearch128Layers-8 243826 4755 ns/op
+func BenchmarkSearch128Layers(b *testing.B) { benchmarkSearch(b, 0, 128) }
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// BenchmarkSearch512Layers
+// BenchmarkSearch512Layers-8 49686 24256 ns/op
+func BenchmarkSearch512Layers(b *testing.B) { benchmarkSearch(b, 0, 512) }
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// BenchmarkSearch1Layer
+// BenchmarkSearch1Layer-8 14062725 88.40 ns/op
+func BenchmarkSearch1Layer(b *testing.B) { benchmarkSearch(b, 127, 128) }
+
+func benchmarkSearch(b *testing.B, depth int, total int) {
+ var (
+ npath []byte
+ nhash common.Hash
+ nblob []byte
+ )
+ // First, we set up the requested number of diff layers, with 3K items each
+ fill := func(parent layer, index int) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ if npath == nil && depth == index {
+ npath = common.CopyBytes(path)
+ nblob = common.CopyBytes(node.Blob)
+ nhash = node.Hash
+ }
+ }
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ var layer layer
+ layer = emptyLayer()
+ for i := 0; i < total; i++ {
+ layer = fill(layer, i)
+ }
+ b.ResetTimer()
+
+ var (
+ have []byte
+ err error
+ )
+ for i := 0; i < b.N; i++ {
+ have, err = layer.Node(common.Hash{}, npath, nhash)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ if !bytes.Equal(have, nblob) {
+ b.Fatalf("have %x want %x", have, nblob)
+ }
+}
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// BenchmarkPersist
+// BenchmarkPersist-8 10 111252975 ns/op
+func BenchmarkPersist(b *testing.B) {
+ // First, we set up 128 diff layers, with 3K items each
+ fill := func(parent layer) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ }
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ var layer layer
+ layer = emptyLayer()
+ for i := 1; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.StartTimer()
+
+ dl, ok := layer.(*diffLayer)
+ if !ok {
+ break
+ }
+ dl.persist(false)
+ }
+}
+
+// BenchmarkJournal benchmarks the performance for journaling the layers.
+//
+// BenchmarkJournal
+// BenchmarkJournal-8 10 110969279 ns/op
+func BenchmarkJournal(b *testing.B) {
+ b.SkipNow()
+
+ // First, we set up 128 diff layers, with 3K items each
+ fill := func(parent layer) *diffLayer {
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ nodes[common.Hash{}] = make(map[string]*trienode.Node)
+ for i := 0; i < 3000; i++ {
+ var (
+ path = testutil.RandBytes(32)
+ node = testutil.RandomNode()
+ )
+ nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob)
+ }
+ // TODO(rjl493456442) a non-nil state set is expected.
+ return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil)
+ }
+ var layer layer
+ layer = emptyLayer()
+ for i := 0; i < 128; i++ {
+ layer = fill(layer)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ layer.journal(new(bytes.Buffer))
+ }
+}
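
BenchmarkPersist above relies on the standard StopTimer/StartTimer idiom to keep the expensive fixture rebuild out of the measured time. A minimal sketch of the same pattern, under hypothetical names:

package bench

import "testing"

// BenchmarkWithSetup shows the setup-exclusion idiom used by
// BenchmarkPersist: rebuild the fixture inside the loop, but keep the
// rebuild outside the measured window.
func BenchmarkWithSetup(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		data := make([]int, 1<<16) // expensive per-iteration setup
		b.StartTimer()

		sum := 0
		for _, v := range data {
			sum += v
		}
		_ = sum // keep the result alive so the loop isn't optimized away
	}
}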
diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go
new file mode 100644
index 0000000000..34e2d62ba0
--- /dev/null
+++ b/trie/triedb/pathdb/disklayer.go
@@ -0,0 +1,298 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+ "golang.org/x/crypto/sha3"
+)
+
+// diskLayer is a low level persistent layer built on top of a key-value store.
+type diskLayer struct {
+ root common.Hash // Immutable, root hash to which this layer was made for
+ id uint64 // Immutable, corresponding state id
+ db *Database // Path-based trie database
+ cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
+ buffer *nodebuffer // Node buffer to aggregate writes
+ stale bool // Signals that the layer became stale (state has progressed past it)
+ lock sync.RWMutex // Lock used to protect stale flag
+}
+
+// newDiskLayer creates a new disk layer based on the given arguments.
+func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer {
+ // Initialize a clean cache if the memory allowance is not zero
+ // or reuse the provided cache if it is not nil (inherited from
+ // the original disk layer).
+ if cleans == nil && db.config.CleanSize != 0 {
+ cleans = fastcache.New(db.config.CleanSize)
+ }
+ return &diskLayer{
+ root: root,
+ id: id,
+ db: db,
+ cleans: cleans,
+ buffer: buffer,
+ }
+}
+
+// rootHash implements the layer interface, returning the root hash of the corresponding state.
+func (dl *diskLayer) rootHash() common.Hash {
+ return dl.root
+}
+
+// stateID implements the layer interface, returning the state id of the disk layer.
+func (dl *diskLayer) stateID() uint64 {
+ return dl.id
+}
+
+// parentLayer implements the layer interface, returning nil as there's no layer
+// below the disk.
+func (dl *diskLayer) parentLayer() layer {
+ return nil
+}
+
+// isStale returns whether this layer has become stale (was flattened across) or
+// is still live.
+func (dl *diskLayer) isStale() bool {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ return dl.stale
+}
+
+// markStale sets the stale flag as true.
+func (dl *diskLayer) markStale() {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+
+ if dl.stale {
+ panic("triedb disk layer is stale") // we've committed into the same base from two children, boom
+ }
+ dl.stale = true
+}
+
+// Node implements the layer interface, retrieving the trie node with the provided node info.
+// No error will be returned if the node is not found.
+func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return nil, errSnapshotStale
+ }
+ // Try to retrieve the trie node from the not-yet-written
+ // node buffer first. Note the buffer is lock free since
+ // it's impossible to mutate the buffer before tagging the
+ // layer as stale.
+ n, err := dl.buffer.node(owner, path, hash)
+ if err != nil {
+ return nil, err
+ }
+ if n != nil {
+ dirtyHitMeter.Mark(1)
+ dirtyReadMeter.Mark(int64(len(n.Blob)))
+ return n.Blob, nil
+ }
+ dirtyMissMeter.Mark(1)
+
+ // Try to retrieve the trie node from the clean memory cache
+ key := cacheKey(owner, path)
+ if dl.cleans != nil {
+ if blob := dl.cleans.Get(nil, key); len(blob) > 0 {
+ h := newHasher()
+ defer h.release()
+
+ got := h.hash(blob)
+ if got == hash {
+ cleanHitMeter.Mark(1)
+ cleanReadMeter.Mark(int64(len(blob)))
+ return blob, nil
+ }
+ cleanFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in clean cache", "owner", owner, "path", path, "expect", hash, "got", got)
+ }
+ cleanMissMeter.Mark(1)
+ }
+
+ // Try to retrieve the trie node from the disk.
+ var (
+ nBlob []byte
+ nHash common.Hash
+ )
+ if owner == (common.Hash{}) {
+ nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path)
+ } else {
+ nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path)
+ }
+ if nHash != hash {
+ diskFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash)
+ return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path)
+ }
+ if dl.cleans != nil && len(nBlob) > 0 {
+ dl.cleans.Set(key, nBlob)
+ cleanWriteMeter.Mark(int64(len(nBlob)))
+ }
+ return nBlob, nil
+}
+
+// update implements the layer interface, returning a new diff layer on top with the given state set.
+func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer {
+ return newDiffLayer(dl, root, id, block, nodes, states)
+}
+
+// commit merges the given bottom-most diff layer into the node buffer
+// and returns a newly constructed disk layer. Note the current disk
+// layer must be tagged as stale first to prevent re-access.
+func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+ // Construct and store the state history first. If a crash happens
+ // after storing the state history but before flushing the
+ // corresponding states (journal), the stored state history will
+ // be truncated on the next restart.
+ if dl.db.freezer != nil {
+ err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateLimit)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Mark the diskLayer as stale before applying any mutations on top.
+ dl.stale = true
+
+ // Store the root->id lookup afterwards. All stored lookups are
+ // identified by the **unique** state root. Two non-adjacent blocks
+ // in the same chain can never share the same root.
+ if dl.id == 0 {
+ rawdb.WriteStateID(dl.db.diskdb, dl.root, 0)
+ }
+ rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())
+
+ // Construct a new disk layer by merging the nodes from the provided
+ // diff layer, and flush the content in disk layer if there are too
+ // many nodes cached. The clean cache is inherited from the original
+ // disk layer for reusing.
+ ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes))
+ err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force)
+ if err != nil {
+ return nil, err
+ }
+ return ndl, nil
+}
+
+// revert applies the given state history and returns a reverted disk layer.
+func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer, error) {
+ if h.meta.root != dl.rootHash() {
+ return nil, errUnexpectedHistory
+ }
+
+ // Reject if the provided state history is incomplete. This happens
+ // when a contract with a very large storage is SELF-DESTRUCTed and the
+ // full change set can't be retained because of memory limitations.
+ if len(h.meta.incomplete) > 0 {
+ return nil, errors.New("incomplete state history")
+ }
+ if dl.id == 0 {
+ return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
+ }
+
+ // Apply the reverse state changes upon the current state. This must
+ // be done before holding the lock in order to access the state of
+ // "this" layer.
+ nodes, err := triestate.Apply(h.meta.parent, h.meta.root, h.accounts, h.storages, loader)
+ if err != nil {
+ return nil, err
+ }
+
+ dl.lock.Lock()
+ defer dl.lock.Unlock()
+ // Mark the diskLayer as stale before applying any mutations on top.
+ dl.stale = true
+
+ // The state change is applied either to the node buffer or to the
+ // persistent state, depending on whether the node buffer is empty.
+ // If the buffer is not empty, the state transition that needs to be
+ // reverted is still cached there and not yet flushed; otherwise,
+ // manipulate the persistent state directly.
+ if !dl.buffer.empty() {
+ err := dl.buffer.revert(dl.db.diskdb, nodes)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ batch := dl.db.diskdb.NewBatch()
+ writeNodes(batch, nodes, dl.cleans)
+ rawdb.WritePersistentStateID(batch, dl.id-1)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write states", "err", err)
+ }
+ }
+ return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil
+}
+
+// setBufferSize sets the node buffer size to the provided value.
+func (dl *diskLayer) setBufferSize(size int) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return errSnapshotStale
+ }
+ return dl.buffer.setSize(size, dl.db.diskdb, dl.cleans, dl.id)
+}
+
+// size returns the approximate size of cached nodes in the disk layer.
+func (dl *diskLayer) size() common.StorageSize {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ if dl.stale {
+ return 0
+ }
+ return common.StorageSize(dl.buffer.size)
+}
+
+// hasher is used to compute the keccak256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
+
+func (h *hasher) release() {
+ hasherPool.Put(h)
+}
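
The pooled hasher above exists because every clean-cache hit is re-hashed for verification; recycling keccak states through sync.Pool keeps that hot path allocation-free. A self-contained sketch of the same pattern using only x/crypto (the real code wraps crypto.KeccakState and crypto.HashData; names here are local stand-ins):

package main

import (
	"fmt"
	"hash"
	"sync"

	"golang.org/x/crypto/sha3"
)

// pool recycles keccak256 states so the read path does not allocate a
// fresh hasher for every verified lookup.
var pool = sync.Pool{
	New: func() interface{} { return sha3.NewLegacyKeccak256() },
}

func keccak256(data []byte) []byte {
	h := pool.Get().(hash.Hash)
	defer func() {
		h.Reset() // reset state before returning it to the pool
		pool.Put(h)
	}()
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", keccak256([]byte("trie node blob")))
}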
diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go
new file mode 100644
index 0000000000..f503a9c49d
--- /dev/null
+++ b/trie/triedb/pathdb/errors.go
@@ -0,0 +1,51 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var (
+ // errSnapshotReadOnly is returned if the database is opened in read only mode
+ // and mutation is requested.
+ errSnapshotReadOnly = errors.New("read only")
+
+ // errSnapshotStale is returned from data accessors if the underlying layer
+ // layer had been invalidated due to the chain progressing forward far enough
+ // to not maintain the layer's original state.
+ errSnapshotStale = errors.New("layer stale")
+
+ // errUnexpectedHistory is returned if an unmatched state history is applied
+ // to the database for state rollback.
+ errUnexpectedHistory = errors.New("unexpected state history")
+
+ // errStateUnrecoverable is returned if state is required to be reverted to
+ // a destination without associated state history available.
+ errStateUnrecoverable = errors.New("state is unrecoverable")
+
+ // errUnexpectedNode is returned if the requested node with specified path is
+ // not hash matched with expectation.
+ errUnexpectedNode = errors.New("unexpected node")
+)
+
+func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error {
+ return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash)
+}
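
Since newUnexpectedNodeError wraps the sentinel with %w, callers can still match it with errors.Is after the location details are attached. A quick standalone sketch of that behavior:

package main

import (
	"errors"
	"fmt"
)

var errUnexpectedNode = errors.New("unexpected node")

// wrap mimics newUnexpectedNodeError: attach context while preserving
// the sentinel for errors.Is matching.
func wrap(loc string) error {
	return fmt.Errorf("%w, loc: %s", errUnexpectedNode, loc)
}

func main() {
	err := wrap("disk")
	fmt.Println(errors.Is(err, errUnexpectedNode)) // true
	fmt.Println(err)                               // unexpected node, loc: disk
}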
diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go
new file mode 100644
index 0000000000..7d672d3254
--- /dev/null
+++ b/trie/triedb/pathdb/history.go
@@ -0,0 +1,661 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+ "golang.org/x/exp/slices"
+)
+
+// State history records the state changes involved in executing a block. The
+// state can be reverted to the previous version by applying the associated
+// history object (state reverse diff). State history objects are kept to
+// guarantee that the system can perform state rollbacks in case of deep reorg.
+//
+// Each state transition will generate a state history object. Note that not
+// every block has a corresponding state history object. If a block performs
+// no state changes whatsoever, no state is created for it. Each state history
+// will have a sequentially increasing number acting as its unique identifier.
+//
+// The state history is written to disk (normally the ancient store named
+// "state") when the corresponding diff layer is merged into the disk layer.
+// At the same time, the system can prune the oldest histories according to
+// the configured limit.
+
+//
+// Disk State
+// ^
+// |
+// +------------+ +---------+ +---------+ +---------+
+// | Init State |---->| State 1 |---->| ... |---->| State n |
+// +------------+ +---------+ +---------+ +---------+
+//
+// +-----------+ +------+ +-----------+
+// | History 1 |----> | ... |---->| History n |
+// +-----------+ +------+ +-----------+
+//
+
+// # Rollback
+//
+// If the system wants to roll back to a previous state n, it needs to ensure
+// that all history objects from n+1 up to the current disk layer exist. The
+// history objects are applied to the state in reverse order, starting from the
+// current disk layer.
+// For example, if the current state is 9 and we revert to 5, histories 6-9
+// must exist in the ancient store.
+
+// A history consists of five parts: metadata, account index and data, storage index and data.
+
+const (
+ accountIndexSize = common.AddressLength + 13 // 20 + 13 The length of encoded account index
+ slotIndexSize = common.HashLength + 5 // 32 + 5 The length of encoded slot index
+ historyMetaSize = 9 + 2*common.HashLength // 9 + 32*2 The length of fixed size part of meta object
+
+ stateHistoryVersion = uint8(0) // initial version of state history structure.
+)
+
+// Each state history entry consists of five elements:
+//
+// # metadata
+// This object contains a few meta fields, such as the associated state root,
+// block number, version tag and so on. It may also contain an extra
+// accountHash list, meaning the storage changes belonging to those accounts
+// are incomplete due to a large contract destruction. An incomplete history
+// cannot be used for rollback or for serving archive state requests.
+
+// # account index
+// This object contains the index information of an account: offset and
+// length locate the account's data in the account data table, while
+// storageOffset and storageSlots locate the storage modifications
+// belonging to the account in the storage index table.
+//
+// The size of each account index is *fixed*, and all indexes are sorted
+// lexicographically. Thus binary search can be performed to quickly locate a
+// specific account.
+//
+// # account data
+// Account data is a concatenated byte stream composed of all account data.
+// A specific account is resolved with the offset and length recorded in the
+// corresponding account index, which together seek directly to its slice of
+// the stream.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | length
+// offset |----------------+
+// v v
+// +----------------+----------------+----------------+----------------+
+// | Account data 1 | Account data 2 | ... | Account data N |
+// +----------------+----------------+----------------+----------------+
+//
+// # storage index
+// This object is similar to the account index. It's also fixed size and
+// contains the location info of the storage slot data.
+//
+// # storage data
+// Storage data is a concatenated byte stream composed of all storage slot data.
+// A storage slot is resolved with the location info recorded in the
+// corresponding account index and slot index: storageOffset and storageSlots
+// from the account index select the slot indexes, which in turn give the
+// offset and length of the slot data.
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | storage slots
+// storage offset |-----------------------------------------------------+
+// v v
+// +-----------------+-----------------+-----------------+
+// | storage index 1 | storage index 2 | storage index 3 |
+// +-----------------+-----------------+-----------------+
+// | length
+// offset |-------------+
+// v v
+// +-------------+
+// | slot data 1 |
+//
+
+// accountIndex describes the metadata belonging to an account.
+type accountIndex struct {
+ address common.Address // The address of the account
+ length uint8 // The length of account data, size limited by 255
+ offset uint32 // The offset of item in account data table
+ storageOffset uint32 // The offset of storage index in storage index table, belong to the account
+ storageSlots uint32 // The number of mutated storage slots belonging to the account
+}
+
+// encode packs account index into byte stream.
+func (i *accountIndex) encode() []byte {
+ var buf [accountIndexSize]byte
+ copy(buf[:], i.address.Bytes()) // 20 bytes
+ buf[common.AddressLength] = i.length // 1 byte
+ binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset) // 4 bytes
+ binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset) // 4 bytes
+ binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots) // 4 bytes
+ return buf[:]
+}
+
+// decode unpacks account index from byte stream.
+func (i *accountIndex) decode(blob []byte) {
+ i.address = common.BytesToAddress(blob[:common.AddressLength])
+ i.length = blob[common.AddressLength]
+ i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:])
+ i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:])
+ i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:])
+}
+
+// slotIndex describes the metadata belonging to a storage slot.
+// Each account can have multiple storage slots.
+type slotIndex struct {
+ hash common.Hash // The hash of slot key
+ length uint8 // The length of storage slot, up to 32 bytes defined in protocol
+ offset uint32 // The offset of item in storage slot data table
+}
+
+// encode packs slot index into byte stream.
+func (i *slotIndex) encode() []byte {
+ var buf [slotIndexSize]byte
+ copy(buf[:common.HashLength], i.hash.Bytes())
+ buf[common.HashLength] = i.length
+ binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
+ return buf[:]
+}
+
+// decode unpacks slot index from the byte stream.
+func (i *slotIndex) decode(blob []byte) {
+ i.hash = common.BytesToHash(blob[:common.HashLength])
+ i.length = blob[common.HashLength]
+ i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
+}
+
+// meta describes the metadata of a state history object.
+type meta struct {
+ version uint8 // version tag of history object
+ parent common.Hash // prev-state root before the state transition
+ root common.Hash // post-state root after the state transition
+ block uint64 // associated block number
+ incomplete []common.Address // list of address whose storage set is incomplete
+}
+
+// encode packs the meta object into byte stream.
+func (m *meta) encode() []byte {
+ buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength) // 73 fixed bytes + 20 bytes per incomplete address
+ buf[0] = m.version
+ copy(buf[1:1+common.HashLength], m.parent.Bytes())
+ copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
+ binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
+ for i, h := range m.incomplete {
+ copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes())
+ }
+ return buf[:]
+}
+
+// decode unpacks the meta object from byte stream.
+func (m *meta) decode(blob []byte) error {
+ if len(blob) < 1 {
+ return fmt.Errorf("no version tag")
+ }
+ switch blob[0] { // Check the version tag
+ case stateHistoryVersion:
+ // Check base history meta size
+ if len(blob) < historyMetaSize {
+ return fmt.Errorf("invalid state history meta, len: %d", len(blob))
+ }
+ if (len(blob)-historyMetaSize)%common.AddressLength != 0 {
+ return fmt.Errorf("corrupted state history meta, len: %d", len(blob))
+ }
+ m.version = blob[0]
+ m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
+ m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
+ m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
+ for pos := historyMetaSize; pos < len(blob); {
+ m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength]))
+ pos += common.AddressLength
+ }
+ return nil
+ default:
+ return fmt.Errorf("unknown version %d", blob[0])
+ }
+}
+
+// history represents a set of state changes belonging to a block along with
+// the metadata including the state roots involved in the state transition.
+// State history objects on disk are linked with each other by a unique id
+// (an 8-byte integer); the oldest state history object can be pruned on
+// demand in order to control the storage size.
+type history struct {
+ meta *meta // Metadata of history
+ accounts map[common.Address][]byte // Account data keyed by its address
+ accountList []common.Address // Sorted account address list
+ storages map[common.Address]map[common.Hash][]byte // Storage data keyed by its address and slot hash
+ storageList map[common.Address][]common.Hash // Sorted slot hash list, per account
+}
+
+// newHistory constructs a state history object with the provided state change set, tracking the associated block and state roots.
+func newHistory(root common.Hash, parent common.Hash, block uint64, states *triestate.Set) *history {
+ var (
+ accountList []common.Address
+ storageList = make(map[common.Address][]common.Hash)
+ incomplete []common.Address
+ )
+ for addr := range states.Accounts {
+ accountList = append(accountList, addr)
+ }
+ // Sort by comparing bytes of address
+ slices.SortFunc(accountList, common.Address.Cmp)
+ // Construct storage list
+ for addr, slots := range states.Storages {
+ slist := make([]common.Hash, 0, len(slots))
+
+ for slotHash := range slots {
+ slist = append(slist, slotHash)
+ }
+ slices.SortFunc(slist, common.Hash.Cmp)
+ storageList[addr] = slist
+ }
+
+ for addr := range states.Incomplete {
+ incomplete = append(incomplete, addr)
+ }
+ slices.SortFunc(incomplete, common.Address.Cmp)
+ return &history{
+ meta: &meta{
+ version: stateHistoryVersion,
+ parent: parent,
+ root: root,
+ block: block,
+ incomplete: incomplete,
+ },
+ accounts: states.Accounts,
+ accountList: accountList,
+ storages: states.Storages,
+ storageList: storageList,
+ }
+}
+
+// encode serializes the current state history and returns four byte streams
+// representing the concatenated account/storage data and account/storage
+// indexes respectively.
+func (h *history) encode() ([]byte, []byte, []byte, []byte) {
+ var (
+ slotNumber uint32 // the number of processed storage slots, 4 bytes
+ accountData []byte // the buffer for concatenated account data
+ storageData []byte // the buffer for concatenated storage data
+ accountIndexes []byte // the buffer for concatenated account indexes
+ storageIndexes []byte // the buffer for concatenated storage indexes
+ )
+
+ for _, addr := range h.accountList {
+ accIndex := accountIndex{
+ address: addr,
+ length: uint8(len(h.accounts[addr])), // get the length of account data
+ offset: uint32(len(accountData)),
+ }
+ slots, exist := h.storages[addr]
+ if exist {
+ // For each account that has storage slots, encode the slots in order
+ for _, slotHash := range h.storageList[addr] {
+ sIndex := slotIndex{
+ hash: slotHash,
+ length: uint8(len(slots[slotHash])),
+ offset: uint32(len(storageData)),
+ }
+ // Concatenate the slot data and its encoded index.
+ storageData = append(storageData, slots[slotHash]...)
+ storageIndexes = append(storageIndexes, sIndex.encode()...)
+ }
+ // Fill up the storage meta in account index
+ accIndex.storageOffset = slotNumber // zero for the first account with storage
+ accIndex.storageSlots = uint32(len(slots))
+ slotNumber += uint32(len(slots)) // running total of slots across all accounts
+ }
+ accountData = append(accountData, h.accounts[addr]...)
+ accountIndexes = append(accountIndexes, accIndex.encode()...)
+ }
+ return accountData, storageData, accountIndexes, storageIndexes
+}
+
+// decoder wraps the byte streams for decoding with extra meta fields.
+type decoder struct {
+ accountData []byte // the buffer for concatenated account data
+ storageData []byte // the buffer for concatenated storage data
+ accountIndexes []byte // the buffer for concatenated account index
+ storageIndexes []byte // the buffer for concatenated storage index
+
+ lastAccount *common.Address // the address of last resolved account
+ lastAccountRead uint32 // the read-cursor position of account data
+ lastStorageSlotIndexRead uint32 // the read-cursor position of storage slot index
+ lastStorageSlotDataRead uint32 // the read-cursor position of storage slot data
+}
+
+// verify validates the provided byte streams for decoding state history. A few
+// checks will be performed to quickly detect data corruption.
+//
+// The byte stream is regarded as corrupted if:
+// - account indexes buffer is empty (an empty state set is invalid)
+// - account indexes/storage indexes buffer is not aligned
+//
+// note, these situations are allowed:
+//
+// - empty account data: all accounts were not present
+// - empty storage set: no slots are modified
+func (r *decoder) verify() error {
+ if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 {
+ return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes))
+ }
+ if len(r.storageIndexes)%slotIndexSize != 0 {
+ return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes))
+ }
+ return nil
+}
+
+// readAccount parses the account from the byte stream with specified position.
+// It returns the account index and data located at this position in the byte stream.
+func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
+ // Decode account index from the index byte stream.
+ var index accountIndex
+ if (pos+1)*accountIndexSize > len(r.accountIndexes) {
+ return accountIndex{}, nil, errors.New("The pos is out range Indexes, seem account data buffer is corrupted")
+ }
+ // Decode account index from the index byte stream.
+ index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])
+
+ // Perform validation before parsing account data, ensure
+ // - account is sorted in order in byte stream
+ // - account data is strictly encoded with no gap inside
+ // - account data is not out-of-slice
+ if r.lastAccount != nil {
+ if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 {
+ return accountIndex{}, nil, errors.New("account is not in order")
+ }
+ }
+ if index.offset != r.lastAccountRead {
+ return accountIndex{}, nil, errors.New("account data buffer is gaped")
+ }
+ lastOffset := index.offset + uint32(index.length)
+ if uint32(len(r.accountData)) < lastOffset {
+ return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+ }
+ data := r.accountData[index.offset:lastOffset]
+
+ r.lastAccount = &index.address
+ r.lastAccountRead = lastOffset
+ return index, data, nil
+}
+
+// readStorage parses the storage slots from the byte stream with specified account.
+func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
+ var (
+ last common.Hash
+ list []common.Hash
+ storage = make(map[common.Hash][]byte)
+ )
+ for j := 0; j < int(accIndex.storageSlots); j++ {
+ // Calculate the start/end offsets of the j-th slot index.
+ var (
+ index slotIndex
+ start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize)
+ end = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize)
+ )
+ // Perform validation before parsing storage slot data, ensure
+ // - slot index is not out-of-slice
+ // - slot data is not out-of-slice
+ // - slot is sorted in order in byte stream
+ // - slot indexes is strictly encoded with no gap inside
+ // - slot data is strictly encoded with no gap inside
+ if start != r.lastStorageSlotIndexRead {
+ return nil, nil, errors.New("storage index buffer is gapped")
+ }
+
+ if uint32(len(r.storageIndexes)) < end {
+ return nil, nil, errors.New("Index is out scope, storage index buffer is corrupted")
+ }
+
+ // Decode the slot index at this position.
+ index.decode(r.storageIndexes[start:end])
+
+ if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
+ return nil, nil, errors.New("storage slot is not in order")
+ }
+ if index.offset != r.lastStorageSlotDataRead {
+ return nil, nil, errors.New("storage data buffer is gapped")
+ }
+ sEnd := index.offset + uint32(index.length)
+ if uint32(len(r.storageData)) < sEnd {
+ return nil, nil, errors.New("storage data buffer is corrupted")
+ }
+ storage[index.hash] = r.storageData[r.lastStorageSlotDataRead:sEnd]
+ list = append(list, index.hash)
+
+ last = index.hash
+ r.lastStorageSlotIndexRead = end
+ r.lastStorageSlotDataRead = sEnd
+ }
+ return list, storage, nil
+}
+
+// decode deserializes the account and storage data from the provided byte stream.
+func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
+ var (
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ accountList []common.Address
+ storageList = make(map[common.Address][]common.Hash)
+
+ r = &decoder{
+ accountData: accountData,
+ storageData: storageData,
+ accountIndexes: accountIndexes,
+ storageIndexes: storageIndexes,
+ }
+ )
+ if err := r.verify(); err != nil {
+ return err
+ }
+ for i := 0; i < len(accountIndexes)/accountIndexSize; i++ {
+ // Resolve account first
+ accIndex, accData, err := r.readAccount(i)
+ if err != nil {
+ return err
+ }
+ accounts[accIndex.address] = accData
+ accountList = append(accountList, accIndex.address)
+
+ // Resolve storage slots
+ slotList, slotData, err := r.readStorage(accIndex)
+ if err != nil {
+ return err
+ }
+ if len(slotList) > 0 {
+ storageList[accIndex.address] = slotList
+ storages[accIndex.address] = slotData
+ }
+ }
+ h.accounts = accounts
+ h.accountList = accountList
+ h.storages = storages
+ h.storageList = storageList
+ return nil
+}
+
+// readHistory reads and decodes the state history object by the given id.
+func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) {
+ blob := rawdb.ReadStateHistoryMeta(freezer, id)
+ if len(blob) == 0 {
+ return nil, fmt.Errorf("state history not found %d", id)
+ }
+ var m meta
+ if err := m.decode(blob); err != nil {
+ return nil, err
+ }
+ var (
+ dec = history{meta: &m}
+ accountData = rawdb.ReadStateAccountHistory(freezer, id)
+ storageData = rawdb.ReadStateStorageHistory(freezer, id)
+ accountIndexes = rawdb.ReadStateAccountIndex(freezer, id)
+ storageIndexes = rawdb.ReadStateStorageIndex(freezer, id)
+ )
+ if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
+ return nil, err
+ }
+ return &dec, nil
+}
+
+// writeHistory writes the state history with provided state set. After
+// storing the corresponding state history, it will also prune the stale
+// histories from the disk with the given threshold.
+func writeHistory(db ethdb.KeyValueStore, freezer *rawdb.ResettableFreezer, dl *diffLayer, limit uint64) error {
+ // Short circuit if state set is not available.
+ if dl.states == nil {
+ return errors.New("state change set is not available")
+ }
+ var (
+ err error
+ n int
+ start = time.Now()
+ h = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states)
+ )
+ // Produce the byte streams of account and storage info in the current state.
+ accountData, storageData, accountIndex, storageIndex := h.encode()
+ dataSize := common.StorageSize(len(accountData) + len(storageData))
+ indexSize := common.StorageSize(len(accountIndex) + len(storageIndex))
+
+ // Write history data into the five freezer tables respectively.
+ rawdb.WriteStateHistory(freezer, dl.stateID(), h.meta.encode(), accountIndex, storageIndex, accountData, storageData)
+
+ // Prune stale state histories based on the config.
+ if limit != 0 && dl.stateID() > limit {
+ n, err = truncateFromTail(db, freezer, dl.stateID()-limit)
+ if err != nil {
+ return err
+ }
+ }
+ historyDataBytesMeter.Mark(int64(dataSize))
+ historyIndexBytesMeter.Mark(int64(indexSize))
+ historyBuildTimeMeter.UpdateSince(start)
+ log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "pruned", n, "elapsed", common.PrettyDuration(time.Since(start)))
+ return nil
+}
+
+// checkHistories retrieves a batch of meta objects with the specified range
+// and performs the callback on each item.
+func checkHistories(freezer *rawdb.ResettableFreezer, start, count uint64, check func(*meta) error) error {
+ for count > 0 {
+ number := count
+ if number > 10000 {
+ number = 10000 // split the big read into small chunks
+ }
+ blobs, err := rawdb.ReadStateHistoryMetaList(freezer, start, number)
+ if err != nil {
+ return err
+ }
+ for _, blob := range blobs {
+ var dec meta
+ if err := dec.decode(blob); err != nil {
+ return err
+ }
+ if err := check(&dec); err != nil {
+ return err
+ }
+ }
+ count -= uint64(len(blobs))
+ start += uint64(len(blobs))
+ }
+ return nil
+}
+
+// truncateFromHead removes the extra state histories from the head with the given
+// parameters. It returns the number of items removed from the head.
+func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead uint64) (int, error) {
+ ohead, err := freezer.Ancients()
+ if err != nil {
+ return 0, err
+ }
+ if ohead <= nhead {
+ return 0, nil
+ }
+ // Load the meta objects in range [nhead+1, ohead]
+ blobs, err := rawdb.ReadStateHistoryMetaList(freezer, nhead+1, ohead-nhead)
+ if err != nil {
+ return 0, err
+ }
+ batch := db.NewBatch()
+ for _, blob := range blobs {
+ var m meta
+ if err := m.decode(blob); err != nil {
+ return 0, err
+ }
+ rawdb.DeleteStateID(batch, m.root)
+ }
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ ohead, err = freezer.TruncateHead(nhead)
+ if err != nil {
+ return 0, err
+ }
+ return int(ohead - nhead), nil
+}
+
+// truncateFromTail removes the extra state histories from the tail with the given
+// parameters. It returns the number of items removed from the tail.
+func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) {
+ otail, err := freezer.Tail()
+ if err != nil {
+ return 0, err
+ }
+ if otail >= ntail {
+ return 0, nil
+ }
+ // Load the meta objects in range [otail+1, ntail]
+ blobs, err := rawdb.ReadStateHistoryMetaList(freezer, otail+1, ntail-otail)
+ if err != nil {
+ return 0, err
+ }
+ batch := db.NewBatch()
+ for _, blob := range blobs {
+ var m meta
+ if err := m.decode(blob); err != nil {
+ return 0, err
+ }
+ // Delete the state root from the state ID table
+ rawdb.DeleteStateID(batch, m.root)
+ }
+ if err := batch.Write(); err != nil {
+ return 0, err
+ }
+ otail, err = freezer.TruncateTail(ntail)
+ if err != nil {
+ return 0, err
+ }
+ return int(ntail - otail), nil
+}
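
The fixed-size index layout above (20-byte address, 1-byte length, three big-endian uint32 fields, 33 bytes total) is what makes binary search over the index table possible. Below is a standalone round-trip of the same layout; the names (index, addrLen) are local stand-ins, not the patch's types:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	addrLen   = 20
	indexSize = addrLen + 13 // mirrors accountIndexSize: 20 + 1 + 4 + 4 + 4
)

type index struct {
	address                             [addrLen]byte
	length                              uint8
	offset, storageOffset, storageSlots uint32
}

func (i *index) encode() []byte {
	var buf [indexSize]byte
	copy(buf[:addrLen], i.address[:])
	buf[addrLen] = i.length
	binary.BigEndian.PutUint32(buf[addrLen+1:], i.offset)
	binary.BigEndian.PutUint32(buf[addrLen+5:], i.storageOffset)
	binary.BigEndian.PutUint32(buf[addrLen+9:], i.storageSlots)
	return buf[:]
}

func (i *index) decode(b []byte) {
	copy(i.address[:], b[:addrLen])
	i.length = b[addrLen]
	i.offset = binary.BigEndian.Uint32(b[addrLen+1:])
	i.storageOffset = binary.BigEndian.Uint32(b[addrLen+5:])
	i.storageSlots = binary.BigEndian.Uint32(b[addrLen+9:])
}

func main() {
	in := index{length: 70, storageSlots: 3}
	copy(in.address[:], "0123456789abcdefghij")
	var out index
	out.decode(in.encode())
	fmt.Println(in == out) // true: the round-trip is lossless
}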
diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go
new file mode 100644
index 0000000000..d76db53d56
--- /dev/null
+++ b/trie/triedb/pathdb/history_test.go
@@ -0,0 +1,312 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/testutil"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+const (
+ SeedingHistory = 10
+)
+
+// randomStateSet generates a random state change set.
+func randomStateSet(n int) *triestate.Set {
+ var (
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ )
+ for i := 0; i < n; i++ {
+ addr := testutil.RandomAddress()
+ storages[addr] = make(map[common.Hash][]byte)
+ for j := 0; j < 3; j++ {
+ v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32)))
+ storages[addr][testutil.RandomHash()] = v
+ }
+ account := generateAccount(types.EmptyRootHash)
+ accounts[addr] = types.SlimAccountRLP(account)
+ }
+ return triestate.New(accounts, storages, nil)
+}
+
+func makeHistory() *history {
+ return newHistory(testutil.RandomHash(), types.EmptyRootHash, 0, randomStateSet(3))
+}
+
+func makeHistories(n int) []*history {
+ var (
+ parent = types.EmptyRootHash
+ result []*history
+ )
+ for i := 0; i < n; i++ {
+ root := testutil.RandomHash()
+ h := newHistory(root, parent, uint64(i), randomStateSet(3))
+ parent = root
+ result = append(result, h)
+ }
+ return result
+}
+
+func TestEncodeDecodeHistory(t *testing.T) {
+ var (
+ m meta
+ dec history
+ obj = makeHistory()
+ )
+ // check if meta data can be correctly encode/decode
+ blob := obj.meta.encode()
+ if err := m.decode(blob); err != nil {
+ t.Fatalf("Failed to decode %v", err)
+ }
+ if !reflect.DeepEqual(&m, obj.meta) {
+ t.Fatal("meta is mismatched")
+ }
+
+ // check if account/storage data can be correctly encode/decode
+ accountData, storageData, accountIndexes, storageIndexes := obj.encode()
+ if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
+ t.Fatalf("Failed to decode, err: %v", err)
+ }
+ if !compareSet(dec.accounts, obj.accounts) {
+ t.Fatal("account data is mismatched")
+ }
+ if !compareStorages(dec.storages, obj.storages) {
+ t.Fatal("storage data is mismatched")
+ }
+ if !compareList(dec.accountList, obj.accountList) {
+ t.Fatal("account list is mismatched")
+ }
+ if !compareStorageList(dec.storageList, obj.storageList) {
+ t.Fatal("storage list is mismatched")
+ }
+}
+
+func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, id uint64, root common.Hash, exist bool) {
+ blob := rawdb.ReadStateHistoryMeta(freezer, id)
+ if exist && len(blob) == 0 {
+ t.Fatalf("Failed to load trie history, %d", id)
+ }
+ if !exist && len(blob) != 0 {
+ t.Fatalf("Unexpected trie history, %d", id)
+ }
+ if exist && rawdb.ReadStateID(db, root) == nil {
+ t.Fatalf("Root->ID mapping is not found, %d", id)
+ }
+ if !exist && rawdb.ReadStateID(db, root) != nil {
+ t.Fatalf("Unexpected root->ID mapping, %d", id)
+ }
+}
+
+func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer *rawdb.ResettableFreezer, from, to uint64, roots []common.Hash, exist bool) {
+ for i, j := from, 0; i <= to; i, j = i+1, j+1 {
+ checkHistory(t, db, freezer, i, roots[j], exist)
+ }
+}
+
+func TestTruncateHeadHistory(t *testing.T) {
+ var (
+ roots []common.Hash
+ hs = makeHistories(SeedingHistory)
+ db = rawdb.NewMemoryDatabase()
+ freezer, _ = openFreezer(t.TempDir(), false)
+ )
+ defer freezer.Close()
+
+ for i := 0; i < len(hs); i++ {
+ accountData, storageData, accountIndex, storageIndex := hs[i].encode()
+ rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
+ rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
+ roots = append(roots, hs[i].meta.root)
+ }
+ for size := len(hs); size > 0; size-- {
+ pruned, err := truncateFromHead(db, freezer, uint64(size-1))
+ if err != nil {
+ t.Fatalf("Failed to truncate from head %v", err)
+ }
+ if pruned != 1 {
+ t.Error("Unexpected pruned items", "want", 1, "got", pruned)
+ }
+ checkHistoriesInRange(t, db, freezer, uint64(size), uint64(SeedingHistory), roots[size-1:], false)
+ checkHistoriesInRange(t, db, freezer, uint64(1), uint64(size-1), roots[:size-1], true)
+ }
+}
+
+/*
+Create n histories, write them to the freezer, and truncate from the tail one by one.
+*/
+func TestTruncateTailHistory(t *testing.T) {
+ var (
+ roots []common.Hash
+ hs = makeHistories(SeedingHistory)
+ db = rawdb.NewMemoryDatabase()
+ freezer, err = openFreezer(t.TempDir(), false)
+ )
+ if err != nil {
+ t.Fatalf("Failed to open freezer %v", err)
+ }
+ defer freezer.Close()
+
+ for i := 0; i < len(hs); i++ {
+ accountData, storageData, accountIndex, storageIndex := hs[i].encode()
+ // append i-th history to the freezer.
+ rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
+ // Update the root->ID mapping in KeyValue database.
+ rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
+ roots = append(roots, hs[i].meta.root)
+ }
+ // truncate from the tail one by one, 1, 2, 3, ..., n-1.
+ for newTail := 1; newTail < len(hs); newTail++ {
+ pruned, _ := truncateFromTail(db, freezer, uint64(newTail))
+ if pruned != 1 {
+ t.Error("Unexpected pruned items", "want", 1, "got", pruned)
+ }
+ // Histories in range [1, newTail] should no longer exist.
+ checkHistoriesInRange(t, db, freezer, uint64(1), uint64(newTail), roots[:newTail], false)
+ // Histories in range [newTail+1, SeedingHistory] should still exist.
+ checkHistoriesInRange(t, db, freezer, uint64(newTail+1), uint64(SeedingHistory), roots[newTail:], true)
+ }
+}
+
+func TestTruncateTailHistories(t *testing.T) {
+ var cases = []struct {
+ limit uint64
+ expectedPruned int
+ maxPruned uint64
+ minUnprunedOffset uint64
+ empty bool
+ }{
+ {
+ 1, 9, 9, 10, false,
+ },
+ {
+ 0, 10, 10, 0 /* no meaning */, true,
+ },
+ {
+ 10, 0, 0, 1, false,
+ },
+ }
+
+ for i, c := range cases {
+ var (
+ roots []common.Hash
+ hs = makeHistories(SeedingHistory)
+ db = rawdb.NewMemoryDatabase()
+ freezer, err = openFreezer(t.TempDir()+fmt.Sprintf("%d", i), false)
+ )
+ if err != nil {
+ t.Fatalf("Failed to open freezer %v", err)
+ }
+ defer freezer.Close()
+
+ // Write SeedingHistory histories to the freezer.
+ for i := 0; i < len(hs); i++ {
+ accountData, storageData, accountIndex, storageIndex := hs[i].encode()
+ rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
+ rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
+ roots = append(roots, hs[i].meta.root)
+ }
+ // Truncate from the tail; in this case, we truncate a range of histories at once.
+ tail := SeedingHistory - int(c.limit)
+ pruned, _ := truncateFromTail(db, freezer, uint64(tail))
+ if pruned != c.expectedPruned {
+ t.Error("Unexpected pruned items", "want", c.expectedPruned, "got", pruned)
+ }
+ // In the empty case, just make sure the whole range is truncated.
+ if c.empty {
+ checkHistoriesInRange(t, db, freezer, uint64(1), uint64(SeedingHistory), roots, false)
+ } else {
+ checkHistoriesInRange(t, db, freezer, uint64(1), c.maxPruned, roots[:tail], false)
+ checkHistoriesInRange(t, db, freezer, c.minUnprunedOffset, uint64(SeedingHistory), roots[tail:], true)
+ }
+ }
+}
+
+// openFreezer initializes the freezer instance for storing state histories.
+func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) {
+ return rawdb.NewStateHistoryFreezer(datadir, readOnly)
+}
+
+func compareSet[k comparable](a, b map[k][]byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for key, valA := range a {
+ valB, ok := b[key]
+ if !ok {
+ return false
+ }
+ if !bytes.Equal(valA, valB) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareList[k comparable](a, b []k) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func compareStorages(a, b map[common.Address]map[common.Hash][]byte) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for h, subA := range a {
+ subB, ok := b[h]
+ if !ok {
+ return false
+ }
+ if !compareSet(subA, subB) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareStorageList(a, b map[common.Address][]common.Hash) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for h, la := range a {
+ lb, ok := b[h]
+ if !ok {
+ return false
+ }
+ if !compareList(la, lb) {
+ return false
+ }
+ }
+ return true
+}
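
The journal added in the next file is a flat RLP stream: a version word followed by a sequence of layer entries, read back until EOF. A minimal sketch of that read pattern, assuming the go-ethereum rlp package as used in this patch (the entry payloads here are simplified to strings):

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Encode a version followed by a sequence of entries.
	var buf bytes.Buffer
	rlp.Encode(&buf, uint64(0)) // journal version
	rlp.Encode(&buf, "entry-1")
	rlp.Encode(&buf, "entry-2")

	// Decode entries until the stream reports EOF, mirroring how
	// loadDiffLayer treats EOF as the end of the journal.
	r := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0) // 0 = no input limit
	version, _ := r.Uint64()
	fmt.Println("version:", version)
	for {
		var entry string
		if err := r.Decode(&entry); err != nil {
			if err == io.EOF {
				break // end of journal
			}
			panic(err)
		}
		fmt.Println("entry:", entry)
	}
}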
diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go
new file mode 100644
index 0000000000..f6e1854dee
--- /dev/null
+++ b/trie/triedb/pathdb/journal.go
@@ -0,0 +1,401 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+var (
+ errMissJournal = errors.New("journal not found")
+ errMissVersion = errors.New("version not found")
+ errUnexpectedVersion = errors.New("unexpected journal version")
+ errMissDiskRoot = errors.New("disk layer root not found")
+ errUnmatchedJournal = errors.New("unmatched journal")
+)
+
+const journalVersion uint64 = 0
+
+// journalNode represents a trie node persisted in the journal.
+type journalNode struct {
+ Path []byte // Path of node in the trie
+ Blob []byte // RLP-encoded trie node blob, nil means the node is deleted
+}
+
+// journalNodes represents a list of trie nodes belonging to a single account
+// or the main account trie.
+type journalNodes struct {
+ Owner common.Hash
+ Nodes []journalNode
+}
+
+// journalStorage represents a list of storage slots belonging to an account.
+type journalStorage struct {
+ Incomplete bool
+ Account common.Address
+ Hashes []common.Hash
+ Slots [][]byte
+}
+
+// journalAccounts represents a list of accounts belonging to the layer.
+type journalAccounts struct {
+ Addresses []common.Address
+ Accounts [][]byte
+}
+
+// loadDiskLayer reads the binary blob from the layer journal, reconstructing
+// a new disk layer on it.
+func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
+ // Resolve disk layer root
+ var root common.Hash
+
+ if err := r.Decode(&root); err != nil {
+ return nil, fmt.Errorf("load disk root: %v", err)
+ }
+ // Resolve the state id of the disk layer; it can differ from the
+ // persistent id tracked on disk, and the distance between them is
+ // the number of transitions aggregated in the disk layer.
+ var id uint64
+ if err := r.Decode(&id); err != nil {
+ return nil, fmt.Errorf("load state id: %v", err)
+ }
+ // get the persistent state id from the disk
+ stored := rawdb.ReadPersistentStateID(db.diskdb)
+ if stored > id {
+ return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id)
+ }
+ // Resolve nodes cached in node buffer
+ var encoded []journalNodes
+ if err := r.Decode(&encoded); err != nil {
+ return nil, fmt.Errorf("load disk nodes: %v", err)
+ }
+ // The decoded nodes are grouped as {ownerHash: {path: trieNode}}.
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ for _, entry := range encoded {
+ subset := make(map[string]*trienode.Node)
+ for _, n := range entry.Nodes {
+ if len(n.Blob) > 0 {
+ subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+ } else {
+ subset[string(n.Path)] = trienode.NewDeleted()
+ }
+ }
+ nodes[entry.Owner] = subset
+ }
+ // Calculate the internal state transitions by id difference.
+ base := newDiskLayer(root, id, db, nil, newNodeBuffer(db.bufferSize, nodes, id-stored))
+ return base, nil
+}
+
+// loadDiffLayer reads the next sections of a layer journal, reconstructing a new
+// diff and verifying that it can be linked to the requested parent.
+// The base disk layer is resolved first; each subsequent diff layer is then
+// loaded on top of the former.
+func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
+ // Read the next diff journal entry
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ // The first read may fail with EOF, marking the end of the journal
+ if err == io.EOF {
+ return parent, nil
+ }
+ return nil, fmt.Errorf("load diff root: %v", err)
+ }
+ var block uint64
+ if err := r.Decode(&block); err != nil {
+ return nil, fmt.Errorf("load block number: %v", err)
+ }
+ // Read in-memory trie nodes from journal
+ var encoded []journalNodes
+ if err := r.Decode(&encoded); err != nil {
+ return nil, fmt.Errorf("load diff nodes: %v", err)
+ }
+ // The decoded nodes are grouped as {ownerHash: {path: trieNode}}.
+ nodes := make(map[common.Hash]map[string]*trienode.Node)
+ for _, entry := range encoded {
+ subset := make(map[string]*trienode.Node)
+ for _, n := range entry.Nodes {
+ if len(n.Blob) > 0 {
+ subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
+ } else {
+ subset[string(n.Path)] = trienode.NewDeleted()
+ }
+ }
+ nodes[entry.Owner] = subset
+ }
+
+ // Read state changes from journal
+ var (
+ jaccounts journalAccounts
+ jstorages []journalStorage
+ accounts = make(map[common.Address][]byte)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ incomplete = make(map[common.Address]struct{})
+ )
+ // Read the account changes of this layer from the journal.
+ if err := r.Decode(&jaccounts); err != nil {
+ return nil, fmt.Errorf("load diff accounts: %v", err)
+ }
+ for i, addr := range jaccounts.Addresses {
+ accounts[addr] = jaccounts.Accounts[i]
+ }
+ if err := r.Decode(&jstorages); err != nil {
+ return nil, fmt.Errorf("load diff storages: %v", err)
+ }
+
+ for _, entry := range jstorages {
+ set := make(map[common.Hash][]byte)
+ for i, h := range entry.Hashes {
+ hasStorageSlot := len(entry.Slots[i]) > 0
+ if hasStorageSlot {
+ set[h] = entry.Slots[i]
+ } else {
+ set[h] = nil
+ }
+ }
+ if entry.Incomplete {
+ incomplete[entry.Account] = struct{}{}
+ }
+ storages[entry.Account] = set
+ }
+ // Recursively load the next diff layer until reaching the end of the journal.
+ return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, incomplete)), r)
+}
+
+// loadJournal tries to parse the layer journal from the disk.
+func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
+ // Read the journal raw data from the disk
+ journal := rawdb.ReadTrieJournal(db.diskdb)
+ if len(journal) == 0 {
+ return nil, errMissJournal
+ }
+ // Construct an RLP stream to decode the journal with no size limit.
+ r := rlp.NewStream(bytes.NewReader(journal), 0)
+
+ // First, resolve the journal version from the stream.
+ version, err := r.Uint64()
+ if err != nil {
+ return nil, errMissVersion
+ }
+ if version != journalVersion {
+ return nil, fmt.Errorf("%w want %d got %d", errUnexpectedVersion, journalVersion, version)
+ }
+ // Second, resolve the disk layer root and ensure it matches the
+ // persistent state. The journal version has already been verified,
+ // so everything else is expected to decode properly.
+ var root common.Hash
+ if err := r.Decode(&root); err != nil {
+ return nil, errMissDiskRoot
+ }
+ // The journal does not match the persistent state, discard it. This
+ // can happen when geth crashes without persisting the journal.
+ if !bytes.Equal(root.Bytes(), diskRoot.Bytes()) {
+ return nil, fmt.Errorf("%w want %x got %x", errUnmatchedJournal, root, diskRoot)
+ }
+
+ // Load the disk layer from the journal
+ base, err := db.loadDiskLayer(r)
+ if err != nil {
+ return nil, err
+ }
+ // Load all the diff layers from the journal (parent, RLP stream)
+ head, err := db.loadDiffLayer(base, r)
+ if err != nil {
+ return nil, err
+ }
+ log.Debug("Loaded layer journal", "diskroot", diskRoot, "diffhead", head.rootHash())
+ return head, nil
+}
+
+// loadLayers loads the pre-existing state layers backed by the key-value store,
+// returning the journaled head layer if available, or the bare disk layer otherwise.
+func (db *Database) loadLayers() layer {
+ // Retrieve the root node of persistent state.
+ _, root := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ root = types.TrieRootHash(root)
+
+ // Load the layers by resolving the journal
+ head, err := db.loadJournal(root)
+ if err == nil {
+ return head
+ }
+ // The journal is missing or does not match the persistent state;
+ // discard it. Log the discard, but avoid noise when the database is
+ // created from scratch.
+ if !(root == types.EmptyRootHash && errors.Is(err, errMissJournal)) {
+ log.Info("Failed to load journal, discard it", "err", err)
+ }
+ // Return a single disk layer with the persistent state, treating all
+ // diff layers as lost.
+ return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newNodeBuffer(db.bufferSize, nil, 0))
+}
+
+// journal implements the layer interface, marshaling the unflushed trie nodes
+// along with the layer metadata into the provided writer.
+func (dl *diskLayer) journal(w io.Writer) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+ if dl.stale {
+ return errSnapshotStale
+ }
+ // Step one, write the disk root into the journal.
+ if err := rlp.Encode(w, dl.root); err != nil {
+ return err
+ }
+ // Step two, write the corresponding state id into the journal
+ if err := rlp.Encode(w, dl.id); err != nil {
+ return err
+ }
+ // Step three, write all unwritten nodes into the journal
+ nodes := make([]journalNodes, 0, len(dl.buffer.nodes))
+ for owner, subset := range dl.buffer.nodes {
+ entry := journalNodes{Owner: owner}
+ for path, node := range subset {
+ entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
+ }
+ nodes = append(nodes, entry)
+ }
+ if err := rlp.Encode(w, nodes); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb disk layer", "root", dl.root, "nodes", len(dl.buffer.nodes))
+ return nil
+}
+
+// journal implements the layer interface, writing the memory layer contents
+// into a buffer to be stored in the database as the layer journal.
+func (dl *diffLayer) journal(w io.Writer) error {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+ // journal the parent layer first (n-1)
+ if err := dl.parent.journal(w); err != nil {
+ return err
+ }
+ // Everything below has been journaled; persist this layer (n) too.
+ if err := rlp.Encode(w, dl.root); err != nil {
+ return err
+ }
+ if err := rlp.Encode(w, dl.block); err != nil {
+ return err
+ }
+ // Write the accumulated trie nodes into buffer
+ nodes := make([]journalNodes, 0, len(dl.nodes))
+ for owner, subset := range dl.nodes {
+ entry := journalNodes{Owner: owner}
+ for path, node := range subset {
+ entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
+ }
+ nodes = append(nodes, entry)
+ }
+ if err := rlp.Encode(w, nodes); err != nil {
+ return err
+ }
+ // Write the accumulated state changes into buffer
+ var jacct journalAccounts
+ for addr, account := range dl.states.Accounts {
+ jacct.Addresses = append(jacct.Addresses, addr)
+ jacct.Accounts = append(jacct.Accounts, account)
+ }
+ if err := rlp.Encode(w, jacct); err != nil {
+ return err
+ }
+ storage := make([]journalStorage, 0, len(dl.states.Storages))
+ for addr, slots := range dl.states.Storages {
+ entry := journalStorage{Account: addr}
+ // If the storage is incomplete, mark it as such
+ if _, ok := dl.states.Incomplete[addr]; ok {
+ entry.Incomplete = true
+ }
+ for slotHash, slot := range slots {
+ entry.Hashes = append(entry.Hashes, slotHash)
+ entry.Slots = append(entry.Slots, slot)
+ }
+ storage = append(storage, entry)
+ }
+ if err := rlp.Encode(w, storage); err != nil {
+ return err
+ }
+ log.Debug("Journaled pathdb diff layer", "root", dl.root, "parent", dl.parent.rootHash(), "id", dl.stateID(), "block", dl.block, "nodes", len(dl.nodes))
+ return nil
+}
+
+// Journal commits an entire diff hierarchy to disk into a single journal entry.
+// This is meant to be used during shutdown to persist the layer without
+// flattening everything down (bad for reorgs). It also marks the database as
+// read-only to reject any further mutations to disk.
+func (db *Database) Journal(root common.Hash) error {
+ // Retrieve the head layer to journal from.
+ l := db.tree.get(root)
+ if l == nil {
+ return fmt.Errorf("triedb layer [%#x] missing", root)
+ }
+ // Run the journaling
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errSnapshotReadOnly
+ }
+ // First, write out the journal metadata.
+ journal := new(bytes.Buffer)
+ // Write the journal version into the journal buffer.
+ if err := rlp.Encode(journal, journalVersion); err != nil {
+ return err
+ }
+
+ // The stored state in disk might be empty, convert the
+ // root to emptyRoot in this case.
+ _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ diskroot = types.TrieRootHash(diskroot)
+ // Second, write out the state root stored on disk; all layers on
+ // top must be continuous with it.
+ if err := rlp.Encode(journal, diskroot); err != nil {
+ return err
+ }
+
+ // Finally, write out the journal of each layer recursively, bottom-up.
+ if err := l.journal(journal); err != nil {
+ return err
+ }
+ // Store the journal into the database and return
+ rawdb.WriteTrieJournal(db.diskdb, journal.Bytes())
+
+ // Set the db in read only mode to reject all following mutations to disk.
+ db.readOnly = true
+ log.Info("Stored journal in triedb, db is Readonly mode now.", "disk", diskroot, "size", common.StorageSize(journal.Len()))
+ return nil
+}
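
For orientation, the journal produced above is a single RLP stream with a fixed field order: version, the disk root check value, the disk layer's fields (root, state id, buffered nodes), then one (root, block, nodes, accounts, storages) tuple per diff layer from bottom to top. The following is a simplified, self-contained sketch of that framing; the stand-in values and the elided node sets are illustrative, not the real pathdb structures.

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	journal := new(bytes.Buffer)
	rlp.Encode(journal, uint64(1))     // journal version
	rlp.Encode(journal, common.Hash{}) // disk root, checked against persistent state on load
	// Disk layer entry: its root, its state id, then the buffered nodes (elided here).
	rlp.Encode(journal, common.Hash{})
	rlp.Encode(journal, uint64(42))
	// Each diff layer would then append root, block number, nodes and state changes.
	fmt.Printf("journal prefix: %x\n", journal.Bytes())
}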
diff --git a/trie/triedb/pathdb/layertree.go b/trie/triedb/pathdb/layertree.go
new file mode 100644
index 0000000000..9352fa3f89
--- /dev/null
+++ b/trie/triedb/pathdb/layertree.go
@@ -0,0 +1,214 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+)
+
+// layerTree is a group of state layers identified by the state root.
+// This structure defines a few basic operations for manipulating
+// state layers linked with each other in a tree structure. It's
+// thread-safe to use. However, callers need to ensure the thread-safety
+// of the referenced layer by themselves.
+type layerTree struct {
+ lock sync.RWMutex
+ layers map[common.Hash]layer
+}
+
+// newLayerTree constructs the layerTree with the given head layer.
+func newLayerTree(head layer) *layerTree {
+ tree := new(layerTree)
+ tree.reset(head)
+ return tree
+}
+
+// reset initializes the layerTree by the given head layer.
+// All the ancestors will be iterated out and linked in the tree.
+func (tree *layerTree) reset(head layer) {
+ tree.lock.Lock()
+ defer tree.lock.Unlock()
+
+ var layers = make(map[common.Hash]layer)
+ for head != nil {
+ layers[head.rootHash()] = head
+ head = head.parentLayer()
+ }
+ tree.layers = layers
+}
+
+// get retrieves a layer belonging to the given state root.
+func (tree *layerTree) get(root common.Hash) layer {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ return tree.layers[types.TrieRootHash(root)]
+}
+
+// forEach iterates the stored layers inside and applies the
+// given callback on them.
+func (tree *layerTree) forEach(onLayer func(layer)) {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ for _, layer := range tree.layers {
+ onLayer(layer)
+ }
+}
+
+// len returns the number of layers cached.
+func (tree *layerTree) len() int {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ return len(tree.layers)
+}
+
+// add inserts a new layer into the tree if it can be linked to an existing old parent.
+func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
+ // Reject noop updates to avoid self-loops. This is a special case that can
+ // happen for clique networks and proof-of-stake networks where empty blocks
+ // don't modify the state (0 block subsidy).
+ //
+ // Although we could silently ignore this internally, it should be the caller's
+ // responsibility to avoid even attempting to insert such a layer.
+ root, parentRoot = types.TrieRootHash(root), types.TrieRootHash(parentRoot)
+ if root == parentRoot {
+ return errors.New("layer cycle")
+ }
+ parent := tree.get(parentRoot)
+ if parent == nil {
+ return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot)
+ }
+ l := parent.update(root, parent.stateID()+1, block, nodes.Flatten(), states)
+
+ tree.lock.Lock()
+ tree.layers[l.rootHash()] = l
+ tree.lock.Unlock()
+ return nil
+}
+
+// cap traverses downwards the diff tree until the number of allowed diff layers
+// are crossed. All diffs beyond the permitted number are flattened downwards.
+func (tree *layerTree) cap(root common.Hash, layers int) error {
+ // Retrieve the head layer to cap from
+ root = types.TrieRootHash(root)
+ l := tree.get(root)
+ if l == nil {
+ return fmt.Errorf("triedb layer [%#x] missing", root)
+ }
+ diff, ok := l.(*diffLayer)
+ if !ok {
+ return fmt.Errorf("triedb layer [%#x] is disk layer", root)
+ }
+ tree.lock.Lock()
+ defer tree.lock.Unlock()
+
+ // If full commit was requested, flatten the diffs and merge onto disk
+ if layers == 0 {
+ base, err := diff.persist(true)
+ if err != nil {
+ return err
+ }
+ // Replace the entire layer tree with the flat base
+ tree.layers = map[common.Hash]layer{base.rootHash(): base}
+ return nil
+ }
+ // Dive until we run out of layers or reach the persistent database
+ for i := 0; i < layers-1; i++ {
+ // If we still have diff layers below, continue down
+ if parent, ok := diff.parentLayer().(*diffLayer); ok {
+ diff = parent
+ } else {
+ // Diff stack too shallow, return without modifications
+ return nil
+ }
+ }
+ // We're out of layers, flatten anything below, stopping if it's the disk or if
+ // the memory limit is not yet exceeded.
+ switch parent := diff.parentLayer().(type) {
+ case *diskLayer:
+ return nil
+
+ case *diffLayer:
+ // Hold the lock to prevent any read operations until the new
+ // parent is linked correctly.
+ diff.lock.Lock()
+
+ base, err := parent.persist(false)
+ if err != nil {
+ diff.lock.Unlock()
+ return err
+ }
+ tree.layers[base.rootHash()] = base
+ diff.parent = base
+
+ diff.lock.Unlock()
+
+ default:
+ panic(fmt.Sprintf("unknown data layer in triedb: %T", parent))
+ }
+ // Remove any layer that is stale or links into a stale layer
+ children := make(map[common.Hash][]common.Hash)
+ for root, layer := range tree.layers {
+ if dl, ok := layer.(*diffLayer); ok {
+ parent := dl.parentLayer().rootHash()
+ children[parent] = append(children[parent], root)
+ }
+ }
+ var remove func(root common.Hash)
+ remove = func(root common.Hash) {
+ delete(tree.layers, root)
+ for _, child := range children[root] {
+ remove(child)
+ }
+ delete(children, root)
+ }
+ for root, layer := range tree.layers {
+ if dl, ok := layer.(*diskLayer); ok && dl.isStale() {
+ remove(root)
+ }
+ }
+ return nil
+}
+
+// bottom returns the bottom-most disk layer in this tree.
+func (tree *layerTree) bottom() *diskLayer {
+ tree.lock.RLock()
+ defer tree.lock.RUnlock()
+
+ if len(tree.layers) == 0 {
+ return nil // Shouldn't happen, empty tree
+ }
+ // Pick an arbitrary layer as the entry point.
+ var current layer
+ for _, layer := range tree.layers {
+ current = layer
+ break
+ }
+ for current.parentLayer() != nil {
+ current = current.parentLayer()
+ }
+ return current.(*diskLayer)
+}
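
The recursive removal inside cap is the subtle part: once a disk layer goes stale, its entire subtree of diff layers must be dropped with it. Below is a toy sketch of the same child-index-plus-recursion pattern, using strings instead of common.Hash and omitting the locking.

package main

import "fmt"

func main() {
	parents := map[string]string{"b": "a", "c": "b", "d": "a"} // child -> parent
	children := make(map[string][]string)
	for child, parent := range parents {
		children[parent] = append(children[parent], child)
	}
	var remove func(root string)
	remove = func(root string) {
		fmt.Println("dropping", root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	remove("a") // drops a and, transitively, b, c and d
}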
diff --git a/trie/triedb/pathdb/metrics.go b/trie/triedb/pathdb/metrics.go
new file mode 100644
index 0000000000..9e2b1dcbf5
--- /dev/null
+++ b/trie/triedb/pathdb/metrics.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import "github.com/ethereum/go-ethereum/metrics"
+
+var (
+ cleanHitMeter = metrics.NewRegisteredMeter("pathdb/clean/hit", nil)
+ cleanMissMeter = metrics.NewRegisteredMeter("pathdb/clean/miss", nil)
+ cleanReadMeter = metrics.NewRegisteredMeter("pathdb/clean/read", nil)
+ cleanWriteMeter = metrics.NewRegisteredMeter("pathdb/clean/write", nil)
+
+ dirtyHitMeter = metrics.NewRegisteredMeter("pathdb/dirty/hit", nil)
+ dirtyMissMeter = metrics.NewRegisteredMeter("pathdb/dirty/miss", nil)
+ dirtyReadMeter = metrics.NewRegisteredMeter("pathdb/dirty/read", nil)
+ dirtyWriteMeter = metrics.NewRegisteredMeter("pathdb/dirty/write", nil)
+ dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
+ dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
+ diskFalseMeter = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
+
+ commitTimeTimer = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
+ commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
+ commitBytesMeter = metrics.NewRegisteredMeter("pathdb/commit/bytes", nil)
+
+ gcNodesMeter = metrics.NewRegisteredMeter("pathdb/gc/nodes", nil)
+ gcBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/bytes", nil)
+
+ diffLayerBytesMeter = metrics.NewRegisteredMeter("pathdb/diff/bytes", nil)
+ diffLayerNodesMeter = metrics.NewRegisteredMeter("pathdb/diff/nodes", nil)
+
+ historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
+ historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
+ historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil)
+)
diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go
new file mode 100644
index 0000000000..b024986d3a
--- /dev/null
+++ b/trie/triedb/pathdb/nodebuffer.go
@@ -0,0 +1,276 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+)
+
+// nodebuffer is a collection of modified trie nodes to aggregate the disk
+// write. The content of the nodebuffer must be checked before reading from
+// disk, since it holds data that has not yet been written.
+//
+// nodebuffer serves as an intermediate layer for storing changes before flushing to disk,
+// making it easier for the diskLayer to manage the changes and construct new diskLayers when flushing.
+type nodebuffer struct {
+ layers uint64 // The number of diff layers aggregated inside
+ size uint64 // The size of aggregated writes
+ limit uint64 // The maximum memory allowance in bytes
+ nodes map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path
+}
+
+// newNodeBuffer initializes the node buffer with the provided nodes.
+func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodebuffer {
+ if nodes == nil {
+ nodes = make(map[common.Hash]map[string]*trienode.Node)
+ }
+ var size uint64
+ for _, subset := range nodes {
+ for path, n := range subset {
+ size += uint64(len(n.Blob) + len(path))
+ }
+ }
+ return &nodebuffer{
+ layers: layers,
+ nodes: nodes,
+ size: size,
+ limit: uint64(limit),
+ }
+}
+
+// node retrieves the trie node with the given node info.
+func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) {
+ subset, ok := b.nodes[owner]
+ if !ok {
+ return nil, nil
+ }
+ n, ok := subset[string(path)]
+ if !ok {
+ return nil, nil
+ }
+ if n.Hash != hash {
+ dirtyFalseMeter.Mark(1)
+ log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash)
+ return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path)
+ }
+ return n, nil
+}
+
+// commit merges the dirty nodes into the nodebuffer. This operation does not
+// take ownership of the nodes map, which belongs to the bottom-most diff
+// layer; it only holds references to the nodes, which are safe to copy.
+func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer {
+ var (
+ delta int64
+ overwrite int64
+ overwriteSize int64
+ )
+ for owner, subset := range nodes {
+ current, exist := b.nodes[owner]
+ if !exist {
+ // Allocate a new map for the subset instead of claiming it directly
+ // from the passed map to avoid potential concurrent map read/write.
+ // The nodes belonging to the original diff layer remain accessible
+ // even after merging, so the ownership of the nodes map stays with
+ // the original layer and any mutation of it must be prevented.
+ current = make(map[string]*trienode.Node)
+ for path, n := range subset {
+ current[path] = n
+ delta += int64(len(n.Blob) + len(path))
+ }
+ b.nodes[owner] = current
+ continue
+ }
+ for path, n := range subset {
+ if orig, exist := current[path]; !exist {
+ delta += int64(len(n.Blob) + len(path))
+ } else {
+ delta += int64(len(n.Blob) - len(orig.Blob))
+ overwrite++
+ overwriteSize += int64(len(orig.Blob) + len(path))
+ }
+ current[path] = n
+ }
+ b.nodes[owner] = current
+ }
+ b.updateSize(delta)
+ b.layers++
+ gcNodesMeter.Mark(overwrite)
+ gcBytesMeter.Mark(overwriteSize)
+ return b
+}
+
+// revert is the reverse operation of commit. It also merges the provided nodes
+// into the nodebuffer, the difference is that the provided node set should
+// revert the changes made by the last state transition.
+func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
+ // Short circuit if no embedded state transition to revert.
+ if b.layers == 0 {
+ return errStateUnrecoverable
+ }
+ b.layers--
+
+ // Reset the entire buffer if only a single transition left.
+ if b.layers == 0 {
+ b.reset()
+ return nil
+ }
+ var delta int64
+ for owner, subset := range nodes {
+ current, ok := b.nodes[owner]
+ if !ok {
+ panic(fmt.Sprintf("non-existent subset (%x)", owner))
+ }
+ for path, n := range subset {
+ orig, ok := current[path]
+ if !ok {
+ // There is a special case in MPT where one child is removed from
+ // a fullNode that has only two children, and a new child at a
+ // different position is immediately inserted into the fullNode.
+ // In this case, the clean child of the fullNode will also be
+ // marked as dirty because of node collapse and expansion.
+ //
+ // In case of database rollback, don't panic if this "clean"
+ // node occurs which is not present in buffer.
+ var nhash common.Hash
+ if owner == (common.Hash{}) {
+ _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path))
+ } else {
+ _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path))
+ }
+ // Ignore the clean node in the case described above.
+ if nhash == n.Hash {
+ continue
+ }
+ panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
+ }
+ current[path] = n
+ delta += int64(len(n.Blob)) - int64(len(orig.Blob))
+ }
+ }
+ b.updateSize(delta)
+ return nil
+}
+
+// updateSize updates the total cache size by the given delta.
+func (b *nodebuffer) updateSize(delta int64) {
+ size := int64(b.size) + delta
+ if size >= 0 {
+ b.size = uint64(size)
+ return
+ }
+ s := b.size
+ b.size = 0
+ log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta))
+}
+
+// reset cleans up the disk cache.
+func (b *nodebuffer) reset() {
+ b.layers = 0
+ b.size = 0
+ b.nodes = make(map[common.Hash]map[string]*trienode.Node)
+}
+
+// empty reports whether the nodebuffer contains any aggregated state transition.
+func (b *nodebuffer) empty() bool {
+ return b.layers == 0
+}
+
+// setSize sets the buffer size to the provided number, and invokes a flush
+// operation if the current memory usage exceeds the new limit.
+func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error {
+ b.limit = uint64(size)
+ return b.flush(db, clean, id, false)
+}
+
+// flush persists the in-memory dirty trie nodes into the disk if the configured
+// memory threshold is reached. Note, all data must be written atomically.
+func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error {
+ if b.size <= b.limit && !force {
+ return nil
+ }
+ // Ensure the target state id is aligned with the internal counter.
+ head := rawdb.ReadPersistentStateID(db)
+ if head+b.layers != id {
+ return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id)
+ }
+ var (
+ start = time.Now()
+ batch = db.NewBatchWithSize(int(b.size))
+ )
+ nodes := writeNodes(batch, b.nodes, clean)
+ rawdb.WritePersistentStateID(batch, id)
+
+ // Flush all mutations in a single batch
+ size := batch.ValueSize()
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ commitBytesMeter.Mark(int64(size))
+ commitNodesMeter.Mark(int64(nodes))
+ commitTimeTimer.UpdateSince(start)
+ log.Debug("Persisted pathdb nodes", "nodes", len(b.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
+ b.reset()
+ return nil
+}
+
+// writeNodes writes the trie nodes into the provided database batch.
+// Note this function will also inject all the newly written nodes
+// into clean cache.
+func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) {
+ for owner, subset := range nodes {
+ for path, n := range subset {
+ if n.IsDeleted() {
+ if owner == (common.Hash{}) {
+ rawdb.DeleteAccountTrieNode(batch, []byte(path))
+ } else {
+ rawdb.DeleteStorageTrieNode(batch, owner, []byte(path))
+ }
+ if clean != nil {
+ clean.Del(cacheKey(owner, []byte(path)))
+ }
+ } else {
+ if owner == (common.Hash{}) {
+ rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob)
+ } else {
+ rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob)
+ }
+ if clean != nil {
+ clean.Set(cacheKey(owner, []byte(path)), n.Blob)
+ }
+ }
+ }
+ total += len(subset)
+ }
+ return total
+}
+
+// cacheKey constructs the unique key for the clean cache.
+func cacheKey(owner common.Hash, path []byte) []byte {
+ if owner == (common.Hash{}) {
+ return path
+ }
+ return append(owner.Bytes(), path...)
+}
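
As a quick illustration of the key scheme: account-trie nodes are cached under their raw path, while storage-trie nodes are prefixed with the 32-byte owner hash, so the two namespaces cannot collide in the shared clean cache. A self-contained copy of the helper for demonstration (the sample path and owner below are made up):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func cacheKey(owner common.Hash, path []byte) []byte {
	if owner == (common.Hash{}) {
		return path // account trie: the path alone is unique
	}
	return append(owner.Bytes(), path...) // storage trie: owner-prefixed
}

func main() {
	path := []byte{0x01, 0x02}
	fmt.Printf("account node key: %x\n", cacheKey(common.Hash{}, path))
	fmt.Printf("storage node key: %x\n", cacheKey(common.HexToHash("0xdead"), path))
}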
diff --git a/trie/triedb/pathdb/testutils.go b/trie/triedb/pathdb/testutils.go
new file mode 100644
index 0000000000..3feb6217d2
--- /dev/null
+++ b/trie/triedb/pathdb/testutils.go
@@ -0,0 +1,157 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/trie/triestate"
+ "golang.org/x/exp/slices"
+)
+
+// testHasher is a test utility for computing root hash of a batch of state
+// elements. The hash algorithm sorts all the elements lexicographically,
+// concatenates the keys and values in turn, and hashes the concatenated
+// bytes. Besides the root hash, a nodeset containing all the changes made
+// to the hasher is returned once Commit is called.
+type testHasher struct {
+ owner common.Hash // owner identifier
+ root common.Hash // original root
+ dirties map[common.Hash][]byte // dirty states
+ cleans map[common.Hash][]byte // clean states
+}
+
+// newTestHasher constructs a hasher object with provided states.
+func newTestHasher(owner common.Hash, root common.Hash, cleans map[common.Hash][]byte) (*testHasher, error) {
+ if cleans == nil {
+ cleans = make(map[common.Hash][]byte)
+ }
+ if got, _ := hash(cleans); got != root {
+ return nil, fmt.Errorf("state root mismatched, want: %x, got: %x", root, got)
+ }
+ return &testHasher{
+ owner: owner,
+ root: root,
+ dirties: make(map[common.Hash][]byte),
+ cleans: cleans,
+ }, nil
+}
+
+// Get returns the value for key stored in the trie.
+func (h *testHasher) Get(key []byte) ([]byte, error) {
+ hash := common.BytesToHash(key)
+ val, ok := h.dirties[hash]
+ if ok {
+ return val, nil
+ }
+ return h.cleans[hash], nil
+}
+
+// Update associates key with value in the trie.
+func (h *testHasher) Update(key, value []byte) error {
+ h.dirties[common.BytesToHash(key)] = common.CopyBytes(value)
+ return nil
+}
+
+// Delete removes any existing value for key from the trie.
+func (h *testHasher) Delete(key []byte) error {
+ h.dirties[common.BytesToHash(key)] = nil
+ return nil
+}
+
+// Commit computes the new hash of the states and returns the set with all
+// state changes.
+func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+ var (
+ nodes = make(map[common.Hash][]byte)
+ set = trienode.NewNodeSet(h.owner)
+ )
+ for hash, val := range h.cleans {
+ nodes[hash] = val
+ }
+ for hash, val := range h.dirties {
+ nodes[hash] = val
+ if bytes.Equal(val, h.cleans[hash]) {
+ continue
+ }
+ if len(val) == 0 {
+ set.AddNode(hash.Bytes(), trienode.NewDeleted())
+ } else {
+ set.AddNode(hash.Bytes(), trienode.New(crypto.Keccak256Hash(val), val))
+ }
+ }
+ root, blob := hash(nodes)
+
+ // Include the dirty root node as well.
+ if root != types.EmptyRootHash && root != h.root {
+ set.AddNode(nil, trienode.New(root, blob))
+ }
+ if root == types.EmptyRootHash && h.root != types.EmptyRootHash {
+ set.AddNode(nil, trienode.NewDeleted())
+ }
+ return root, set
+}
+
+// hash performs the hash computation upon the provided states.
+func hash(states map[common.Hash][]byte) (common.Hash, []byte) {
+ var hs []common.Hash
+ for hash := range states {
+ hs = append(hs, hash)
+ }
+ // Sort hashes.
+ slices.SortFunc(hs, common.Hash.Cmp)
+
+ var input []byte
+ for _, hash := range hs {
+ if len(states[hash]) == 0 {
+ continue
+ }
+ input = append(input, hash.Bytes()...)
+ input = append(input, states[hash]...)
+ }
+ if len(input) == 0 {
+ return types.EmptyRootHash, nil
+ }
+ return crypto.Keccak256Hash(input), input
+}
+
+type hashLoader struct {
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
+}
+
+func newHashLoader(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *hashLoader {
+ return &hashLoader{
+ accounts: accounts,
+ storages: storages,
+ }
+}
+
+// OpenTrie opens the main account trie.
+func (l *hashLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
+ return newTestHasher(common.Hash{}, root, l.accounts)
+}
+
+// OpenStorageTrie opens the storage trie of an account.
+func (l *hashLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
+ return newTestHasher(addrHash, root, l.storages[addrHash])
+}
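
To see the test hash scheme in isolation: sort the keys, concatenate key||value for every non-empty value, and keccak the result. The sketch below re-implements it standalone for illustration (sort.Slice instead of slices.SortFunc is an equivalent choice):

package main

import (
	"fmt"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func testHash(states map[common.Hash][]byte) common.Hash {
	keys := make([]common.Hash, 0, len(states))
	for k := range states {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i].Cmp(keys[j]) < 0 })

	var input []byte
	for _, k := range keys {
		if len(states[k]) == 0 {
			continue // deleted entries do not contribute
		}
		input = append(input, k.Bytes()...)
		input = append(input, states[k]...)
	}
	if len(input) == 0 {
		return types.EmptyRootHash
	}
	return crypto.Keccak256Hash(input)
}

func main() {
	root := testHash(map[common.Hash][]byte{
		common.HexToHash("0x01"): []byte("a"),
		common.HexToHash("0x02"): []byte("b"),
	})
	fmt.Println("toy root:", root.Hex())
}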
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index ddadbbf371..bffa5a90f1 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -42,35 +42,13 @@ func (n *Node) IsDeleted() bool {
return n.Hash == (common.Hash{})
}
-// NodeWithPrev wraps the Node with the previous node value attached.
-type NodeWithPrev struct {
- *Node
- Prev []byte // Encoded original value, nil means it's non-existent
-}
-
-// Unwrap returns the internal Node object.
-func (n *NodeWithPrev) Unwrap() *Node {
- return n.Node
-}
-
-// Size returns the total memory size used by this node. It overloads
-// the function in Node by counting the size of previous value as well.
-func (n *NodeWithPrev) Size() int {
- return n.Node.Size() + len(n.Prev)
-}
-
// New constructs a node with provided node information.
func New(hash common.Hash, blob []byte) *Node {
return &Node{Hash: hash, Blob: blob}
}
-// NewNodeWithPrev constructs a node with provided node information.
-func NewNodeWithPrev(hash common.Hash, blob []byte, prev []byte) *NodeWithPrev {
- return &NodeWithPrev{
- Node: New(hash, blob),
- Prev: prev,
- }
-}
+// NewDeleted constructs a node which is deleted.
+func NewDeleted() *Node { return New(common.Hash{}, nil) }
// leaf represents a trie leaf node
type leaf struct {
@@ -83,7 +61,7 @@ type leaf struct {
type NodeSet struct {
Owner common.Hash
Leaves []*leaf
- Nodes map[string]*NodeWithPrev
+ Nodes map[string]*Node
updates int // the count of updated and inserted nodes
deletes int // the count of deleted nodes
}
@@ -93,7 +71,7 @@ type NodeSet struct {
func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{
Owner: owner,
- Nodes: make(map[string]*NodeWithPrev),
+ Nodes: make(map[string]*Node),
}
}
@@ -107,12 +85,12 @@ func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
// Bottom-up, longest path first
sort.Sort(sort.Reverse(paths))
for _, path := range paths {
- callback(path, set.Nodes[path].Unwrap())
+ callback(path, set.Nodes[path])
}
}
// AddNode adds the provided node into set.
-func (set *NodeSet) AddNode(path []byte, n *NodeWithPrev) {
+func (set *NodeSet) AddNode(path []byte, n *Node) {
if n.IsDeleted() {
set.deletes += 1
} else {
@@ -122,7 +100,7 @@ func (set *NodeSet) AddNode(path []byte, n *NodeWithPrev) {
}
// Merge adds a set of nodes into the set.
-func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*NodeWithPrev) error {
+func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
if set.Owner != owner {
return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner)
}
@@ -167,17 +145,13 @@ func (set *NodeSet) Summary() string {
if set.Nodes != nil {
for path, n := range set.Nodes {
// Deletion
if n.IsDeleted() {
- fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.Prev)
- continue
- }
- // Insertion
- if len(n.Prev) == 0 {
- fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash)
+ fmt.Fprintf(out, " [-]: %x\n", path)
continue
}
- // Update
- fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev)
+ // Insertion or update
+ fmt.Fprintf(out, " [+/*]: %x -> %v\n", path, n.Hash)
}
}
for _, n := range set.Leaves {
@@ -213,3 +187,12 @@ func (set *MergedNodeSet) Merge(other *NodeSet) error {
set.Sets[other.Owner] = other
return nil
}
+
+// Flatten returns a two-dimensional map for internal nodes.
+func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node {
+ nodes := make(map[common.Hash]map[string]*Node)
+ for owner, set := range set.Sets {
+ nodes[owner] = set.Nodes
+ }
+ return nodes
+}
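
A short sketch of how Flatten is consumed: build a NodeSet per trie, merge the sets, and flatten into the owner -> path -> node map that the pathdb layers store. The path and blob below are arbitrary sample data.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

func main() {
	set := trienode.NewNodeSet(common.Hash{}) // zero owner = account trie
	blob := []byte{0x01}
	set.AddNode([]byte{0x0a}, trienode.New(crypto.Keccak256Hash(blob), blob))

	merged := trienode.NewMergedNodeSet()
	if err := merged.Merge(set); err != nil {
		panic(err)
	}
	for owner, subset := range merged.Flatten() {
		fmt.Printf("owner %x carries %d dirty node(s)\n", owner, len(subset))
	}
}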
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
index e5d0b87cb7..0e00b67d78 100644
--- a/trie/triestate/state.go
+++ b/trie/triestate/state.go
@@ -16,13 +16,254 @@
package triestate
-import "github.com/ethereum/go-ethereum/common"
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "golang.org/x/crypto/sha3"
+)
+
+// Trie is an Ethereum state trie, which can be implemented by an Ethereum
+// Merkle Patricia tree or a Verkle tree.
+type Trie interface {
+ // Get returns the value for key stored in the trie.
+ Get(key []byte) ([]byte, error)
+
+ // Update associates key with value in the trie.
+ Update(key, value []byte) error
+
+ // Delete removes any existing value for key from the trie.
+ Delete(key []byte) error
+
+ // Commit the trie and returns a set of dirty nodes generated along with
+ // the new root hash.
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet)
+}
+
+// TrieLoader wraps functions to load tries.
+type TrieLoader interface {
+ // OpenTrie opens the main account trie.
+ OpenTrie(root common.Hash) (Trie, error)
+
+ // OpenStorageTrie opens the storage trie of an account.
+ OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
+}
// Set represents a collection of mutated states during a state transition.
// The value refers to the original content of state before the transition
// is made. Nil means that the state was not present previously.
type Set struct {
- Accounts map[common.Hash][]byte // Mutated account set, nil means the account was not present
- Storages map[common.Hash]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
- Incomplete map[common.Hash]struct{} // Indicator whether the storage slot is incomplete due to large deletion
+ Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
+ Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
+ Incomplete map[common.Address]struct{} // Indicator whether the storage slot is incomplete due to large deletion
+ size common.StorageSize // Approximate size of set
+}
+
+// New constructs the state set with provided data.
+func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
+ return &Set{
+ Accounts: accounts,
+ Storages: storages,
+ Incomplete: incomplete,
+ }
+}
+
+// Size returns the approximate memory size occupied by the set.
+func (s *Set) Size() common.StorageSize {
+ if s.size != 0 {
+ return s.size
+ }
+ for _, account := range s.Accounts {
+ s.size += common.StorageSize(common.AddressLength + len(account))
+ }
+ for _, slots := range s.Storages {
+ for _, val := range slots {
+ s.size += common.StorageSize(common.HashLength + len(val))
+ }
+ s.size += common.StorageSize(common.AddressLength)
+ }
+ s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
+ return s.size
+}
+
+// context wraps all fields for executing state diffs.
+type context struct {
+ prevRoot common.Hash
+ postRoot common.Hash
+ accounts map[common.Address][]byte
+ storages map[common.Address]map[common.Hash][]byte
+ accountTrie Trie
+ nodes *trienode.MergedNodeSet
+}
+
+// Apply traverses the provided state diffs, applies them to the associated
+// post-state, and returns the generated dirty trie nodes. The state can be
+// loaded via the provided trie loader.
+func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) {
+ tr, err := loader.OpenTrie(postRoot)
+ if err != nil {
+ return nil, err
+ }
+ ctx := &context{
+ prevRoot: prevRoot,
+ postRoot: postRoot,
+ accounts: accounts,
+ storages: storages,
+ accountTrie: tr,
+ nodes: trienode.NewMergedNodeSet(),
+ }
+ for addr, account := range accounts {
+ var err error
+ if len(account) == 0 {
+ err = deleteAccount(ctx, loader, addr)
+ } else {
+ err = updateAccount(ctx, loader, addr)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to revert state, err: %w", err)
+ }
+ }
+ root, result := tr.Commit(false)
+ if root != prevRoot {
+ return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
+ }
+ if err := ctx.nodes.Merge(result); err != nil {
+ return nil, err
+ }
+ return ctx.nodes.Flatten(), nil
+}
+
+// updateAccount handles an account that was present in the prev-state and may
+// or may not exist in the post-state. It applies the reverse diff and verifies
+// that the storage root matches the one in the prev-state account.
+func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
+ // The account was present in prev-state, decode it from the
+ // 'slim-rlp' format bytes.
+ h := newHasher()
+ defer h.release()
+
+ addrHash := h.hash(addr.Bytes())
+ prev, err := types.FullAccount(ctx.accounts[addr])
+ if err != nil {
+ return err
+ }
+ // The account may or may not exist in the post-state; try to
+ // load it and decode it if found.
+ blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ post := types.NewEmptyStateAccount()
+ if len(blob) != 0 {
+ if err := rlp.DecodeBytes(blob, &post); err != nil {
+ return err
+ }
+ }
+ // Apply all storage changes into the post-state storage trie.
+ st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
+ if err != nil {
+ return err
+ }
+ for key, val := range ctx.storages[addr] {
+ var err error
+ if len(val) == 0 {
+ err = st.Delete(key.Bytes())
+ } else {
+ err = st.Update(key.Bytes(), val)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ root, result := st.Commit(false)
+ if root != prev.Root {
+ return errors.New("failed to reset storage trie")
+ }
+ // The returned set can be nil if the storage trie is not changed
+ // at all.
+ if result != nil {
+ if err := ctx.nodes.Merge(result); err != nil {
+ return err
+ }
+ }
+ // Write the prev-state account into the main trie
+ full, err := rlp.EncodeToBytes(prev)
+ if err != nil {
+ return err
+ }
+ return ctx.accountTrie.Update(addrHash.Bytes(), full)
+}
+
+// deleteAccount handles an account that was not present in the prev-state but
+// is expected to exist in the post-state. It applies the reverse diff and
+// verifies that the account and its storage are wiped out correctly.
+func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
+ // The account must exist in the post-state; load it.
+ h := newHasher()
+ defer h.release()
+
+ addrHash := h.hash(addr.Bytes())
+ blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+ if err != nil {
+ return err
+ }
+ if len(blob) == 0 {
+ return fmt.Errorf("account is non-existent %#x", addrHash)
+ }
+ var post types.StateAccount
+ if err := rlp.DecodeBytes(blob, &post); err != nil {
+ return err
+ }
+ st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root)
+ if err != nil {
+ return err
+ }
+ for key, val := range ctx.storages[addr] {
+ if len(val) != 0 {
+ return errors.New("expect storage deletion")
+ }
+ if err := st.Delete(key.Bytes()); err != nil {
+ return err
+ }
+ }
+ root, result := st.Commit(false)
+ if root != types.EmptyRootHash {
+ return errors.New("failed to clear storage trie")
+ }
+ // The returned set can be nil if the storage trie is not changed
+ // at all.
+ if result != nil {
+ if err := ctx.nodes.Merge(result); err != nil {
+ return err
+ }
+ }
+ // Delete the post-state account from the main trie.
+ return ctx.accountTrie.Delete(addrHash.Bytes())
+}
+
+// hasher is used to compute the keccak256 hash of the provided data.
+type hasher struct{ sha crypto.KeccakState }
+
+var hasherPool = sync.Pool{
+ New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+}
+
+func newHasher() *hasher {
+ return hasherPool.Get().(*hasher)
+}
+
+func (h *hasher) hash(data []byte) common.Hash {
+ return crypto.HashData(h.sha, data)
+}
+
+func (h *hasher) release() {
+ hasherPool.Put(h)
}
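
Tying the pieces together, here is a minimal sketch of invoking Apply with a recorded state set. The helper name revertOne is hypothetical; in pathdb the set and the TrieLoader come from the layer journal and state history.

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/trie/triestate"
)

// revertOne replays the recorded pre-images of one transition on top of
// postRoot, returning the dirty nodes that restore prevRoot.
func revertOne(prevRoot, postRoot common.Hash, set *triestate.Set, loader triestate.TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) {
	return triestate.Apply(prevRoot, postRoot, set.Accounts, set.Storages, loader)
}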
From e18619a93f5be45bee3eabd0f1beb6a0da61d8dc Mon Sep 17 00:00:00 2001
From: Francesco4203
Date: Fri, 11 Oct 2024 17:33:48 +0700
Subject: [PATCH 24/41] trie: remove nodes method and add diskdb method for
consistency with pathdb
---
core/state/iterator_test.go | 67 ++++++++++++++++++++++++++--------
trie/database.go | 23 +++++-------
trie/triedb/hashdb/database.go | 14 -------
trie/triedb/pathdb/database.go | 5 +++
4 files changed, 66 insertions(+), 43 deletions(-)
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
index b093083db2..24b192c26c 100644
--- a/core/state/iterator_test.go
+++ b/core/state/iterator_test.go
@@ -17,10 +17,11 @@
package state
import (
- "bytes"
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/crypto"
)
// Tests that the node iterator indeed walks over the entire database contents.
@@ -40,29 +41,63 @@ func TestNodeIteratorCoverage(t *testing.T) {
hashes[it.Hash] = struct{}{}
}
}
+ // Check in-disk nodes
+ var (
+ seenNodes = make(map[common.Hash]struct{})
+ seenCodes = make(map[common.Hash]struct{})
+ )
+ it := db.NewIterator(nil, nil)
+ for it.Next() {
+ ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value())
+ if !ok {
+ continue
+ }
+ seenNodes[hash] = struct{}{}
+ }
+ it.Release()
+
+ // Check in-disk codes
+ it = db.NewIterator(nil, nil)
+ for it.Next() {
+ ok, hash := rawdb.IsCodeKey(it.Key())
+ if !ok {
+ continue
+ }
+ if _, ok := hashes[common.BytesToHash(hash)]; !ok {
+ t.Errorf("state entry not reported %x", it.Key())
+ }
+ seenCodes[common.BytesToHash(hash)] = struct{}{}
+ }
+ it.Release()
+
// Cross check the iterated hashes and the database/nodepool content
for hash := range hashes {
- if _, err = sdb.TrieDB().Node(hash); err != nil {
- _, err = sdb.ContractCode(common.Hash{}, hash)
+ _, ok := seenNodes[hash]
+ if !ok {
+ _, ok = seenCodes[hash]
}
- if err != nil {
+ if !ok {
t.Errorf("failed to retrieve reported node %x", hash)
}
}
- for _, hash := range sdb.TrieDB().Nodes() {
- if _, ok := hashes[hash]; !ok {
- t.Errorf("state entry not reported %x", hash)
+}
+
+// isTrieNode is a helper function which reports if the provided
+// database entry belongs to a trie node or not.
+func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) {
+ if scheme == rawdb.HashScheme {
+ if rawdb.IsLegacyTrieNode(key, val) {
+ return true, common.BytesToHash(key)
}
- }
- it := db.NewIterator(nil, nil)
- for it.Next() {
- key := it.Key()
- if bytes.HasPrefix(key, []byte("secure-key-")) {
- continue
+ } else {
+ ok, _ := rawdb.IsAccountTrieNode(key)
+ if ok {
+ return true, crypto.Keccak256Hash(val)
}
- if _, ok := hashes[common.BytesToHash(key)]; !ok {
- t.Errorf("state entry not reported %x", key)
+ ok, _, _ = rawdb.IsStorageTrieNode(key)
+ if ok {
+ return true, crypto.Keccak256Hash(val)
}
}
- it.Release()
+ return false, common.Hash{}
}
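
For a feel of the key classification above, a minimal sketch probing a path-scheme account-trie key; the 'A' prefix follows upstream geth's key layout (an assumption here) and the sample path is made up.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	key := append([]byte("A"), 0x0a, 0x0b) // 'A' prefix + node path (assumed layout)
	if ok, path := rawdb.IsAccountTrieNode(key); ok {
		fmt.Printf("account trie node at path %x\n", path)
	}
}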
diff --git a/trie/database.go b/trie/database.go
index 8988859a50..92791a92d8 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triestate"
)
@@ -57,11 +58,6 @@ type backend interface {
// everything. Therefore, these maps must not be changed afterwards.
Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error
- // Nodes retrieves the hashes of all the nodes cached within the memory database.
- // This method is extremely expensive and should only be used to validate internal
- // states in test code.
- Nodes() []common.Hash
-
// DiskDB retrieves the persistent storage backing the trie database.
DiskDB() ethdb.KeyValueStore
@@ -120,7 +116,15 @@ func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
// Reader returns a reader for accessing all trie nodes with provided state root.
// Nil is returned in case the state is not available.
func (db *Database) Reader(blockRoot common.Hash) Reader {
- return db.backend.(*hashdb.Database).Reader(blockRoot)
+ switch b := db.backend.(type) {
+ case *hashdb.Database:
+ return b.Reader(blockRoot)
+ case *pathdb.Database:
+ reader, _ := b.Reader(blockRoot)
+ return reader
+ }
+ return nil
}
// Update performs a state transition by committing dirty nodes contained in the
@@ -177,13 +181,6 @@ func (db *Database) DiskDB() ethdb.KeyValueStore {
return db.backend.DiskDB()
}
-// Nodes retrieves the hashes of all the nodes cached within the memory database.
-// This method is extremely expensive and should only be used to validate internal
-// states in test code.
-func (db *Database) Nodes() []common.Hash {
- return db.backend.Nodes()
-}
-
// Close flushes the dangling preimages to disk and closes the trie database.
// It is meant to be called when closing the blockchain object, so that all
// resources held can be released correctly.
diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go
index 56cd38699a..8d8701a60b 100644
--- a/trie/triedb/hashdb/database.go
+++ b/trie/triedb/hashdb/database.go
@@ -221,20 +221,6 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
return nil, errors.New("not found")
}
-// Nodes retrieves the hashes of all the nodes cached within the memory database.
-// This method is extremely expensive and should only be used to validate internal
-// states in test code.
-func (db *Database) Nodes() []common.Hash {
- db.lock.RLock()
- defer db.lock.RUnlock()
-
- var hashes = make([]common.Hash, 0, len(db.dirties))
- for hash := range db.dirties {
- hashes = append(hashes, hash)
- }
- return hashes
-}
-
// Reference adds a new reference from a parent node to a child node.
// This function is used to add reference between internal trie node
// and external node(e.g. storage trie root), all internal trie nodes
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
index 48b4744b40..588e477eb0 100644
--- a/trie/triedb/pathdb/database.go
+++ b/trie/triedb/pathdb/database.go
@@ -383,6 +383,11 @@ func (db *Database) SetBufferSize(size int) error {
return db.tree.bottom().setBufferSize(db.bufferSize)
}
+// DiskDB retrieves the persistent storage backing the trie database.
+func (db *Database) DiskDB() ethdb.KeyValueStore {
+ return db.diskdb
+}
+
// Scheme returns the node scheme used in the database.
func (db *Database) Scheme() string {
return rawdb.PathScheme
From 80313597d2b904f78dcf4736e4560b372fb502ec Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Tue, 15 Oct 2024 12:54:04 +0700
Subject: [PATCH 25/41] all: reworkNodeResolver for working with multiple state
schemes with calling ReadTrieNode underlying (#603)
---
cmd/ronin/snapshot.go | 4 +++-
core/state/snapshot/generate.go | 23 ++++++++++++++---------
trie/encoding_test.go | 6 +++---
trie/iterator.go | 29 +++++++++++++++++++----------
4 files changed, 39 insertions(+), 23 deletions(-)
diff --git a/cmd/ronin/snapshot.go b/cmd/ronin/snapshot.go
index e83c4c31ba..0a756ed6da 100644
--- a/cmd/ronin/snapshot.go
+++ b/cmd/ronin/snapshot.go
@@ -185,6 +185,8 @@ block is used.
}
)
+// Deprecation: this command should be removed once the hash-based
+// scheme is deprecated.
func pruneState(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
@@ -392,7 +394,7 @@ func traverseRawState(ctx *cli.Context) error {
node := accIter.Hash()
if node != (common.Hash{}) {
- // Check the present for non-empty hash node(embedded node doesn't
+ // Check the presence for non-empty hash node(embedded node doesn't
// have their own hash).
if !rawdb.HasLegacyTrieNode(chaindb, node) {
log.Error("Missing trie node(account)", "hash", node)
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index a9ea6be54d..80e3d61bba 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -429,20 +429,25 @@ func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string,
// We use the snap data to build up a cache which can be used by the
// main account trie as a primary lookup when resolving hashes
- var snapNodeCache ethdb.Database
+ var resolver trie.NodeResolver
if len(result.keys) > 0 {
- snapNodeCache = rawdb.NewMemoryDatabase()
- snapTrieDb := trie.NewDatabase(snapNodeCache)
- snapTrie := trie.NewEmpty(snapTrieDb)
+ mdb := rawdb.NewMemoryDatabase()
+ tdb := trie.NewDatabase(mdb)
+ snapTrie := trie.NewEmpty(tdb)
for i, key := range result.keys {
snapTrie.Update(key, result.vals[i])
}
- root, nodes, _ := snapTrie.Commit(false)
- if nodes != nil {
- snapTrieDb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ root, nodes, err := snapTrie.Commit(false)
+ if err == nil && nodes != nil {
+ tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ tdb.Commit(root, false)
+ }
+ resolver = func(owner common.Hash, path []byte, hash common.Hash) []byte {
+ return rawdb.ReadTrieNode(mdb, owner, path, hash, tdb.Scheme()) // Read the TrieNode based on scheme
}
- snapTrieDb.Commit(root, false)
}
+
tr := result.tr
if tr == nil {
tr, err = trie.New(trieID, dl.triedb)
@@ -469,7 +474,7 @@ func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string,
start = time.Now()
internal time.Duration
)
- nodeIt.AddResolver(snapNodeCache)
+ nodeIt.AddResolver(resolver)
for iter.Next() {
if last != nil && bytes.Compare(iter.Key, last) > 0 {
trieMore = true
diff --git a/trie/encoding_test.go b/trie/encoding_test.go
index 16393313f7..e8fe4f3c6b 100644
--- a/trie/encoding_test.go
+++ b/trie/encoding_test.go
@@ -78,17 +78,17 @@ func TestHexKeybytes(t *testing.T) {
}
func TestHexToCompactInPlace(t *testing.T) {
- for i, keyS := range []string{
+ for i, key := range []string{
"00",
"060a040c0f000a090b040803010801010900080d090a0a0d0903000b10",
"10",
} {
- hexBytes, _ := hex.DecodeString(keyS)
+ hexBytes, _ := hex.DecodeString(key)
exp := hexToCompact(hexBytes)
sz := hexToCompactInPlace(hexBytes)
got := hexBytes[:sz]
if !bytes.Equal(exp, got) {
- t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, keyS, got, exp)
+ t.Fatalf("test %d: encoding err\ninp %v\ngot %x\nexp %x\n", i, key, got, exp)
}
}
}
diff --git a/trie/iterator.go b/trie/iterator.go
index 5bd69af3f0..54afb69cee 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -22,10 +22,16 @@ import (
"errors"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
)
+// NodeResolver is used for looking up trie nodes before reaching into the real
+// persistent layer. This is not mandatory, rather is an optimization for cases
+// where trie nodes can be recovered from some external mechanism without reading
+// from disk. In those cases, this resolver allows short-circuiting accesses
+// and serving them from memory. The iterator supports it across multiple
+// state schemes.
+type NodeResolver func(owner common.Hash, path []byte, hash common.Hash) []byte
+
// Iterator is a key-value trie iterator that traverses a Trie.
type Iterator struct {
nodeIt NodeIterator
@@ -108,8 +114,8 @@ type NodeIterator interface {
// to the value after calling Next.
LeafProof() [][]byte
- // AddResolver sets an intermediate database to use for looking up trie nodes
- // before reaching into the real persistent layer.
+ // AddResolver sets a node resolver to use for looking up trie nodes before
+ // reaching into the real persistent layer.
//
// This is not required for normal operation, rather is an optimization for
// cases where trie nodes can be recovered from some external mechanism without
@@ -119,7 +125,7 @@ type NodeIterator interface {
// Before adding a similar mechanism to any other place in Geth, consider
// making trie.Database an interface and wrapping at that level. It's a huge
// refactor, but it could be worth it if another occurrence arises.
- AddResolver(ethdb.KeyValueStore)
+ AddResolver(NodeResolver)
}
// nodeIteratorState represents the iteration state at one particular node of the
@@ -138,7 +144,7 @@ type nodeIterator struct {
path []byte // Path to the current node
err error // Failure set in case of an internal error in the iterator
- resolver ethdb.KeyValueStore // Optional intermediate resolver above the disk layer
+ resolver NodeResolver // optional node resolver for avoiding disk hits
}
// errIteratorEnd is stored in nodeIterator.err when iteration is done.
@@ -166,7 +172,7 @@ func newNodeIterator(trie *Trie, start []byte) NodeIterator {
return it
}
-func (it *nodeIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *nodeIterator) AddResolver(resolver NodeResolver) {
it.resolver = resolver
}
@@ -371,12 +377,15 @@ func (it *nodeIterator) peekSeek(seekKey []byte) (*nodeIteratorState, *int, []by
func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
if it.resolver != nil {
- if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 {
+ // Try the optional resolver first; it may serve the node from memory.
+ if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 {
if resolved, err := decodeNode(hash, blob); err == nil {
return resolved, nil
}
}
}
+
+ // Retrieve the specified node from the underlying node reader.
blob, err := it.trie.reader.node(path, common.BytesToHash(hash))
if err != nil {
return nil, err
@@ -389,7 +398,7 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error) {
if it.resolver != nil {
- if blob, err := it.resolver.Get(hash); err == nil && len(blob) > 0 {
+ if blob := it.resolver(it.trie.owner, path, common.BytesToHash(hash)); len(blob) > 0 {
return blob, nil
}
}
@@ -593,7 +602,7 @@ func (it *differenceIterator) NodeBlob() []byte {
return it.b.NodeBlob()
}
-func (it *differenceIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *differenceIterator) AddResolver(resolver NodeResolver) {
panic("not implemented")
}
@@ -708,7 +717,7 @@ func (it *unionIterator) NodeBlob() []byte {
return (*it.items)[0].NodeBlob()
}
-func (it *unionIterator) AddResolver(resolver ethdb.KeyValueStore) {
+func (it *unionIterator) AddResolver(resolver NodeResolver) {
panic("not implemented")
}
From cf4a7523bff93a98579274b74507924247da789e Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Thu, 17 Oct 2024 14:36:35 +0700
Subject: [PATCH 26/41] trie: fix wrong-path insertion in stack trie and
 remove the key offset when inserting, referencing 86fe359 (#604)
---
trie/stacktrie.go | 81 ++++++++++++++++++++++-------------------------
1 file changed, 37 insertions(+), 44 deletions(-)
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index 753da13c31..79f6e39bfd 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -58,13 +58,12 @@ func returnToPool(st *StackTrie) {
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
- owner common.Hash // the owner of the trie
- nodeType uint8 // node type (as in branch, ext, leaf)
- val []byte // value contained by this node if it's a leaf
- key []byte // key chunk covered by this (full|ext) node
- keyOffset int // offset of the key chunk inside a full key
- children [16]*StackTrie // list of children (for fullnodes and exts)
- writeFn NodeWriteFunc // function for commiting nodes, can be nil
+ owner common.Hash // the owner of the trie
+ nodeType uint8 // node type (as in branch, ext, leaf)
+ val []byte // value contained by this node if it's a leaf
+ key []byte // key chunk covered by this (full|ext) node
+ children [16]*StackTrie // list of children (for fullnodes and exts)
+ writeFn NodeWriteFunc // function for committing nodes, can be nil
}
// NewStackTrie allocates and initializes an empty trie.
@@ -105,17 +104,15 @@ func (st *StackTrie) MarshalBinary() (data []byte, err error) {
w = bufio.NewWriter(&b)
)
if err := gob.NewEncoder(w).Encode(struct {
- Owner common.Hash
- NodeType uint8
- Val []byte
- Key []byte
- KeyOffset uint8
+ Owner common.Hash
+ NodeType uint8
+ Val []byte
+ Key []byte
}{
st.owner,
st.nodeType,
st.val,
st.key,
- uint8(st.keyOffset),
}); err != nil {
return nil, err
}
@@ -143,18 +140,16 @@ func (st *StackTrie) UnmarshalBinary(data []byte) error {
func (st *StackTrie) unmarshalBinary(r io.Reader) error {
var dec struct {
- Owner common.Hash
- NodeType uint8
- Val []byte
- Key []byte
- KeyOffset uint8
+ Owner common.Hash
+ NodeType uint8
+ Val []byte
+ Key []byte
}
gob.NewDecoder(r).Decode(&dec)
st.owner = dec.Owner
st.nodeType = dec.NodeType
st.val = dec.Val
st.key = dec.Key
- st.keyOffset = int(dec.KeyOffset)
var hasChild = make([]byte, 1)
for i := range st.children {
@@ -179,20 +174,18 @@ func (st *StackTrie) setWriteFunc(writeFn NodeWriteFunc) {
}
}
-func newLeaf(owner common.Hash, ko int, key, val []byte, writeFn NodeWriteFunc) *StackTrie {
+func newLeaf(owner common.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie {
st := stackTrieFromPool(writeFn, owner)
st.nodeType = leafNode
- st.keyOffset = ko
- st.key = append(st.key, key[ko:]...)
+ st.key = append(st.key, key...)
st.val = val
return st
}
-func newExt(owner common.Hash, ko int, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie {
+func newExt(owner common.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie {
st := stackTrieFromPool(writeFn, owner)
st.nodeType = extNode
- st.keyOffset = ko
- st.key = append(st.key, key[ko:]...)
+ st.key = append(st.key, key...)
st.children[0] = child
return st
}
@@ -231,25 +224,26 @@ func (st *StackTrie) Reset() {
st.children[i] = nil
}
st.nodeType = emptyNode
- st.keyOffset = 0
}
// Helper function that, given the key chunk relative to this node,
// determines the index at which it first differs from st.key.
func (st *StackTrie) getDiffIndex(key []byte) int {
- diffindex := 0
- for ; diffindex < len(st.key) && st.key[diffindex] == key[st.keyOffset+diffindex]; diffindex++ {
+ for idx, nibble := range st.key {
+ if nibble != key[idx] {
+ return idx
+ }
}
- return diffindex
+ return len(st.key)
}
// Helper function that inserts a (key, value) pair into the trie,
// extending the path prefix as it descends.
-func (st *StackTrie) insert(key, value []byte, prefix []byte) {
+func (st *StackTrie) insert(key, value, prefix []byte) {
switch st.nodeType {
case branchNode: /* Branch */
- idx := int(key[st.keyOffset])
+ idx := int(key[0])
// Unresolve elder siblings
for i := idx - 1; i >= 0; i-- {
if st.children[i] != nil {
@@ -261,10 +255,11 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
}
// Add new child
if st.children[idx] == nil {
- st.children[idx] = stackTrieFromPool(st.writeFn, st.owner)
- st.children[idx].keyOffset = st.keyOffset + 1
+ st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn)
+ } else {
+ st.children[idx].insert(key[1:], value, append(prefix, key[0]))
}
- st.children[idx].insert(key, value, append(prefix, key[st.keyOffset]))
+
case extNode: /* Ext */
// Compare both key chunks and see where they differ
diffidx := st.getDiffIndex(key)
@@ -277,7 +272,7 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
if diffidx == len(st.key) {
// Ext key and key segment are identical, recurse into
// the child node.
- st.children[0].insert(key, value, append(prefix, key[:diffidx]...))
+ st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...))
return
}
// Save the original part. Depending if the break is
@@ -289,7 +284,7 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// Break on the non-last byte, insert an intermediate
// extension. The path prefix of the newly-inserted
// extension should also contain the different byte.
- n = newExt(st.owner, diffidx+1, st.key, st.children[0], st.writeFn)
+ n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn)
n.hash(append(prefix, st.key[:diffidx+1]...))
} else {
// an extension node: reuse the current node.
@@ -313,15 +308,14 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// node.
st.children[0] = stackTrieFromPool(st.writeFn, st.owner)
st.children[0].nodeType = branchNode
- st.children[0].keyOffset = st.keyOffset + diffidx
p = st.children[0]
}
// Create a leaf for the inserted part
- o := newLeaf(st.owner, st.keyOffset+diffidx+1, key, value, st.writeFn)
+ o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
// Insert both child leaves where they belong:
origIdx := st.key[diffidx]
- newIdx := key[diffidx+st.keyOffset]
+ newIdx := key[diffidx]
p.children[origIdx] = n
p.children[newIdx] = o
st.key = st.key[:diffidx]
@@ -355,7 +349,6 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
st.nodeType = extNode
st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner)
st.children[0].nodeType = branchNode
- st.children[0].keyOffset = st.keyOffset + diffidx
p = st.children[0]
}
@@ -364,11 +357,11 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
// The child leave will be hashed directly in order to
// free up some memory.
origIdx := st.key[diffidx]
- p.children[origIdx] = newLeaf(st.owner, diffidx+1, st.key, st.val, st.writeFn)
+ p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn)
p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...))
- newIdx := key[diffidx+st.keyOffset]
- p.children[newIdx] = newLeaf(st.owner, p.keyOffset+1, key, value, st.writeFn)
+ newIdx := key[diffidx]
+ p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
// Finally, cut off the key part that has been passed
// over to the children.
@@ -376,7 +369,7 @@ func (st *StackTrie) insert(key, value []byte, prefix []byte) {
st.val = nil
case emptyNode: /* Empty */
st.nodeType = leafNode
- st.key = key[st.keyOffset:]
+ st.key = key
st.val = value
case hashedNode:
panic("trying to insert into hash")
From 3ea219b56c2a5ce4329651a69a193638f5a73a56 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Fri, 18 Oct 2024 13:02:27 +0700
Subject: [PATCH 27/41] all: enable pbss (#600)
* trie: enable pathdb: add path config and enable tests
* core/rawdb: also inspect the state freezer in pathdb; rename accordingly
* cmd: wire up the ronin commands
* core: refactor; add path-based config; fix tests
- all: fix and enable tests for the path-based scheme
- blockchain: open the triedb explicitly in blockchain functions and close it right after use, since the diskLayer inside pathdb is a skeleton (a sketch of this pattern follows the change list below)
- blockchain: in writeBlockWithState, the path-based scheme skips the explicit garbage collector, which is only needed for the hash-based scheme
- genesis.go: nit: adjust the genesis state check, ref https://github.com/ethereum/go-ethereum/commit/08bf8a60c3b1dec73c67a187093cd066021d0453
* tests: enable path tests
* eth: enable path scheme
- all: fix tests, enable path scheme tests
- state_accessor: split the statedb-from-block retrieval into hash scheme and path scheme variants
* light, miner, les, ethclient: clean up tests
* trie: refactor the trie reader; return an error when the state reader cannot be created, in both hash and path modes
* trie: fix failing iterator and sync tests
* trie,core: improve the trie reader and add a nil-config check when initializing the database
* trie: once a statedb instance is committed it is no longer usable; a new instance must be created on top of the database updated with the new root, referencing commit 6d2aeb4
* cmd,les,eth: fix unit tests and add the Parallel flag correctly
* core, eth: fix tests
* core: refactor and fix sync_test logic
* tmp: disable the path-based scheme for TestIsPeriodBlock and TestIsTrippEffective
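A minimal sketch of the open-use-close pattern mentioned above, using only calls that appear in this patch (the in-memory database stands in for the real chain database):

package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

func main() {
	// Open the trie database explicitly and close it right after use;
	// in path mode the disk layer is live state, not a throwaway handle.
	db := rawdb.NewMemoryDatabase()
	triedb := trie.NewDatabase(db, &trie.Config{PathDB: pathdb.Defaults})
	defer triedb.Close()

	_ = triedb.Scheme() // reports rawdb.PathScheme
}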
---------
Co-authored-by: Huy Ngo
---
accounts/abi/bind/backends/simulated.go | 3 +-
cmd/evm/internal/t8ntool/execution.go | 6 +
cmd/evm/runner.go | 20 +-
cmd/evm/staterunner.go | 30 +-
cmd/ronin/chaincmd.go | 22 +-
cmd/ronin/dbcmd.go | 11 +-
cmd/ronin/genesis_test.go | 4 +-
cmd/ronin/main.go | 5 +-
cmd/ronin/snapshot.go | 63 +-
cmd/utils/flags.go | 146 +++-
cmd/utils/flags_legacy.go | 8 +
consensus/clique/clique_test.go | 5 +-
consensus/clique/snapshot_test.go | 3 +-
consensus/consortium/v2/consortium_test.go | 69 +-
core/bench_test.go | 3 +-
core/block_validator_test.go | 15 +-
core/blockchain.go | 278 +++---
core/blockchain_reader.go | 14 +-
core/blockchain_repair_test.go | 53 +-
core/blockchain_sethead_test.go | 36 +-
core/blockchain_snapshot_test.go | 371 ++++----
core/blockchain_test.go | 799 +++++++++++++-----
core/chain_makers.go | 20 +-
core/chain_makers_test.go | 5 +-
core/dao_test.go | 36 +-
core/error.go | 1 +
core/genesis.go | 12 +-
core/genesis_test.go | 54 +-
core/headerchain_test.go | 3 +-
core/rawdb/accessors_trie.go | 31 +-
core/rawdb/ancient_scheme.go | 10 +-
core/rawdb/ancient_utils.go | 61 +-
core/rawdb/database.go | 22 +-
core/rawdb/schema.go | 26 +-
core/rawdb/schema_test.go | 8 +-
core/rlp_test.go | 3 +-
core/state/database.go | 4 +-
core/state/dump.go | 15 +-
core/state/iterator.go | 11 +-
core/state/iterator_test.go | 15 +-
core/state/pruner/pruner.go | 22 +-
core/state/snapshot/generate.go | 5 +-
core/state/snapshot/generate_test.go | 130 ++-
core/state/state_test.go | 6 +-
core/state/statedb.go | 29 +-
core/state/statedb_fuzz_test.go | 2 +-
core/state/statedb_test.go | 101 ++-
core/state/sync_test.go | 202 +++--
core/state_processor_test.go | 16 +-
core/types/hashing_test.go | 8 +-
core/vote/vote_pool_test.go | 7 +-
eth/api.go | 19 +-
eth/api_backend.go | 2 +-
eth/api_test.go | 26 +-
eth/backend.go | 13 +-
eth/catalyst/api_test.go | 3 +-
eth/downloader/downloader_test.go | 4 +-
eth/downloader/statesync.go | 2 +-
eth/downloader/testchain_test.go | 3 +-
eth/ethconfig/config.go | 9 +
eth/ethconfig/gen_config.go | 18 +
eth/fetcher/block_fetcher_test.go | 2 +-
eth/filters/filter_system_test.go | 5 +-
eth/filters/filter_test.go | 5 +-
eth/gasprice/gasprice_test.go | 5 +-
eth/handler.go | 13 +-
eth/handler_eth_test.go | 4 +-
eth/handler_test.go | 5 +-
eth/protocols/eth/handler_test.go | 2 +-
eth/protocols/eth/handlers.go | 7 +-
eth/protocols/snap/handler.go | 8 +-
eth/protocols/snap/sync_test.go | 211 +++--
eth/state_accessor.go | 96 ++-
eth/tracers/api_test.go | 2 +-
.../internal/tracetest/calltrace2_test.go | 9 +-
.../internal/tracetest/calltrace_test.go | 11 +-
.../internal/tracetest/flat_calltrace_test.go | 3 +-
.../internal/tracetest/prestate_test.go | 3 +-
eth/tracers/tracers_test.go | 4 +-
ethclient/ethclient_test.go | 5 +-
ethclient/gethclient/gethclient_test.go | 3 +-
les/client.go | 2 +-
les/downloader/downloader_test.go | 2 +-
les/downloader/statesync.go | 2 +-
les/downloader/testchain_test.go | 3 +-
les/fetcher/block_fetcher_test.go | 2 +-
les/handler_test.go | 8 +-
les/server_handler.go | 2 +-
les/test_helper.go | 5 +-
light/lightchain_test.go | 5 +-
light/odr_test.go | 6 +-
light/postprocess.go | 4 +-
light/trie.go | 14 +-
light/trie_test.go | 16 +-
light/txpool_test.go | 5 +-
miner/miner_test.go | 2 +-
miner/worker_test.go | 5 +-
tests/block_test.go | 17 +-
tests/block_test_util.go | 22 +-
tests/fuzzers/les/les-fuzzer.go | 6 +-
tests/fuzzers/rangeproof/rangeproof-fuzzer.go | 2 +-
tests/fuzzers/stacktrie/trie_fuzzer.go | 6 +-
tests/fuzzers/trie/trie-fuzzer.go | 4 +-
tests/state_test.go | 62 +-
tests/state_test_util.go | 53 +-
trie/database.go | 115 ++-
trie/database_test.go | 8 +-
trie/errors.go | 6 +
trie/iterator_test.go | 78 +-
trie/proof.go | 3 +
trie/proof_test.go | 20 +-
trie/secure_trie.go | 8 +-
trie/secure_trie_test.go | 4 +-
trie/stacktrie_test.go | 12 +-
trie/sync.go | 11 +-
trie/sync_test.go | 97 ++-
trie/tracer_test.go | 18 +-
trie/trie.go | 56 +-
trie/trie_reader.go | 35 +-
trie/trie_test.go | 70 +-
trie/triedb/hashdb/database.go | 48 +-
trie/triedb/pathdb/database.go | 80 +-
trie/triedb/pathdb/database_test.go | 9 +-
trie/triedb/pathdb/difflayer_test.go | 2 +-
trie/triedb/pathdb/disklayer.go | 20 +-
trie/triedb/pathdb/history_test.go | 2 +-
trie/triedb/pathdb/testutils.go | 10 +-
trie/triestate/state.go | 37 +-
128 files changed, 3005 insertions(+), 1330 deletions(-)
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 96e85a9fa5..146b209f7b 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -43,6 +43,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
)
// This nil assignment ensures at compile time that SimulatedBackend implements bind.ContractBackend.
@@ -77,7 +78,7 @@ type SimulatedBackend struct {
// A simulated backend always uses chainID 1337.
func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc}
- genesis.MustCommit(database)
+ genesis.MustCommit(database, trie.NewDatabase(database, trie.HashDefaults))
blockchain, _ := core.NewBlockChain(database, nil, &genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
backend := &SimulatedBackend{
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index ea59c48427..b3773b9fa2 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -258,6 +258,12 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
GasUsed: (math.HexOrDecimal64)(gasUsed),
}
+ // Re-create statedb instance with new root upon the updated database
+ // for accessing latest states.
+ statedb, err = state.New(root, statedb.Database(), nil)
+ if err != nil {
+ return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
+ }
return statedb, execRs, nil
}
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 67aaa03ea9..4a060f3e11 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -30,6 +30,8 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
"github.com/ethereum/go-ethereum/cmd/utils"
@@ -37,6 +39,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/log"
@@ -139,11 +142,22 @@ func runCmd(ctx *cli.Context) error {
gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
- genesis := gen.MustCommit(db)
- statedb, _ = state.New(genesis.Root(), state.NewDatabase(db), nil)
+ triedb := trie.NewDatabase(db, &trie.Config{
+ HashDB: hashdb.Defaults,
+ })
+ defer triedb.Close()
+ genesis := gen.MustCommit(db, triedb)
+ sdb := state.NewDatabaseWithNodeDB(db, triedb)
+ statedb, _ = state.New(genesis.Root(), sdb, nil)
chainConfig = gen.Config
} else {
- statedb, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ db := rawdb.NewMemoryDatabase()
+ triedb := trie.NewDatabase(db, &trie.Config{
+ HashDB: hashdb.Defaults,
+ })
+ defer triedb.Close()
+ sdb := state.NewDatabaseWithNodeDB(db, triedb)
+ statedb, _ = state.New(types.EmptyRootHash, sdb, nil)
genesisConfig = new(core.Genesis)
}
if ctx.String(SenderFlag.Name) != "" {
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index 0e05237343..506f789a8a 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -20,11 +20,14 @@ import (
"encoding/json"
"errors"
"fmt"
- "github.com/ethereum/go-ethereum/eth/tracers/logger"
"io/ioutil"
"os"
+ "github.com/ethereum/go-ethereum/eth/tracers/logger"
+
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/tests"
@@ -96,21 +99,22 @@ func stateTestCmd(ctx *cli.Context) error {
results := make([]StatetestResult, 0, len(tests))
for key, test := range tests {
for _, st := range test.Subtests() {
+ dump := ctx.Bool(DumpFlag.Name)
// Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
- _, s, err := test.Run(st, cfg, false)
- // print state root for evmlab tracing
- if ctx.Bool(MachineFlag.Name) && s != nil {
- fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false))
- }
- if err != nil {
- // Test failed, mark as so and dump any state to aid debugging
- result.Pass, result.Error = false, err.Error()
- if ctx.Bool(DumpFlag.Name) && s != nil {
- dump := s.RawDump(nil)
- result.State = &dump
+ test.Run(st, cfg, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ if err != nil {
+ // Test failed, mark as so and dump any state to aid debugging
+ result.Pass, result.Error = false, err.Error()
+ if dump {
+ dump := state.RawDump(nil)
+ result.State = &dump
+ }
}
- }
+ if ctx.Bool(MachineFlag.Name) && state != nil {
+ fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
+ }
+ })
results = append(results, *result)
diff --git a/cmd/ronin/chaincmd.go b/cmd/ronin/chaincmd.go
index 937c5b3d68..07475ef219 100644
--- a/cmd/ronin/chaincmd.go
+++ b/cmd/ronin/chaincmd.go
@@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
- "github.com/ethereum/go-ethereum/trie"
"github.com/urfave/cli/v2"
)
@@ -52,6 +51,8 @@ var (
utils.DataDirFlag,
utils.DBEngineFlag,
utils.ForceOverrideChainConfigFlag,
+ utils.CachePreimagesFlag,
+ utils.StateSchemeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -106,6 +107,9 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to
utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag,
utils.TxLookupLimitFlag,
+ utils.TransactionHistoryFlag,
+ utils.StateSchemeFlag,
+ utils.StateHistoryFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -125,6 +129,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
utils.DBEngineFlag,
utils.CacheFlag,
utils.SyncModeFlag,
+ utils.StateSchemeFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -226,14 +231,13 @@ func initGenesis(ctx *cli.Context) error {
utils.Fatalf("Failed to open database: %v", err)
}
// Create triedb firstly
- triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{
- Preimages: ctx.Bool(utils.CachePreimagesFlag.Name),
- })
+
+ triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false)
+ defer chaindb.Close()
_, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis, overrideChainConfig)
if err != nil {
utils.Fatalf("Failed to write genesis block: %v", err)
}
- chaindb.Close()
log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
}
return nil
@@ -471,10 +475,10 @@ func dump(ctx *cli.Context) error {
if err != nil {
return err
}
- config := &trie.Config{
- Preimages: true, // always enable preimage lookup
- }
- state, err := state.New(root, state.NewDatabaseWithConfig(db, config), nil)
+ triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup
+ defer triedb.Close()
+ state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
+
if err != nil {
return err
}
diff --git a/cmd/ronin/dbcmd.go b/cmd/ronin/dbcmd.go
index dc8ce61742..53a83b5031 100644
--- a/cmd/ronin/dbcmd.go
+++ b/cmd/ronin/dbcmd.go
@@ -190,6 +190,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
+ utils.StateSchemeFlag,
},
Description: "This command looks up the specified database key from the database.",
}
@@ -476,6 +477,8 @@ func dbDumpTrie(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
+ triedb := utils.MakeTrieDatabase(ctx, db, false, true)
+ defer triedb.Close()
var (
state []byte
storage []byte
@@ -509,12 +512,16 @@ func dbDumpTrie(ctx *cli.Context) error {
}
}
id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
- theTrie, err := trie.New(id, trie.NewDatabase(db))
+ theTrie, err := trie.New(id, triedb)
+ if err != nil {
+ return err
+ }
+ trieIt, err := theTrie.NodeIterator(start)
if err != nil {
return err
}
var count int64
- it := trie.NewIterator(theTrie.NodeIterator(start))
+ it := trie.NewIterator(trieIt)
for it.Next() {
if max > 0 && count == max {
fmt.Printf("Exiting after %d values\n", count)
diff --git a/cmd/ronin/genesis_test.go b/cmd/ronin/genesis_test.go
index 2d4cb6a3ac..41b50b45d7 100644
--- a/cmd/ronin/genesis_test.go
+++ b/cmd/ronin/genesis_test.go
@@ -173,12 +173,12 @@ func TestCustomBackend(t *testing.T) {
{ // Can't start pebble on top of leveldb
initArgs: []string{"--db.engine", "leveldb"},
execArgs: []string{"--db.engine", "pebble"},
- execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
+ execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
},
{ // Can't start leveldb on top of pebble
initArgs: []string{"--db.engine", "pebble"},
execArgs: []string{"--db.engine", "leveldb"},
- execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
+ execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
},
{ // Reject invalid backend choice
initArgs: []string{"--db.engine", "mssql"},
diff --git a/cmd/ronin/main.go b/cmd/ronin/main.go
index 3e94098ab3..057080de40 100644
--- a/cmd/ronin/main.go
+++ b/cmd/ronin/main.go
@@ -101,7 +101,10 @@ var (
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
utils.SnapshotFlag,
- utils.TxLookupLimitFlag,
+ utils.TxLookupLimitFlag, // deprecated
+ utils.TransactionHistoryFlag,
+ utils.StateSchemeFlag,
+ utils.StateHistoryFlag,
utils.TriesInMemoryFlag,
utils.LightServeFlag,
utils.LightIngressFlag,
diff --git a/cmd/ronin/snapshot.go b/cmd/ronin/snapshot.go
index 0a756ed6da..23fd7d19dc 100644
--- a/cmd/ronin/snapshot.go
+++ b/cmd/ronin/snapshot.go
@@ -77,10 +77,7 @@ two version states are available: genesis and the specific one.
The default pruning target is the HEAD-127 state.
-WARNING: It's necessary to delete the trie clean cache after the pruning.
-If you specify another directory for the trie clean cache via "--cache.trie.journal"
-during the use of Geth, please also specify it here for correct deletion. Otherwise
-the trie clean cache with default directory will be deleted.
+WARNING: offline pruning is only supported in hash mode (--state.scheme=hash).
`,
},
{
@@ -97,6 +94,7 @@ the trie clean cache with default directory will be deleted.
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
+ utils.StateSchemeFlag,
},
Description: `
geth snapshot verify-state
@@ -119,6 +117,7 @@ In other words, this command does the snapshot to trie conversion.
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
+ utils.StateSchemeFlag,
},
Description: `
geth snapshot traverse-state
@@ -143,6 +142,7 @@ It's also usable without snapshot enabled.
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
+ utils.StateSchemeFlag,
},
Description: `
geth snapshot traverse-rawstate
@@ -172,6 +172,7 @@ It's also usable without snapshot enabled.
utils.ExcludeStorageFlag,
utils.StartKeyFlag,
utils.DumpLimitFlag,
+ utils.StateSchemeFlag,
},
Description: `
This command is semantically equivalent to 'geth dump', but uses the snapshots
@@ -192,6 +193,9 @@ func pruneState(ctx *cli.Context) error {
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, false)
+ if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme {
+ log.Crit("Offline pruning is not required for path scheme")
+ }
pruner, err := pruner.NewPruner(chaindb, stack.ResolvePath(""),
ctx.Uint64(utils.BloomFilterSizeFlag.Name))
if err != nil {
@@ -227,7 +231,10 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
- snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
+ triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
+ defer triedb.Close()
+
+ snaptree, err := snapshot.New(chaindb, triedb, 256, headBlock.Root(), false, false, false)
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)
return err
@@ -260,6 +267,10 @@ func traverseState(ctx *cli.Context) error {
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, true)
+ defer chaindb.Close()
+
+ triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
+ defer triedb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
if headBlock == nil {
log.Error("Failed to load head block")
@@ -284,7 +295,6 @@ func traverseState(ctx *cli.Context) error {
root = headBlock.Root()
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
}
- triedb := trie.NewDatabase(chaindb)
t, err := trie.NewSecure(trie.StateTrieID(root), triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "err", err)
@@ -297,7 +307,12 @@ func traverseState(ctx *cli.Context) error {
lastReport time.Time
start = time.Now()
)
- accIter := trie.NewIterator(t.NodeIterator(nil))
+ acctIt, err := t.NodeIterator(nil)
+ if err != nil {
+ log.Error("Failed to open iterator", "root", root, "err", err)
+ return err
+ }
+ accIter := trie.NewIterator(acctIt)
for accIter.Next() {
accounts += 1
var acc types.StateAccount
@@ -311,7 +326,12 @@ func traverseState(ctx *cli.Context) error {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return err
}
- storageIter := trie.NewIterator(storageTrie.NodeIterator(nil))
+ storageIt, err := storageTrie.NodeIterator(nil)
+ if err != nil {
+ log.Error("Failed to open storage iterator", "root", acc.Root, "err", err)
+ return err
+ }
+ storageIter := trie.NewIterator(storageIt)
for storageIter.Next() {
slots += 1
}
@@ -350,6 +370,10 @@ func traverseRawState(ctx *cli.Context) error {
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, true)
+ defer chaindb.Close()
+
+ triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
+ defer triedb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
if headBlock == nil {
log.Error("Failed to load head block")
@@ -374,7 +398,6 @@ func traverseRawState(ctx *cli.Context) error {
root = headBlock.Root()
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
}
- triedb := trie.NewDatabase(chaindb)
t, err := trie.NewSecure(trie.StateTrieID(root), triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "err", err)
@@ -388,7 +411,17 @@ func traverseRawState(ctx *cli.Context) error {
lastReport time.Time
start = time.Now()
)
- accIter := t.NodeIterator(nil)
+ accIter, err := t.NodeIterator(nil)
+ if err != nil {
+ log.Error("Failed to open iterator", "root", root, "err", err)
+ return err
+ }
for accIter.Next(true) {
nodes += 1
node := accIter.Hash()
@@ -416,7 +449,11 @@ func traverseRawState(ctx *cli.Context) error {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return errors.New("missing storage trie")
}
- storageIter := storageTrie.NodeIterator(nil)
+ storageIter, err := storageTrie.NodeIterator(nil)
+ if err != nil {
+ log.Error("Failed to open storage iterator", "root", acc.Root, "err", err)
+ return err
+ }
for storageIter.Next(true) {
nodes += 1
node := storageIter.Hash()
@@ -477,7 +514,9 @@ func dumpState(ctx *cli.Context) error {
if err != nil {
return err
}
- snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false)
+ triedb := utils.MakeTrieDatabase(ctx, db, false, true)
+ defer triedb.Close()
+ snaptree, err := snapshot.New(db, triedb, 256, root, false, false, false)
if err != nil {
return err
}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 806384adce..2e070815b9 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -31,6 +31,9 @@ import (
"time"
"github.com/ethereum/go-ethereum/internal/flags"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
pcsclite "github.com/gballet/go-libpcsclite"
gopsutil "github.com/shirou/gopsutil/mem"
@@ -225,18 +228,30 @@ var (
Value: "full",
Category: flags.StateCategory,
}
+ StateSchemeFlag = &cli.StringFlag{
+ Name: "state.scheme",
+ Usage: `State scheme to use for trie storage ("hash" or "path")`,
+ Value: rawdb.HashScheme, // Default to hash scheme
+ Category: flags.StateCategory,
+ }
+ StateHistoryFlag = &cli.Uint64Flag{
+ Name: "history.state",
+ Usage: "Number of recent blocks to retain state history for (default = 90,000 blocks, 0 = entire chain)",
+ Value: ethconfig.Defaults.StateHistory,
+ Category: flags.StateCategory,
+ }
+ TransactionHistoryFlag = &cli.Uint64Flag{
+ Name: "history.transactions",
+ Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)",
+ Value: ethconfig.Defaults.TransactionHistory,
+ Category: flags.StateCategory,
+ }
SnapshotFlag = &cli.BoolFlag{
Name: "snapshot",
Usage: `Enables snapshot-database mode (default = enable)`,
Value: true,
Category: flags.EthCategory,
}
- TxLookupLimitFlag = &cli.Uint64Flag{
- Name: "txlookuplimit",
- Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)",
- Value: ethconfig.Defaults.TxLookupLimit,
- Category: flags.EthCategory,
- }
TriesInMemoryFlag = &cli.IntFlag{
Name: "triesinmemory",
Usage: "The number of tries is kept in memory before pruning (default = 128)",
@@ -1835,17 +1850,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
- if ctx.String(GCModeFlag.Name) == "archive" && ctx.Uint64(TxLookupLimitFlag.Name) != 0 {
- ctx.Set(TxLookupLimitFlag.Name, "0")
- log.Warn("Disable transaction unindexing for archive node")
- }
- if ctx.IsSet(LightServeFlag.Name) && ctx.Uint64(TxLookupLimitFlag.Name) != 0 {
- log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited")
- }
+
var ks *keystore.KeyStore
if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 {
ks = keystores[0].(*keystore.KeyStore)
}
+ // Set configurations from CLI flags.
setEtherbase(ctx, ks, cfg)
setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light")
setTxPool(ctx, &cfg.TxPool)
@@ -1904,9 +1914,42 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.Preimages = true
log.Info("Enabling recording of key preimages since archive mode is used")
}
- if ctx.IsSet(TxLookupLimitFlag.Name) {
- cfg.TxLookupLimit = ctx.Uint64(TxLookupLimitFlag.Name)
+ if ctx.IsSet(StateHistoryFlag.Name) {
+ cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
+ }
+
+ // State scheme configuration logic.
+ // Parse the state scheme from the chain database first.
+ chaindb := tryMakeReadOnlyDatabase(ctx, stack)
+ scheme, err := ParseStateScheme(ctx, chaindb)
+ chaindb.Close()
+ if err != nil {
+ Fatalf("%v", err)
}
+ cfg.StateScheme = scheme
+
+ // Parse transaction history flag, if user is still using legacy config
+ // file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'.
+ if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit {
+ log.Warn("The config option 'TxLookupLimit' is deprecated and will be removed, please use 'TransactionHistory'")
+ cfg.TransactionHistory = cfg.TxLookupLimit
+ }
+
+ if ctx.IsSet(TransactionHistoryFlag.Name) {
+ cfg.TransactionHistory = ctx.Uint64(TransactionHistoryFlag.Name)
+ } else if ctx.IsSet(TxLookupLimitFlag.Name) {
+ log.Warn("The flag --txlookuplimit is deprecated and will be removed, please use --history.transactions")
+ cfg.TransactionHistory = ctx.Uint64(TxLookupLimitFlag.Name)
+ }
+
+ if ctx.String(GCModeFlag.Name) == "archive" && cfg.TransactionHistory != 0 {
+ cfg.TransactionHistory = 0
+ log.Warn("Disabled transaction unindexing for archive node")
+ }
+ if ctx.IsSet(LightServeFlag.Name) && cfg.TransactionHistory != 0 {
+ log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited")
+ }
+
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
}
@@ -2356,3 +2399,76 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
}
return preloads
}
+
+// tryMakeReadOnlyDatabase try to open the chain database in read-only mode,
+// or fallback to write mode if the database is not initialized.
+func tryMakeReadOnlyDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
+ // If datadir doesn't exist we need to open db in write-mode
+ // so database engine can create files.
+ readonly := true
+ if !common.FileExist(stack.ResolvePath("chaindata")) {
+ readonly = false
+ }
+ return MakeChainDatabase(ctx, stack, readonly)
+}
+
+// ParseStateScheme resolves scheme identifier from CLI flag. If the provided
+// state scheme is not compatible with the one of persistent scheme, an error
+// will be returned
+//
+// - none: use the scheme consistent with persistent state, or fallback
+// to hash-based scheme if state is empty.
+// - hash: use hash-based scheme or error out if not compatible with
+// persistent state scheme.
+// - path: use path-based scheme or error out if not compatible with
+// persistent state scheme.
+func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
+ // If state scheme is not specified, use the scheme consistent
+ // with persistent state, or fallback to hash mode if database
+ // is empty.
+ stored := rawdb.ReadStateScheme(disk)
+
+ // If the flag is not set.
+ if !ctx.IsSet(StateSchemeFlag.Name) {
+ if stored == "" {
+ // If the database is empty, use hash-based scheme.
+ log.Info("State schema set to default when database is empty", "scheme", "hash")
+ return rawdb.HashScheme, nil
+ }
+ log.Info("State scheme set to already existing", "scheme", stored)
+ return stored, nil
+ }
+ // If state scheme is specified, ensure it's compatible with
+ // persistent state.
+ scheme := ctx.String(StateSchemeFlag.Name)
+ if stored != "" || scheme == stored {
+ log.Info("State scheme set by user", "scheme", scheme)
+ return scheme, nil
+ }
+ return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme)
+}
+
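The resolution rules condense to the following table (illustrative combinations):

    flag unset,  stored ""      -> "hash"  (empty database defaults to hash)
    flag unset,  stored "path"  -> "path"  (follow the persistent scheme)
    flag "path", stored ""      -> "path"  (fresh database, user choice wins)
    flag "path", stored "path"  -> "path"  (matching schemes are accepted)
    flag "hash", stored "path"  -> error   (incompatible state scheme)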
+// MakeTrieDatabase constructs a trie database based on the configured scheme.
+func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database {
+ config := &trie.Config{
+ Preimages: preimage,
+ }
+ scheme, err := ParseStateScheme(ctx, disk)
+ if err != nil {
+ Fatalf("%v", err)
+ }
+ // Read-only mode is only supported by the path scheme.
+ if scheme == rawdb.HashScheme {
+ // Read-only mode is not implemented in hash mode,
+ // ignore the parameter silently. TODO(rjl493456442)
+ // please config it if read mode is implemented.
+ config.HashDB = hashdb.Defaults
+ return trie.NewDatabase(disk, config)
+ }
+ if readOnly {
+ config.PathDB = pathdb.ReadOnly
+ } else {
+ config.PathDB = pathdb.Defaults
+ }
+ return trie.NewDatabase(disk, config)
+}
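Taken together, a read-only command would wire these helpers roughly as follows; openStateDatabases is a hypothetical name, while the two Make* calls are the ones the snapshot commands above use:

package commands

import (
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/urfave/cli/v2"
)

// openStateDatabases opens the chain database read-only and derives a trie
// database honouring --state.scheme (preimages off, read-only where the
// scheme supports it). Callers must Close() both handles when done.
func openStateDatabases(ctx *cli.Context, stack *node.Node) (ethdb.Database, *trie.Database) {
	chaindb := utils.MakeChainDatabase(ctx, stack, true)
	triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
	return chaindb, triedb
}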
diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go
index 63b6d64879..8aa77e1ba0 100644
--- a/cmd/utils/flags_legacy.go
+++ b/cmd/utils/flags_legacy.go
@@ -38,6 +38,7 @@ var DeprecatedFlags = []cli.Flag{
NoUSBFlag,
CacheTrieJournalFlag,
CacheTrieRejournalFlag,
+ TxLookupLimitFlag,
}
var (
@@ -63,6 +64,13 @@ var (
Usage: "Time interval to regenerate the trie cache journal",
Category: flags.PerfCategory,
}
+ // Deprecated Nov 2024
+ TxLookupLimitFlag = &cli.Uint64Flag{
+ Name: "txlookuplimit",
+ Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain) (deprecated, use history.transactions instead)",
+ Value: ethconfig.Defaults.TransactionHistory,
+ Category: flags.DeprecatedCategory,
+ }
)
// showDeprecated displays deprecated flags that will be soon removed from the codebase.
diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go
index b82ea54b71..744d84595f 100644
--- a/consensus/clique/clique_test.go
+++ b/consensus/clique/clique_test.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
// This test case is a repro of an annoying bug that took us forever to catch.
@@ -53,7 +54,7 @@ func TestReimportMirroredState(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee),
}
copy(genspec.ExtraData[extraVanity:], addr[:])
- genesis := genspec.MustCommit(db)
+ genesis := genspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
// Generate a batch of blocks, each properly signed
chain, _ := core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
@@ -88,7 +89,7 @@ func TestReimportMirroredState(t *testing.T) {
}
// Insert the first two blocks and make sure the chain is valid
db = rawdb.NewMemoryDatabase()
- genspec.MustCommit(db)
+ genspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
defer chain.Stop()
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index e10baea556..09d8aaaf5d 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
// testerAccountPool is a pool to maintain currently active tester accounts,
@@ -403,7 +404,7 @@ func TestClique(t *testing.T) {
}
// Create a pristine blockchain with the genesis injected
db := rawdb.NewMemoryDatabase()
- genesisBlock := genesis.MustCommit(db)
+ genesisBlock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
// Assemble a chain of headers from the cast votes
config := *params.TestChainConfig
diff --git a/consensus/consortium/v2/consortium_test.go b/consensus/consortium/v2/consortium_test.go
index f80c86ca52..394e7b22c6 100644
--- a/consensus/consortium/v2/consortium_test.go
+++ b/consensus/consortium/v2/consortium_test.go
@@ -1417,6 +1417,10 @@ func TestAssembleFinalityVoteTripp(t *testing.T) {
}
func TestVerifyVote(t *testing.T) {
+ testVerifyVote(t, rawdb.PathScheme)
+ testVerifyVote(t, rawdb.HashScheme)
+}
+func testVerifyVote(t *testing.T, scheme string) {
const numValidator = 3
var err error
@@ -1441,8 +1445,8 @@ func TestVerifyVote(t *testing.T) {
Config: params.TestChainConfig,
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis := gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
+ chain, _ := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1, nil, true)
if _, err := chain.InsertChain(bs[:], nil); err != nil {
@@ -1525,6 +1529,11 @@ func TestVerifyVote(t *testing.T) {
}
func TestKnownBlockReorg(t *testing.T) {
+ testKnownBlockReorg(t, rawdb.PathScheme)
+ testKnownBlockReorg(t, rawdb.HashScheme)
+}
+
+func testKnownBlockReorg(t *testing.T, scheme string) {
db := rawdb.NewMemoryDatabase()
blsKeys := make([]blsCommon.SecretKey, 3)
@@ -1572,7 +1581,7 @@ func TestKnownBlockReorg(t *testing.T) {
gspec := &core.Genesis{
Config: &chainConfig,
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
mock := &mockContract{
validators: make(map[common.Address]blsCommon.PublicKey),
@@ -1590,7 +1599,7 @@ func TestKnownBlockReorg(t *testing.T) {
db: db,
}
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, &v2, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(scheme), gspec, nil, &v2, vm.Config{}, nil, nil)
extraData := [consortiumCommon.ExtraVanity + consortiumCommon.ExtraSeal]byte{}
blocks, _ := core.GenerateConsortiumChain(
@@ -1789,6 +1798,11 @@ func TestKnownBlockReorg(t *testing.T) {
}
func TestUpgradeRoninTrustedOrg(t *testing.T) {
+ testUpgradeRoninTrustedOrg(t, rawdb.PathScheme)
+ testUpgradeRoninTrustedOrg(t, rawdb.HashScheme)
+}
+
+func testUpgradeRoninTrustedOrg(t *testing.T, scheme string) {
db := rawdb.NewMemoryDatabase()
blsSecretKey, err := blst.RandKey()
if err != nil {
@@ -1824,7 +1838,7 @@ func TestUpgradeRoninTrustedOrg(t *testing.T) {
common.Address{0x10}: core.GenesisAccount{Balance: common.Big1},
},
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
mock := &mockContract{
validators: map[common.Address]blsCommon.PublicKey{
@@ -1844,7 +1858,7 @@ func TestUpgradeRoninTrustedOrg(t *testing.T) {
},
}
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, &v2, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(scheme), gspec, nil, &v2, vm.Config{}, nil, nil)
extraData := [consortiumCommon.ExtraVanity + consortiumCommon.ExtraSeal]byte{}
parent := genesis
@@ -1914,6 +1928,11 @@ func TestUpgradeRoninTrustedOrg(t *testing.T) {
}
func TestUpgradeAxieProxyCode(t *testing.T) {
+ testUpgradeAxieProxyCode(t, rawdb.PathScheme)
+ testUpgradeAxieProxyCode(t, rawdb.HashScheme)
+}
+
+func testUpgradeAxieProxyCode(t *testing.T, scheme string) {
secretKey, err := crypto.GenerateKey()
if err != nil {
t.Fatal(err)
@@ -1966,7 +1985,7 @@ func TestUpgradeAxieProxyCode(t *testing.T) {
gspec := &core.Genesis{
Config: chainConfig,
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
mock := &mockTrippContract{
checkpointValidators: []validatorWithBlsWeight{
validatorWithBlsWeight{
@@ -1993,7 +2012,7 @@ func TestUpgradeAxieProxyCode(t *testing.T) {
testTrippEffective: true,
}
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, v2, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(scheme), gspec, nil, v2, vm.Config{}, nil, nil)
extraData := &finality.HeaderExtraData{}
parent := genesis
@@ -2054,6 +2073,11 @@ func TestUpgradeAxieProxyCode(t *testing.T) {
}
func TestSystemTransactionOrder(t *testing.T) {
+ testSystemTransactionOrder(t, rawdb.PathScheme)
+ testSystemTransactionOrder(t, rawdb.HashScheme)
+}
+
+func testSystemTransactionOrder(t *testing.T, scheme string) {
db := rawdb.NewMemoryDatabase()
blsSecretKey, err := blst.RandKey()
if err != nil {
@@ -2093,7 +2117,7 @@ func TestSystemTransactionOrder(t *testing.T) {
common.Address{0x10}: core.GenesisAccount{Balance: common.Big1},
},
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
mock := &mockContract{
validators: map[common.Address]blsCommon.PublicKey{
@@ -2113,7 +2137,7 @@ func TestSystemTransactionOrder(t *testing.T) {
},
}
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, &v2, vm.Config{}, nil, nil)
+ chain, _ := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(scheme), gspec, nil, &v2, vm.Config{}, nil, nil)
extraData := [consortiumCommon.ExtraVanity + consortiumCommon.ExtraSeal]byte{}
signer := types.NewEIP155Signer(big.NewInt(2021))
@@ -2194,6 +2218,11 @@ func TestSystemTransactionOrder(t *testing.T) {
}
func TestIsPeriodBlock(t *testing.T) {
+ //testIsPeriodBlock(t, rawdb.PathScheme)
+ testIsPeriodBlock(t, rawdb.HashScheme)
+}
+
+func testIsPeriodBlock(t *testing.T, scheme string) {
const NUM_OF_VALIDATORS = 21
dateInSeconds := uint64(86400)
now := uint64(time.Now().Unix())
@@ -2215,8 +2244,8 @@ func TestIsPeriodBlock(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee),
Timestamp: midnight, // genesis at day 1
}
- genesis := gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
+ chain, _ := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
// create chain of up to 399 blocks, all of them are not period block
bs, _ := core.GenerateChain(&chainConfig, genesis, ethash.NewFaker(), db, 399, nil, true) // create chain of up to 399 blocks
if _, err := chain.InsertChain(bs[:], nil); err != nil {
@@ -2292,7 +2321,19 @@ func TestIsPeriodBlock(t *testing.T) {
}
}
+/*
+This test hits a missing parent layer when run in path mode:
+panic: triedb parent [0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421] layer missing [recovered]
+panic: triedb parent [0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421] layer missing
+The path-scheme variant is disabled for now, pending further investigation.
+*/
func TestIsTrippEffective(t *testing.T) {
+ testIsTrippEffective(t, rawdb.HashScheme)
+ // testIsTrippEffective(t, rawdb.PathScheme)
+}
+
+func testIsTrippEffective(t *testing.T, scheme string) {
now := uint64(time.Now().Unix())
midnight := uint64(now / dayInSeconds * dayInSeconds)
db := rawdb.NewMemoryDatabase()
@@ -2312,8 +2353,8 @@ func TestIsTrippEffective(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee),
Timestamp: midnight, // genesis at day 1
}
- genesis := gspec.MustCommit(db)
- chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
+ chain, _ := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
// create chain of up to 399 blocks, all of them are not Tripp effective
bs, _ := core.GenerateChain(&chainConfig, genesis, ethash.NewFaker(), db, 399, nil, true)
if _, err := chain.InsertChain(bs[:], nil); err != nil {
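All of the consortium tests above follow the same parameterization pattern. A condensed sketch with hypothetical names (TestSomething/testSomething); every call in the body appears verbatim in the hunks above:

package core_test

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

func TestSomething(t *testing.T) {
	testSomething(t, rawdb.HashScheme)
	testSomething(t, rawdb.PathScheme)
}

func testSomething(t *testing.T, scheme string) {
	db := rawdb.NewMemoryDatabase()
	gspec := &core.Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
	chain, _ := core.NewBlockChain(db, core.DefaultCacheConfigWithScheme(scheme),
		gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
	defer chain.Stop()
	_ = genesis // real tests generate and insert blocks from here
}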
diff --git a/core/bench_test.go b/core/bench_test.go
index 0fd7082e0c..cd0b3fbaba 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
func BenchmarkInsertChain_empty_memdb(b *testing.B) {
@@ -196,7 +197,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
Config: params.TestChainConfig,
Alloc: GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}},
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, b.N, gen, true)
// Time the insertion of the new chain.
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index 2ba2cff31e..27138f5df2 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -26,15 +26,20 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
-// Tests that simple header verification works, for both good and bad blocks.
func TestHeaderVerification(t *testing.T) {
+ testHeaderVerification(t, rawdb.HashScheme)
+ testHeaderVerification(t, rawdb.PathScheme)
+}
+
+func testHeaderVerification(t *testing.T, scheme string) {
// Create a simple chain to verify
var (
testdb = rawdb.NewMemoryDatabase()
gspec = &Genesis{Config: params.TestChainConfig}
- genesis = gspec.MustCommit(testdb)
+ genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, newDbConfig(scheme)))
blocks, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), testdb, 8, nil, true)
)
headers := make([]*types.Header, len(blocks))
@@ -42,7 +47,7 @@ func TestHeaderVerification(t *testing.T) {
headers[i] = block.Header()
}
// Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
- chain, _ := NewBlockChain(testdb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ chain, _ := NewBlockChain(testdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer chain.Stop()
for i := 0; i < len(blocks); i++ {
@@ -86,7 +91,7 @@ func testHeaderConcurrentVerification(t *testing.T, threads int) {
var (
testdb = rawdb.NewMemoryDatabase()
gspec = &Genesis{Config: params.TestChainConfig}
- genesis = gspec.MustCommit(testdb)
+ genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, trie.HashDefaults))
blocks, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), testdb, 8, nil, true)
)
headers := make([]*types.Header, len(blocks))
@@ -158,7 +163,7 @@ func testHeaderConcurrentAbortion(t *testing.T, threads int) {
var (
testdb = rawdb.NewMemoryDatabase()
gspec = &Genesis{Config: params.TestChainConfig}
- genesis = gspec.MustCommit(testdb)
+ genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, trie.HashDefaults))
blocks, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), testdb, 1024, nil, true)
)
headers := make([]*types.Header, len(blocks))
diff --git a/core/blockchain.go b/core/blockchain.go
index 67b8c9fffd..632a679a08 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -46,6 +46,8 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
lru "github.com/hashicorp/golang-lru/v2"
)
@@ -139,10 +141,30 @@ type CacheConfig struct {
SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
Preimages bool // Whether to store preimage of trie key to the disk
TriesInMemory int // The number of tries is kept in memory before pruning
+ StateHistory uint64 // Number of blocks from head whose state histories are reserved.
+ StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
}
+// triedbConfig derives the trie database config from the cache config.
+func (c *CacheConfig) triedbConfig() *trie.Config {
+ config := &trie.Config{Preimages: c.Preimages}
+ if c.StateScheme == rawdb.HashScheme {
+ config.HashDB = &hashdb.Config{
+ CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
+ }
+ }
+ if c.StateScheme == rawdb.PathScheme {
+ config.PathDB = &pathdb.Config{
+ StateHistory: c.StateHistory,
+ CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
+ DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024,
+ }
+ }
+ return config
+}
+
// defaultCacheConfig are the default caching values if none are specified by the
// user (also used during testing).
var defaultCacheConfig = &CacheConfig{
@@ -152,6 +174,15 @@ var defaultCacheConfig = &CacheConfig{
SnapshotLimit: 256,
SnapshotWait: true,
TriesInMemory: DefaultTriesInMemory,
+ StateScheme: rawdb.HashScheme,
+}
+
+// DefaultCacheConfigWithScheme returns a deep copied default cache config with
+// a provided trie node scheme.
+func DefaultCacheConfigWithScheme(scheme string) *CacheConfig {
+ config := *defaultCacheConfig
+ config.StateScheme = scheme
+ return &config
}
// BlockChain represents the canonical chain given a database with a genesis
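To summarize the conversion, a hedged sketch of how the cache config becomes a trie.Config; this is only valid inside package core, since triedbConfig is unexported, and newChainTrieDB is a hypothetical helper:

// Hypothetical helper inside package core.
func newChainTrieDB(db ethdb.Database) *trie.Database {
	cacheConfig := DefaultCacheConfigWithScheme(rawdb.PathScheme)
	// triedbConfig converts the MB limits above into bytes:
	//   PathDB.StateHistory   = cacheConfig.StateHistory
	//   PathDB.CleanCacheSize = cacheConfig.TrieCleanLimit * 1024 * 1024
	//   PathDB.DirtyCacheSize = cacheConfig.TrieDirtyLimit * 1024 * 1024
	// For HashScheme, only HashDB.CleanCacheSize is populated.
	return trie.NewDatabase(db, cacheConfig.triedbConfig())
}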
@@ -263,12 +294,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
blobSidecarsCache, _ := lru.New[common.Hash, types.BlobSidecars](blobSidecarsCacheLimit)
// Open trie database with provided config
- triedb := trie.NewDatabaseWithConfig(
- db,
- &trie.Config{
- Cache: cacheConfig.TrieCleanLimit,
- Preimages: cacheConfig.Preimages,
- })
+ triedb := trie.NewDatabase(db, cacheConfig.triedbConfig())
// Setup the genesis block, commit the provided genesis specification
// to database if the genesis block is not present yet, or load the
// stored one from database.
@@ -279,15 +305,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
log.Info("Initialised chain configuration", "config", chainConfig)
bc := &BlockChain{
- chainConfig: chainConfig,
- cacheConfig: cacheConfig,
- db: db,
- triedb: triedb,
- triegc: prque.New(nil),
- stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
- Cache: cacheConfig.TrieCleanLimit,
- Preimages: cacheConfig.Preimages,
- }),
+ chainConfig: chainConfig,
+ cacheConfig: cacheConfig,
+ db: db,
+ triedb: triedb,
+ triegc: prque.New(nil),
+ stateCache: state.NewDatabaseWithNodeDB(db, triedb),
quit: make(chan struct{}),
chainmu: syncx.NewClosableMutex(),
shouldPreserve: shouldPreserve,
@@ -664,6 +687,26 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
pivot := rawdb.ReadLastPivotNumber(bc.db)
frozen, _ := bc.db.Ancients()
+ // resetState resets the persistent state to genesis if the genesis state is missing.
+ resetState := func() {
+ // Short circuit if the genesis state is already present.
+ if bc.HasState(bc.genesisBlock.Root()) {
+ return
+ }
+ // Reset the state database to empty so the genesis state can be committed.
+ // Note, this should only happen in the path-based scheme, as the Reset
+ // function is only callable in this mode.
+ if bc.triedb.Scheme() == rawdb.PathScheme {
+ if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
+ log.Crit("Failed to clean state", "err", err) // Shouldn't happen
+ }
+ }
+ // Write genesis state into database.
+ if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
+ log.Crit("Failed to commit genesis state", "err", err)
+ }
+ }
+
updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
// Rewind the blockchain, ensuring we don't end up with a stateless head
// block. Note, depth equality is permitted to allow using SetHead as a
@@ -673,6 +716,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if newHeadBlock == nil {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
+ resetState()
} else {
// Block exists, keep rewinding until we find one with state,
// keeping rewinding until we exceed the optional threshold
@@ -684,7 +728,9 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root {
beyondRoot, rootNumber = true, newHeadBlock.NumberU64()
}
- if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil {
+
+ // In the path-based scheme (PBSS), a missing state may be recoverable from the history database
+ if !bc.HasState(newHeadBlock.Root()) && !bc.stateRecoverable(newHeadBlock.Root()) {
log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
if pivot == nil || newHeadBlock.NumberU64() > *pivot {
parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
@@ -701,16 +747,12 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
}
if beyondRoot || newHeadBlock.NumberU64() == 0 {
if newHeadBlock.NumberU64() == 0 {
- // Recommit the genesis state into disk in case the rewinding destination
- // is genesis block and the relevant state is gone. In the future this
- // rewinding destination can be the earliest block stored in the chain
- // if the historical chain pruning is enabled. In that case the logic
- // needs to be improved here.
- if !bc.HasState(bc.genesisBlock.Root()) {
- if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
- log.Crit("Failed to commit genesis state", "err", err)
- }
- log.Debug("Recommitted genesis state to disk")
+ resetState()
+ } else if !bc.HasState(newHeadBlock.Root()) {
+ // The rewind target has recoverable state; since it is not
+ // currently present, run the state recovery here.
+ if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil {
+ log.Crit("Failed to rollback state", "err", err) // Shouldn't happen
}
}
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
@@ -808,7 +850,13 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
if block == nil {
return fmt.Errorf("non existent block [%x..]", hash[:4])
}
+ // Reset the trie database with the fresh fast-synced state.
root := block.Root()
+ if bc.triedb.Scheme() == rawdb.PathScheme {
+ if err := bc.triedb.Reset(root); err != nil {
+ return err
+ }
+ }
if !bc.HasState(root) {
return fmt.Errorf("non existent state [%x..]", root[:4])
}
@@ -988,38 +1036,48 @@ func (bc *BlockChain) Stop() {
log.Error("Failed to journal state snapshot", "err", err)
}
}
-
- // Ensure the state of a recent block is also stored to disk before exiting.
- // We're writing three different states to catch different restart scenarios:
- // - HEAD: So we don't need to reprocess any blocks in the general case
- // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle
- // - HEAD-127: So we have a hard limit on the number of blocks reexecuted
- if !bc.cacheConfig.TrieDirtyDisabled {
- triedb := bc.triedb
-
- for _, offset := range []uint64{0, 1, uint64(bc.cacheConfig.TriesInMemory) - 1} {
- if number := bc.CurrentBlock().NumberU64(); number > offset {
- recent := bc.GetBlockByNumber(number - offset)
-
- log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
- if err := triedb.Commit(recent.Root(), true); err != nil {
+ if bc.triedb.Scheme() == rawdb.PathScheme {
+ // Ensure that the in-memory trie nodes are journaled to disk properly.
+ if err := bc.triedb.Journal(bc.CurrentBlock().Root()); err != nil {
+ log.Info("Failed to journal in-memory trie nodes", "err", err)
+ }
+ } else {
+ // Ensure the state of a recent block is also stored to disk before exiting.
+ // We're writing three different states to catch different restart scenarios:
+ // - HEAD: So we don't need to reprocess any blocks in the general case
+ // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle
+ // - HEAD-127: So we have a hard limit on the number of blocks reexecuted
+ if !bc.cacheConfig.TrieDirtyDisabled {
+ triedb := bc.triedb
+
+ for _, offset := range []uint64{0, 1, uint64(bc.cacheConfig.TriesInMemory) - 1} {
+ if number := bc.CurrentBlock().NumberU64(); number > offset {
+ recent := bc.GetBlockByNumber(number - offset)
+
+ log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
+ if err := triedb.Commit(recent.Root(), true); err != nil {
+ log.Error("Failed to commit recent state trie", "err", err)
+ }
+ }
+ }
+ if snapBase != (common.Hash{}) {
+ log.Info("Writing snapshot state to disk", "root", snapBase)
+ if err := triedb.Commit(snapBase, true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
}
}
- }
- if snapBase != (common.Hash{}) {
- log.Info("Writing snapshot state to disk", "root", snapBase)
- if err := triedb.Commit(snapBase, true); err != nil {
- log.Error("Failed to commit recent state trie", "err", err)
+ for !bc.triegc.Empty() {
+ triedb.Dereference(bc.triegc.PopItem().(common.Hash))
+ }
+ if size, _ := triedb.Size(); size != 0 {
+ log.Error("Dangling trie nodes after full cleanup")
}
- }
- for !bc.triegc.Empty() {
- triedb.Dereference(bc.triegc.PopItem().(common.Hash))
- }
- if size, _ := triedb.Size(); size != 0 {
- log.Error("Dangling trie nodes after full cleanup")
}
}
+ // Close the trie database, flushing any collected preimages to disk
+ if err := bc.triedb.Close(); err != nil {
+ log.Error("Failed to close trie db", "err", err)
+ }
log.Info("Blockchain stopped")
}
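
The shutdown persistence now branches on the scheme: hash-scheme nodes commit a few recent tries, while path-scheme nodes journal their in-memory layers for replay. A condensed sketch of the branch above (error handling elided):

    if bc.triedb.Scheme() == rawdb.PathScheme {
        // Journal dirty in-memory layers keyed by the head root; they are
        // re-applied when the node restarts.
        bc.triedb.Journal(bc.CurrentBlock().Root())
    } else {
        // Commit HEAD (plus HEAD-1 and HEAD-(TriesInMemory-1) as above) so a
        // restart re-executes at most TriesInMemory blocks.
        bc.triedb.Commit(bc.CurrentBlock().Root(), true)
    }
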
@@ -1558,62 +1616,65 @@ func (bc *BlockChain) writeBlockWithState(
if err != nil {
return NonStatTy, err
}
-
- // If we're running an archive node, always flush
- if bc.cacheConfig.TrieDirtyDisabled {
- if err := bc.triedb.Commit(root, false); err != nil {
- return NonStatTy, err
- }
- } else {
- // Full but not archive node, do proper garbage collection
- bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
- bc.triegc.Push(root, -int64(block.NumberU64()))
-
- triesInMemory := uint64(bc.cacheConfig.TriesInMemory)
- if current := block.NumberU64(); current > triesInMemory {
- // If we exceeded our memory allowance, flush matured singleton nodes to disk
- var (
- nodes, imgs = bc.triedb.Size()
- limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
- )
- if nodes > limit || imgs > 4*1024*1024 {
- bc.triedb.Cap(limit - ethdb.IdealBatchSize)
- }
- // Find the next state trie we need to commit
- chosen := current - triesInMemory
-
- // If we exceeded out time allowance, flush an entire trie to disk
- if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
- // If the header is missing (canonical chain behind), we're reorging a low
- // diff sidechain. Suspend committing until this operation is completed.
- header := bc.GetHeaderByNumber(chosen)
- if header == nil {
- log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
- } else {
- // If we're exceeding limits but haven't reached a large enough memory gap,
- // warn the user that the system is becoming unstable.
- if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
- log.Info(
- "State in memory for too long, committing",
- "time", bc.gcproc,
- "allowance", bc.cacheConfig.TrieTimeLimit,
- "optimum", float64(chosen-lastWrite)/float64(triesInMemory),
- )
+ // If the node is running in path mode, skip the explicit GC operation,
+ // which is unnecessary in this mode.
+ if bc.triedb.Scheme() != rawdb.PathScheme {
+ // If we're running an archive node, always flush
+ if bc.cacheConfig.TrieDirtyDisabled {
+ if err := bc.triedb.Commit(root, false); err != nil {
+ return NonStatTy, err
+ }
+ } else {
+ // Full but not archive node, do proper garbage collection
+ bc.triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
+ bc.triegc.Push(root, -int64(block.NumberU64()))
+
+ triesInMemory := uint64(bc.cacheConfig.TriesInMemory)
+ if current := block.NumberU64(); current > triesInMemory {
+ // If we exceeded our memory allowance, flush matured singleton nodes to disk
+ var (
+ nodes, imgs = bc.triedb.Size()
+ limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
+ )
+ if nodes > limit || imgs > 4*1024*1024 {
+ bc.triedb.Cap(limit - ethdb.IdealBatchSize)
+ }
+ // Find the next state trie we need to commit
+ chosen := current - triesInMemory
+
+ // If we exceeded our time allowance, flush an entire trie to disk
+ if bc.gcproc > bc.cacheConfig.TrieTimeLimit {
+ // If the header is missing (canonical chain behind), we're reorging a low
+ // diff sidechain. Suspend committing until this operation is completed.
+ header := bc.GetHeaderByNumber(chosen)
+ if header == nil {
+ log.Warn("Reorg in progress, trie commit postponed", "number", chosen)
+ } else {
+ // If we're exceeding limits but haven't reached a large enough memory gap,
+ // warn the user that the system is becoming unstable.
+ if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit {
+ log.Info(
+ "State in memory for too long, committing",
+ "time", bc.gcproc,
+ "allowance", bc.cacheConfig.TrieTimeLimit,
+ "optimum", float64(chosen-lastWrite)/float64(triesInMemory),
+ )
+ }
+ // Flush an entire trie and restart the counters
+ bc.triedb.Commit(header.Root, true)
+ lastWrite = chosen
+ bc.gcproc = 0
}
- // Flush an entire trie and restart the counters
- bc.triedb.Commit(header.Root, true)
- lastWrite = chosen
- bc.gcproc = 0
}
- }
- // Garbage collect anything below our required write retention
- for !bc.triegc.Empty() {
- root, number := bc.triegc.Pop()
- if uint64(-number) > chosen {
- bc.triegc.Push(root, number)
- break
+ // Garbage collect anything below our required write retention
+ for !bc.triegc.Empty() {
+ root, number := bc.triegc.Pop()
+ if uint64(-number) > chosen {
+ bc.triegc.Push(root, number)
+ break
+ }
+ bc.triedb.Dereference(root.(common.Hash))
}
- bc.triedb.Dereference(root.(common.Hash))
}
}
}
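
One subtlety in the retained hash-scheme GC: roots enter triegc with priority -blockNumber, so the max-priority queue pops the oldest root first and the loop can stop at the first root newer than the retention point. A short sketch of that invariant, assuming the prque semantics used above:

    // Push(rootA, -100) and Push(rootB, -105): Pop() yields rootA first,
    // since -100 > -105, i.e. the lowest block number wins.
    root, number := bc.triegc.Pop()
    if uint64(-number) > chosen { // newer than the retention point
        bc.triegc.Push(root, number) // put it back and stop collecting
    }
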
@@ -2164,6 +2225,13 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, si
)
parent := it.previous()
for parent != nil && !bc.HasState(parent.Root) {
+ // If the node runs pathdb and the state is in its history, recover it
+ if bc.stateRecoverable(parent.Root) {
+ if err := bc.triedb.Recover(parent.Root); err != nil {
+ return 0, err
+ }
+ break
+ }
hashes = append(hashes, parent.Hash())
numbers = append(numbers, parent.Number.Uint64())
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index f8371afa2d..f9e7621d12 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -320,10 +320,16 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
return bc.HasState(block.Root())
}
-// TrieNode retrieves a blob of data associated with a trie node
-// either from ephemeral in-memory cache, or from persistent storage.
-func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
- return bc.stateCache.TrieDB().Node(hash)
+// stateRecoverable checks if the specified state is recoverable.
+// Note, this function assumes the state is not already present: an
+// available state is not treated as recoverable, so false is
+// returned in that case.
+func (bc *BlockChain) stateRecoverable(root common.Hash) bool {
+ if bc.triedb.Scheme() == rawdb.HashScheme {
+ return false
+ }
+ result, _ := bc.triedb.Recoverable(root)
+ return result
}
// TrieDB retrieves the low level trie database used for data storage.
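
Callers elsewhere in this patch pair this check with triedb.Recover; a condensed sketch of the intended pattern:

    // Recover a missing state from the path database's history, if possible.
    if !bc.HasState(root) && bc.stateRecoverable(root) {
        if err := bc.triedb.Recover(root); err != nil {
            return err // history exists but replaying it failed
        }
    }
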
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index 81503ad132..6d28624c03 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -24,6 +24,7 @@ import (
"io/ioutil"
"math/big"
"os"
+ "path"
"testing"
"time"
@@ -33,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
// Tests a recovery for a short canonical chain where a recent block was already
@@ -1751,12 +1753,18 @@ func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) {
}
func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
+ testRepairWithScheme(t, tt, snapshots, rawdb.PathScheme)
+ testRepairWithScheme(t, tt, snapshots, rawdb.HashScheme)
+}
+
+func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) {
// It's hard to follow the test case, visualize the input
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump(true))
// Create a temporary persistent database
datadir, err := ioutil.TempDir("", "")
+ ancient := path.Join(datadir, "ancient")
if err != nil {
t.Fatalf("Failed to create temporary datadir: %v", err)
}
@@ -1764,7 +1772,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
- AncientsDirectory: datadir,
+ AncientsDirectory: ancient,
Ephemeral: true,
})
if err != nil {
@@ -1775,15 +1783,18 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
// Initialize a fresh chain
var (
gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
engine = ethash.NewFullFaker()
config = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0, // Disable snapshot by default
+ StateScheme: scheme,
}
)
+ triedb.Close()
if snapshots {
config.SnapshotLimit = 256
config.SnapshotWait = true
@@ -1810,7 +1821,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
- chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
+ chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), true)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
@@ -1833,11 +1844,12 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
// Pull the plug on the database, simulating a hard crash
db.Close()
+ chain.triedb.Close()
// Start a new blockchain back up and see where the repair leads us
db, err = rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
- AncientsDirectory: datadir,
+ AncientsDirectory: ancient,
Ephemeral: true,
})
@@ -1846,7 +1858,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
defer db.Close()
- chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -1889,11 +1901,17 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
// In this case the snapshot layer of B3 is not created because of existent
// state.
func TestIssue23496(t *testing.T) {
+ //testIssue23496(t, rawdb.HashScheme)
+ testIssue23496(t, rawdb.PathScheme)
+}
+
+func testIssue23496(t *testing.T, scheme string) {
// It's hard to follow the test case, visualize the input
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// Create a temporary persistent database
datadir, err := ioutil.TempDir("", "")
+ ancient := path.Join(datadir, "ancient")
if err != nil {
t.Fatalf("Failed to create temporary datadir: %v", err)
}
@@ -1901,7 +1919,7 @@ func TestIssue23496(t *testing.T) {
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
- AncientsDirectory: datadir,
+ AncientsDirectory: ancient,
})
if err != nil {
@@ -1912,7 +1930,8 @@ func TestIssue23496(t *testing.T) {
// Initialize a fresh chain
var (
gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
engine = ethash.NewFullFaker()
config = &CacheConfig{
TrieCleanLimit: 256,
@@ -1920,8 +1939,10 @@ func TestIssue23496(t *testing.T) {
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: true,
+ StateScheme: scheme,
}
)
+ triedb.Close()
chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
@@ -1935,7 +1956,7 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[:1], nil); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- chain.stateCache.TrieDB().Commit(blocks[0].Root(), true)
+ chain.triedb.Commit(blocks[0].Root(), true)
// Insert block B2 and commit the snapshot into disk
if _, err := chain.InsertChain(blocks[1:2], nil); err != nil {
@@ -1949,20 +1970,20 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[2:3], nil); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- chain.stateCache.TrieDB().Commit(blocks[2].Root(), true)
+ chain.triedb.Commit(blocks[2].Root(), true)
// Insert the remaining blocks
if _, err := chain.InsertChain(blocks[3:], nil); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
-
// Pull the plug on the database, simulating a hard crash
db.Close()
+ chain.triedb.Close()
// Start a new blockchain back up and see where the repair leads us
db, err = rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
- AncientsDirectory: datadir,
+ AncientsDirectory: ancient,
Ephemeral: true,
})
if err != nil {
@@ -1970,7 +1991,7 @@ func TestIssue23496(t *testing.T) {
}
defer db.Close()
- chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err = NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -1982,8 +2003,12 @@ func TestIssue23496(t *testing.T) {
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
}
- if head := chain.CurrentBlock(); head.NumberU64() != uint64(1) {
- t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(1))
+ expHead := uint64(1)
+ if scheme == rawdb.PathScheme {
+ expHead = uint64(2)
+ }
+ if head := chain.CurrentBlock(); head.NumberU64() != expHead {
+ t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), expHead)
}
// Reinsert B2-B4
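
The scheme-dependent expectation deserves a gloss; the reasoning below is an assumption inferred from the recovery logic earlier in this patch, not stated by the author:

    // Assumption: the hash scheme can only rewind to an explicitly committed
    // root (B1 here), while the path scheme can also roll state back through
    // its history toward the snapshot disk layer, so the repair settles one
    // block higher, on B2.
    expHead := uint64(1) // hash scheme
    if scheme == rawdb.PathScheme {
        expHead = uint64(2)
    }
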
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index 3a66101c1c..8d48e3134e 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -24,6 +24,7 @@ import (
"io/ioutil"
"math/big"
"os"
+ "path"
"strings"
"testing"
"time"
@@ -31,9 +32,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)
// rewindTest is a test case for chain rollback upon user request.
@@ -1950,12 +1955,18 @@ func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) {
}
func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
+ testSetHeadWithScheme(t, tt, snapshots, rawdb.PathScheme)
+ testSetHeadWithScheme(t, tt, snapshots, rawdb.HashScheme)
+}
+
+func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump(false))
// Create a temporary persistent database
datadir, err := ioutil.TempDir("", "")
+ ancient := path.Join(datadir, "ancient")
if err != nil {
t.Fatalf("Failed to create temporary datadir: %v", err)
}
@@ -1963,7 +1974,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
- AncientsDirectory: datadir,
+ AncientsDirectory: ancient,
Ephemeral: true,
})
if err != nil {
@@ -1974,15 +1985,18 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
// Initialize a fresh chain
var (
gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, newDbConfig(scheme))
+ genesis = gspec.MustCommit(db, triedb)
engine = ethash.NewFullFaker()
config = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0, // Disable snapshot
+ StateScheme: scheme,
}
)
+ triedb.Close()
if snapshots {
config.SnapshotLimit = 256
config.SnapshotWait = true
@@ -2009,7 +2023,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
- chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
+ chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), true)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
@@ -2019,13 +2033,17 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
if _, err := chain.InsertChain(canonblocks[tt.commitBlock:], nil); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
- // Manually dereference anything not committed to not have to work with 128+ tries
- for _, block := range sideblocks {
- chain.stateCache.TrieDB().Dereference(block.Root())
- }
- for _, block := range canonblocks {
- chain.stateCache.TrieDB().Dereference(block.Root())
+ // Reopen the trie database without persisting in-memory dirty nodes.
+ chain.triedb.Close()
+ dbconfig := &trie.Config{}
+ if scheme == rawdb.PathScheme {
+ dbconfig.PathDB = pathdb.Defaults
+ } else {
+ dbconfig.HashDB = hashdb.Defaults
}
+ chain.triedb = trie.NewDatabase(chain.db, dbconfig)
+ chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb)
// Force run a freeze cycle
type freezer interface {
Freeze(threshold uint64) error
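
testSetHead above seeds the genesis through newDbConfig(scheme), whose definition lives outside this hunk; a reconstruction consistent with the reopen logic above would look like:

    // Hypothetical reconstruction of newDbConfig; the actual definition is
    // in another part of this patch.
    func newDbConfig(scheme string) *trie.Config {
        if scheme == rawdb.HashScheme {
            return &trie.Config{HashDB: hashdb.Defaults}
        }
        return &trie.Config{PathDB: pathdb.Defaults}
    }
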
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index c4e1229648..d9f54e4a1f 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -22,9 +22,9 @@ package core
import (
"bytes"
"fmt"
- "io/ioutil"
"math/big"
"os"
+ "path"
"strings"
"testing"
"time"
@@ -40,6 +40,7 @@ import (
// snapshotTestBasic wraps the common testing fields in the snapshot tests.
type snapshotTestBasic struct {
+ scheme string // Disk scheme used for storing trie nodes
chainBlocks int // Number of blocks to generate for the canonical chain
snapshotBlock uint64 // Block number of the relevant snapshot disk layer
commitBlock uint64 // Block number for which to commit the state to disk
@@ -52,44 +53,38 @@ type snapshotTestBasic struct {
// share fields, set in runtime
datadir string
+ ancient string
db ethdb.Database
- gendb ethdb.Database
+ genDb ethdb.Database
engine consensus.Engine
+ gspec *Genesis
}
func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) {
// Create a temporary persistent database
- datadir, err := ioutil.TempDir("", "")
- if err != nil {
- t.Fatalf("Failed to create temporary datadir: %v", err)
- }
- os.RemoveAll(datadir)
+ datadir := t.TempDir()
+ ancient := path.Join(datadir, "ancient")
db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir,
- AncientsDirectory: datadir,
- Ephemeral: true,
+ AncientsDirectory: ancient,
})
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
}
// Initialize a fresh chain
var (
- gspec = &Genesis{Config: params.AllEthashProtocolChanges, BaseFee: big.NewInt(params.InitialBaseFee)}
- genesis = gspec.MustCommit(db)
- engine = ethash.NewFullFaker()
- gendb = rawdb.NewMemoryDatabase()
-
- // Snapshot is enabled, the first snapshot is created from the Genesis.
- // The snapshot memory allowance is 256MB, it means no snapshot flush
- // will happen during the block insertion.
- cacheConfig = defaultCacheConfig
+ gspec = &Genesis{
+ BaseFee: big.NewInt(params.InitialBaseFee),
+ Config: params.AllEthashProtocolChanges,
+ }
+ engine = ethash.NewFullFaker()
)
- chain, err := NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
- blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, func(i int, b *BlockGen) {}, true)
+ genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, basic.chainBlocks, func(i int, b *BlockGen) {})
// Insert the blocks with configured settings.
var breakpoints []uint64
@@ -106,7 +101,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
startPoint = point
if basic.commitBlock > 0 && basic.commitBlock == point {
- chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true)
+ chain.TrieDB().Commit(blocks[point-1].Root(), false)
}
if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
// Flushing the entire snap tree into the disk, the
@@ -125,9 +120,11 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
// Set runtime fields
basic.datadir = datadir
+ basic.ancient = ancient
basic.db = db
- basic.gendb = gendb
+ basic.genDb = genDb
basic.engine = engine
+ basic.gspec = gspec
return chain, blocks
}
@@ -139,17 +136,17 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader)
}
- if head := chain.CurrentFastBlock(); head.NumberU64() != basic.expHeadFastBlock {
- t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadFastBlock)
+ if head := chain.CurrentFastBlock(); head.Number().Uint64() != basic.expHeadFastBlock {
+ t.Errorf("Head fast block mismatch: have %d, want %d", head.Number(), basic.expHeadFastBlock)
}
- if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock {
- t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock)
+ if head := chain.CurrentBlock(); head.Number().Uint64() != basic.expHeadBlock {
+ t.Errorf("Head block mismatch: have %d, want %d", head.Number(), basic.expHeadBlock)
}
// Check the disk layer, ensure they are matched
block := chain.GetBlockByNumber(basic.expSnapshotBottom)
if block == nil {
- t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
+ t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
}
@@ -160,6 +157,7 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [
}
}
+//nolint:unused
func (basic *snapshotTestBasic) dump() string {
buffer := new(strings.Builder)
@@ -210,8 +208,9 @@ func (basic *snapshotTestBasic) dump() string {
func (basic *snapshotTestBasic) teardown() {
basic.db.Close()
- basic.gendb.Close()
+ basic.genDb.Close()
os.RemoveAll(basic.datadir)
+ os.RemoveAll(basic.ancient)
}
// snapshotTest is a test case type for normal snapshot recovery.
@@ -228,8 +227,7 @@ func (snaptest *snapshotTest) test(t *testing.T) {
// Restart the chain normally
chain.Stop()
- gspec := &Genesis{Config: params.AllEthashProtocolChanges}
- newchain, err := NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -238,7 +236,7 @@ func (snaptest *snapshotTest) test(t *testing.T) {
snaptest.verify(t, newchain, blocks)
}
-// crashSnapshotTest is a test case type for innormal snapshot recovery.
+// crashSnapshotTest is a test case type for irregular snapshot recovery.
// It can be used for testing that restart Geth after the crash.
type crashSnapshotTest struct {
snapshotTestBasic
@@ -253,14 +251,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
// Pull the plug on the database, simulating a hard crash
db := chain.db
db.Close()
+ chain.triedb.Close()
// Start a new blockchain back up and see where the repair leads us
newdb, err := rawdb.Open(rawdb.OpenOptions{
Directory: snaptest.datadir,
- AncientsDirectory: snaptest.datadir,
- Ephemeral: true,
+ AncientsDirectory: snaptest.ancient,
})
-
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
}
@@ -270,14 +267,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) {
// the crash, we do restart twice here: one after the crash and one
// after the normal stop. It's used to ensure the broken snapshot
// can be detected all the time.
- gspec := &Genesis{Config: params.AllEthashProtocolChanges}
- newchain, err := NewBlockChain(newdb, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
newchain.Stop()
- newchain, err = NewBlockChain(newdb, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -304,7 +300,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
// Insert blocks without enabling snapshot if gapping is required.
chain.Stop()
- gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, func(i int, b *BlockGen) {}, true)
+ gappedBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, func(i int, b *BlockGen) {}, false)
// Insert a few more blocks without enabling snapshot
var cacheConfig = &CacheConfig{
@@ -312,9 +308,9 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
+ StateScheme: snaptest.scheme,
}
- gspec := &Genesis{Config: params.AllEthashProtocolChanges}
- newchain, err := NewBlockChain(snaptest.db, cacheConfig, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -322,7 +318,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) {
newchain.Stop()
// Restart the chain with enabling the snapshot
- newchain, err = NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -349,56 +345,8 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
// Rewind the chain if setHead operation is required.
chain.SetHead(snaptest.setHead)
chain.Stop()
- gspec := &Genesis{Config: params.AllEthashProtocolChanges}
- newchain, err := NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- defer newchain.Stop()
-
- snaptest.verify(t, newchain, blocks)
-}
-// restartCrashSnapshotTest is the test type used to test this scenario:
-// - have a complete snapshot
-// - restart chain
-// - insert more blocks with enabling the snapshot
-// - commit the snapshot
-// - crash
-// - restart again
-type restartCrashSnapshotTest struct {
- snapshotTestBasic
- newBlocks int
-}
-
-func (snaptest *restartCrashSnapshotTest) test(t *testing.T) {
- // It's hard to follow the test case, visualize the input
- // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
- // fmt.Println(tt.dump())
- chain, blocks := snaptest.prepare(t)
-
- // Firstly, stop the chain properly, with all snapshot journal
- // and state committed.
- chain.Stop()
- gspec := &Genesis{Config: params.AllEthashProtocolChanges}
- newchain, err := NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
- if err != nil {
- t.Fatalf("Failed to recreate chain: %v", err)
- }
- newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {}, true)
- newchain.InsertChain(newBlocks, nil)
-
- // Commit the entire snapshot into the disk if requested. Note only
- // (a) snapshot root and (b) snapshot generator will be committed,
- // the diff journal is not.
- newchain.Snapshots().Cap(newBlocks[len(newBlocks)-1].Root(), 0)
-
- // Simulate the blockchain crash
- // Don't call chain.Stop here, so that no snapshot
- // journal and latest state will be committed
-
- // Restart the chain after the crash
- newchain, err = NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
@@ -432,35 +380,39 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
+ StateScheme: snaptest.scheme,
}
- gspec := &Genesis{Config: params.AllEthashProtocolChanges}
- newchain, err := NewBlockChain(snaptest.db, config, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
- newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {}, true)
+ newBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, func(i int, b *BlockGen) {}, false)
newchain.InsertChain(newBlocks, nil)
newchain.Stop()
- // Restart the chain, the wiper should starts working
+ // Restart the chain, the wiper should start working
config = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: false, // Don't wait rebuild
+ StateScheme: snaptest.scheme,
}
- newchain, err = NewBlockChain(snaptest.db, config, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
+
// Simulate the blockchain crash.
+ tmp.triedb.Close()
- newchain, err = NewBlockChain(snaptest.db, nil, gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
+ newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
snaptest.verify(t, newchain, blocks)
+ newchain.Stop()
}
// Tests a Geth restart with valid snapshot. Before the shutdown, all snapshot
@@ -484,20 +436,23 @@ func TestRestartWithNewSnapshot(t *testing.T) {
// Expected head fast block: C8
// Expected head block : C8
// Expected snapshot disk : G
- test := &snapshotTest{
- snapshotTestBasic{
- chainBlocks: 8,
- snapshotBlock: 0,
- commitBlock: 0,
- expCanonicalBlocks: 8,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 8,
- expSnapshotBottom: 0, // Initial disk layer built from genesis
- },
- }
- test.test(t)
- test.teardown()
+ for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+ test := &snapshotTest{
+ snapshotTestBasic{
+ scheme: scheme,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 8,
+ expSnapshotBottom: 0, // Initial disk layer built from genesis
+ },
+ }
+ test.test(t)
+ test.teardown()
+ }
}
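
Each snapshot test from here on repeats the same two-scheme loop; a small helper (hypothetical, not part of this patch) could express the pattern once:

    func forEachScheme(t *testing.T, run func(scheme string)) {
        t.Helper()
        for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
            run(scheme)
        }
    }

The per-test bodies would then shrink to a single forEachScheme(t, func(scheme string) { ... }) call.
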
// Tests a Geth was crashed and restarts with a broken snapshot. In this case the
@@ -523,20 +478,23 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
// Expected head fast block: C8
// Expected head block : G
// Expected snapshot disk : C4
- test := &crashSnapshotTest{
- snapshotTestBasic{
- chainBlocks: 8,
- snapshotBlock: 4,
- commitBlock: 0,
- expCanonicalBlocks: 8,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 0,
- expSnapshotBottom: 4, // Last committed disk layer, wait recovery
- },
- }
- test.test(t)
- test.teardown()
+ for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ scheme: scheme,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 0,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ expSnapshotBottom: 4, // Last committed disk layer, wait recovery
+ },
+ }
+ test.test(t)
+ test.teardown()
+ }
}
// Tests a Geth was crashed and restarts with a broken snapshot. In this case the
@@ -562,20 +520,23 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
// Expected head fast block: C8
// Expected head block : C2
// Expected snapshot disk : C4
- test := &crashSnapshotTest{
- snapshotTestBasic{
- chainBlocks: 8,
- snapshotBlock: 4,
- commitBlock: 2,
- expCanonicalBlocks: 8,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 2,
- expSnapshotBottom: 4, // Last committed disk layer, wait recovery
- },
- }
- test.test(t)
- test.teardown()
+ for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ scheme: scheme,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 2,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 2,
+ expSnapshotBottom: 4, // Last committed disk layer, wait recovery
+ },
+ }
+ test.test(t)
+ test.teardown()
+ }
}
// Tests a Geth was crashed and restarts with a broken snapshot. In this case
@@ -601,20 +562,27 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
// Expected head fast block: C8
// Expected head block : G
// Expected snapshot disk : C4
- test := &crashSnapshotTest{
- snapshotTestBasic{
- chainBlocks: 8,
- snapshotBlock: 4,
- commitBlock: 6,
- expCanonicalBlocks: 8,
- expHeadHeader: 8,
- expHeadFastBlock: 8,
- expHeadBlock: 0,
- expSnapshotBottom: 4, // Last committed disk layer, wait recovery
- },
- }
- test.test(t)
- test.teardown()
+ for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+ expHead := uint64(0)
+ if scheme == rawdb.PathScheme {
+ expHead = uint64(4)
+ }
+ test := &crashSnapshotTest{
+ snapshotTestBasic{
+ scheme: scheme,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 6,
+ expCanonicalBlocks: 8,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: expHead,
+ expSnapshotBottom: 4, // Last committed disk layer, wait recovery
+ },
+ }
+ test.test(t)
+ test.teardown()
+ }
}
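
As with the repair test, the split expectation is worth a note; the explanation below is an assumption drawn from the recovery semantics in this patch:

    // Assumption: with commitBlock = 6 above the snapshot disk layer C4, the
    // hash scheme cannot reconcile its committed state with the snapshot and
    // falls back to genesis, while the path scheme can roll state back to C4
    // through its history, hence expHead = 4.
    expHead := uint64(0) // hash scheme
    if scheme == rawdb.PathScheme {
        expHead = uint64(4)
    }
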
// Tests a Geth was running with snapshot enabled. Then restarts without
@@ -638,21 +606,24 @@ func TestGappedNewSnapshot(t *testing.T) {
// Expected head fast block: C10
// Expected head block : C10
// Expected snapshot disk : C10
- test := &gappedSnapshotTest{
- snapshotTestBasic: snapshotTestBasic{
- chainBlocks: 8,
- snapshotBlock: 0,
- commitBlock: 0,
- expCanonicalBlocks: 10,
- expHeadHeader: 10,
- expHeadFastBlock: 10,
- expHeadBlock: 10,
- expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD
- },
- gapped: 2,
- }
- test.test(t)
- test.teardown()
+ for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+ test := &gappedSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ scheme: scheme,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 10,
+ expHeadHeader: 10,
+ expHeadFastBlock: 10,
+ expHeadBlock: 10,
+ expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD
+ },
+ gapped: 2,
+ }
+ test.test(t)
+ test.teardown()
+ }
}
// Tests the Geth was running with snapshot enabled and resetHead is applied.
@@ -676,21 +647,24 @@ func TestSetHeadWithNewSnapshot(t *testing.T) {
// Expected head fast block: C4
// Expected head block : C4
// Expected snapshot disk : G
- test := &setHeadSnapshotTest{
- snapshotTestBasic: snapshotTestBasic{
- chainBlocks: 8,
- snapshotBlock: 0,
- commitBlock: 0,
- expCanonicalBlocks: 4,
- expHeadHeader: 4,
- expHeadFastBlock: 4,
- expHeadBlock: 4,
- expSnapshotBottom: 0, // The initial disk layer is built from the genesis
- },
- setHead: 4,
- }
- test.test(t)
- test.teardown()
+ for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+ test := &setHeadSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ scheme: scheme,
+ chainBlocks: 8,
+ snapshotBlock: 0,
+ commitBlock: 0,
+ expCanonicalBlocks: 4,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ expSnapshotBottom: 0, // The initial disk layer is built from the genesis
+ },
+ setHead: 4,
+ }
+ test.test(t)
+ test.teardown()
+ }
}
// Tests the Geth was running with a complete snapshot and then imports a few
@@ -714,19 +688,22 @@ func TestRecoverSnapshotFromWipingCrash(t *testing.T) {
// Expected head fast block: C10
// Expected head block : C8
// Expected snapshot disk : C10
- test := &wipeCrashSnapshotTest{
- snapshotTestBasic: snapshotTestBasic{
- chainBlocks: 8,
- snapshotBlock: 4,
- commitBlock: 0,
- expCanonicalBlocks: 10,
- expHeadHeader: 10,
- expHeadFastBlock: 10,
- expHeadBlock: 10,
- expSnapshotBottom: 10,
- },
- newBlocks: 2,
- }
- test.test(t)
- test.teardown()
+ for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
+ test := &wipeCrashSnapshotTest{
+ snapshotTestBasic: snapshotTestBasic{
+ scheme: scheme,
+ chainBlocks: 8,
+ snapshotBlock: 4,
+ commitBlock: 0,
+ expCanonicalBlocks: 10,
+ expHeadHeader: 10,
+ expHeadFastBlock: 10,
+ expHeadBlock: 10,
+ expSnapshotBottom: 10,
+ },
+ newBlocks: 2,
+ }
+ test.test(t)
+ test.teardown()
+ }
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index cdf4a8c956..c24c700593 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -57,15 +57,15 @@ var (
// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, if creates either a full block chain or a
// header only chain.
-func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) {
+func newCanonical(engine consensus.Engine, n int, full bool, scheme string) (ethdb.Database, *BlockChain, error) {
var (
db = rawdb.NewMemoryDatabase()
gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
)
-
// Initialize a fresh chain with only a genesis block
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
@@ -87,9 +87,9 @@ func newGwei(n int64) *big.Int {
}
// Test fork of length N starting from block i
-func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
+func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string) {
// Copy old chain up to #i into a new db
- db, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
+ db, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme)
if err != nil {
t.Fatal("could not make new canonical in testFork", err)
}
@@ -204,7 +204,12 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
}
func TestLastBlock(t *testing.T) {
- _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
+ testLastBlock(t, rawdb.HashScheme)
+ testLastBlock(t, rawdb.PathScheme)
+}
+
+func testLastBlock(t *testing.T, scheme string) {
+ _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
@@ -221,14 +226,19 @@ func TestLastBlock(t *testing.T) {
// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
-func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
-func TestExtendCanonicalBlocks(t *testing.T) { testExtendCanonical(t, true) }
-
-func testExtendCanonical(t *testing.T, full bool) {
+func TestExtendCanonicalHeaders(t *testing.T) {
+ testExtendCanonical(t, false, rawdb.HashScheme)
+ testExtendCanonical(t, false, rawdb.PathScheme)
+}
+func TestExtendCanonicalBlocks(t *testing.T) {
+ testExtendCanonical(t, true, rawdb.HashScheme)
+ testExtendCanonical(t, true, rawdb.PathScheme)
+}
+func testExtendCanonical(t *testing.T, full bool, scheme string) {
length := 5
// Make first chain starting from genesis
- _, processor, err := newCanonical(ethash.NewFaker(), length, full)
+ _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
@@ -241,22 +251,28 @@ func testExtendCanonical(t *testing.T, full bool) {
}
}
// Start fork from current height
- testFork(t, processor, length, 1, full, better)
- testFork(t, processor, length, 2, full, better)
- testFork(t, processor, length, 5, full, better)
- testFork(t, processor, length, 10, full, better)
+ testFork(t, processor, length, 1, full, better, scheme)
+ testFork(t, processor, length, 2, full, better, scheme)
+ testFork(t, processor, length, 5, full, better, scheme)
+ testFork(t, processor, length, 10, full, better, scheme)
}
// Tests that given a starting canonical chain of a given size, creating shorter
// forks do not take canonical ownership.
-func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
-func TestShorterForkBlocks(t *testing.T) { testShorterFork(t, true) }
+func TestShorterForkHeaders(t *testing.T) {
+ testShorterFork(t, false, rawdb.HashScheme)
+ testShorterFork(t, false, rawdb.PathScheme)
+}
+func TestShorterForkBlocks(t *testing.T) {
+ testShorterFork(t, true, rawdb.HashScheme)
+ testShorterFork(t, true, rawdb.PathScheme)
+}
-func testShorterFork(t *testing.T, full bool) {
+func testShorterFork(t *testing.T, full bool, scheme string) {
length := 10
// Make first chain starting from genesis
- _, processor, err := newCanonical(ethash.NewFaker(), length, full)
+ _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
@@ -269,24 +285,30 @@ func testShorterFork(t *testing.T, full bool) {
}
}
// Sum of numbers must be less than `length` for this to be a shorter fork
- testFork(t, processor, 0, 3, full, worse)
- testFork(t, processor, 0, 7, full, worse)
- testFork(t, processor, 1, 1, full, worse)
- testFork(t, processor, 1, 7, full, worse)
- testFork(t, processor, 5, 3, full, worse)
- testFork(t, processor, 5, 4, full, worse)
+ testFork(t, processor, 0, 3, full, worse, scheme)
+ testFork(t, processor, 0, 7, full, worse, scheme)
+ testFork(t, processor, 1, 1, full, worse, scheme)
+ testFork(t, processor, 1, 7, full, worse, scheme)
+ testFork(t, processor, 5, 3, full, worse, scheme)
+ testFork(t, processor, 5, 4, full, worse, scheme)
}
// Tests that given a starting canonical chain of a given size, creating longer
// forks do take canonical ownership.
-func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
-func TestLongerForkBlocks(t *testing.T) { testLongerFork(t, true) }
+func TestLongerForkHeaders(t *testing.T) {
+ testLongerFork(t, false, rawdb.HashScheme)
+ testLongerFork(t, false, rawdb.PathScheme)
+}
+func TestLongerForkBlocks(t *testing.T) {
+ testLongerFork(t, true, rawdb.HashScheme)
+ testLongerFork(t, true, rawdb.PathScheme)
+}
-func testLongerFork(t *testing.T, full bool) {
+func testLongerFork(t *testing.T, full bool, scheme string) {
length := 10
// Make first chain starting from genesis
- _, processor, err := newCanonical(ethash.NewFaker(), length, full)
+ _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
@@ -299,24 +321,30 @@ func testLongerFork(t *testing.T, full bool) {
}
}
// Sum of numbers must be greater than `length` for this to be a longer fork
- testFork(t, processor, 0, 11, full, better)
- testFork(t, processor, 0, 15, full, better)
- testFork(t, processor, 1, 10, full, better)
- testFork(t, processor, 1, 12, full, better)
- testFork(t, processor, 5, 6, full, better)
- testFork(t, processor, 5, 8, full, better)
+ testFork(t, processor, 0, 11, full, better, scheme)
+ testFork(t, processor, 0, 15, full, better, scheme)
+ testFork(t, processor, 1, 10, full, better, scheme)
+ testFork(t, processor, 1, 12, full, better, scheme)
+ testFork(t, processor, 5, 6, full, better, scheme)
+ testFork(t, processor, 5, 8, full, better, scheme)
}
// Tests that given a starting canonical chain of a given size, creating equal
// forks do take canonical ownership.
-func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
-func TestEqualForkBlocks(t *testing.T) { testEqualFork(t, true) }
+func TestEqualForkHeaders(t *testing.T) {
+ testEqualFork(t, false, rawdb.HashScheme)
+ testEqualFork(t, false, rawdb.PathScheme)
+}
+func TestEqualForkBlocks(t *testing.T) {
+ testEqualFork(t, true, rawdb.HashScheme)
+ testEqualFork(t, true, rawdb.PathScheme)
+}
-func testEqualFork(t *testing.T, full bool) {
+func testEqualFork(t *testing.T, full bool, scheme string) {
length := 10
// Make first chain starting from genesis
- _, processor, err := newCanonical(ethash.NewFaker(), length, full)
+ _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
@@ -329,21 +357,27 @@ func testEqualFork(t *testing.T, full bool) {
}
}
// Sum of numbers must be equal to `length` for this to be an equal fork
- testFork(t, processor, 0, 10, full, equal)
- testFork(t, processor, 1, 9, full, equal)
- testFork(t, processor, 2, 8, full, equal)
- testFork(t, processor, 5, 5, full, equal)
- testFork(t, processor, 6, 4, full, equal)
- testFork(t, processor, 9, 1, full, equal)
+ testFork(t, processor, 0, 10, full, equal, scheme)
+ testFork(t, processor, 1, 9, full, equal, scheme)
+ testFork(t, processor, 2, 8, full, equal, scheme)
+ testFork(t, processor, 5, 5, full, equal, scheme)
+ testFork(t, processor, 6, 4, full, equal, scheme)
+ testFork(t, processor, 9, 1, full, equal, scheme)
}
// Tests that chains missing links do not get accepted by the processor.
-func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
-func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true) }
+func TestBrokenHeaderChain(t *testing.T) {
+ testBrokenChain(t, false, rawdb.HashScheme)
+ testBrokenChain(t, false, rawdb.PathScheme)
+}
+func TestBrokenBlockChain(t *testing.T) {
+ testBrokenChain(t, true, rawdb.HashScheme)
+ testBrokenChain(t, true, rawdb.PathScheme)
+}
-func testBrokenChain(t *testing.T, full bool) {
+func testBrokenChain(t *testing.T, full bool, scheme string) {
// Make chain starting from genesis
- db, blockchain, err := newCanonical(ethash.NewFaker(), 10, full)
+ db, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme)
if err != nil {
t.Fatalf("failed to make new canonical chain: %v", err)
}
@@ -365,19 +399,31 @@ func testBrokenChain(t *testing.T, full bool) {
// Tests that reorganising a long difficult chain after a short easy one
// overwrites the canonical numbers and links in the database.
-func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
-func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
+func TestReorgLongHeaders(t *testing.T) {
+ testReorgLong(t, false, rawdb.HashScheme)
+ testReorgLong(t, false, rawdb.PathScheme)
+}
+func TestReorgLongBlocks(t *testing.T) {
+ testReorgLong(t, true, rawdb.HashScheme)
+ testReorgLong(t, true, rawdb.PathScheme)
+}
-func testReorgLong(t *testing.T, full bool) {
- testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full)
+func testReorgLong(t *testing.T, full bool, scheme string) {
+ testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme)
}
// Tests that reorganising a short difficult chain after a long easy one
// overwrites the canonical numbers and links in the database.
-func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
-func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) }
+func TestReorgShortHeaders(t *testing.T) {
+ testReorgShort(t, false, rawdb.HashScheme)
+ testReorgShort(t, false, rawdb.PathScheme)
+}
+func TestReorgShortBlocks(t *testing.T) {
+ testReorgShort(t, true, rawdb.HashScheme)
+ testReorgShort(t, true, rawdb.PathScheme)
+}
-func testReorgShort(t *testing.T, full bool) {
+func testReorgShort(t *testing.T, full bool, scheme string) {
// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
// we need a fairly long chain of blocks with different difficulties for a short
// one to become heavyer than a long one. The 96 is an empirical value.
@@ -389,12 +435,12 @@ func testReorgShort(t *testing.T, full bool) {
for i := 0; i < len(diff); i++ {
diff[i] = -9
}
- testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full)
+ testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme)
}
-func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
+func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string) {
// Create a pristine chain and database
- db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
+ db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
@@ -462,12 +508,18 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
}
// Tests that the insertion functions detect banned hashes.
-func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
-func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
+func TestBadHeaderHashes(t *testing.T) {
+ testBadHashes(t, false, rawdb.HashScheme)
+ testBadHashes(t, false, rawdb.PathScheme)
+}
+func TestBadBlockHashes(t *testing.T) {
+ testBadHashes(t, true, rawdb.HashScheme)
+ testBadHashes(t, true, rawdb.PathScheme)
+}
-func testBadHashes(t *testing.T, full bool) {
+func testBadHashes(t *testing.T, full bool, scheme string) {
// Create a pristine chain and database
- db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
+ db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
@@ -496,12 +548,18 @@ func testBadHashes(t *testing.T, full bool) {
// Tests that bad hashes are detected on boot, and the chain rolled back to a
// good state prior to the bad hash.
-func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
-func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
+func TestReorgBadHeaderHashes(t *testing.T) {
+ testReorgBadHashes(t, false, rawdb.HashScheme)
+ testReorgBadHashes(t, false, rawdb.PathScheme)
+}
+func TestReorgBadBlockHashes(t *testing.T) {
+ testReorgBadHashes(t, true, rawdb.HashScheme)
+ testReorgBadHashes(t, true, rawdb.PathScheme)
+}
-func testReorgBadHashes(t *testing.T, full bool) {
+func testReorgBadHashes(t *testing.T, full bool, scheme string) {
// Create a pristine chain and database
- db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
+ db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
@@ -532,7 +590,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
// Create a new BlockChain and check that it rolled back the state.
gspec := &Genesis{Config: blockchain.chainConfig}
- ncm, err := NewBlockChain(blockchain.db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
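+	// DefaultCacheConfigWithScheme is presumably a thin helper that clones the
+	// default cache config and overrides only its StateScheme; a sketch under
+	// that assumption (the real helper is defined elsewhere in this patch):
+	//
+	//	func DefaultCacheConfigWithScheme(scheme string) *CacheConfig {
+	//		config := *defaultCacheConfig
+	//		config.StateScheme = scheme
+	//		return &config
+	//	}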
+ ncm, err := NewBlockChain(blockchain.db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
@@ -552,13 +610,19 @@ func testReorgBadHashes(t *testing.T, full bool) {
}
// Tests chain insertions in the face of one entity containing an invalid nonce.
-func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
-func TestBlocksInsertNonceError(t *testing.T) { testInsertNonceError(t, true) }
+func TestHeadersInsertNonceError(t *testing.T) {
+ testInsertNonceError(t, false, rawdb.HashScheme)
+ testInsertNonceError(t, false, rawdb.PathScheme)
+}
+func TestBlocksInsertNonceError(t *testing.T) {
+ testInsertNonceError(t, true, rawdb.HashScheme)
+ testInsertNonceError(t, true, rawdb.PathScheme)
+}
-func testInsertNonceError(t *testing.T, full bool) {
+func testInsertNonceError(t *testing.T, full bool, scheme string) {
for i := 1; i < 25 && !t.Failed(); i++ {
// Create a pristine chain and database
- db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
+ db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
@@ -610,6 +674,11 @@ func testInsertNonceError(t *testing.T, full bool) {
// Tests that fast importing a block chain produces the same chain data as the
// classical full block processing.
func TestFastVsFullChains(t *testing.T) {
+ testFastVsFullChains(t, rawdb.HashScheme)
+ testFastVsFullChains(t, rawdb.PathScheme)
+}
+
+func testFastVsFullChains(t *testing.T, scheme string) {
// Configure and generate a sample block chain
var (
gendb = rawdb.NewMemoryDatabase()
@@ -621,7 +690,8 @@ func TestFastVsFullChains(t *testing.T) {
Alloc: GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(gendb)
+ triedb = trie.NewDatabase(gendb, nil)
+ genesis = gspec.MustCommit(gendb, triedb)
signer = types.LatestSigner(gspec.Config)
)
blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1024, func(i int, block *BlockGen) {
@@ -644,8 +714,8 @@ func TestFastVsFullChains(t *testing.T) {
}, true)
// Import the chain as an archive node for the comparison baseline
archiveDb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
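+	// newDbConfig maps a scheme name onto a trie.Config; a plausible sketch,
+	// assuming the helper lives elsewhere in this patch and that the path
+	// scheme is configured through a PathDB field:
+	//
+	//	func newDbConfig(scheme string) *trie.Config {
+	//		if scheme == rawdb.HashScheme {
+	//			return &trie.Config{}
+	//		}
+	//		return &trie.Config{PathDB: pathdb.Defaults}
+	//	}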
+ gspec.MustCommit(archiveDb, trie.NewDatabase(archiveDb, newDbConfig(scheme)))
+ archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer archive.Stop()
if n, err := archive.InsertChain(blocks, nil); err != nil {
@@ -653,8 +723,8 @@ func TestFastVsFullChains(t *testing.T) {
}
// Fast import the chain as a non-archive node to test
fastDb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ gspec.MustCommit(fastDb, trie.NewDatabase(fastDb, newDbConfig(scheme)))
+ fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -677,8 +747,9 @@ func TestFastVsFullChains(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
- gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ triedb = trie.NewDatabase(ancientDb, nil)
+ gspec.MustCommit(ancientDb, triedb)
+ ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
@@ -746,6 +817,11 @@ func TestFastVsFullChains(t *testing.T) {
// Tests that various import methods move the chain head pointers to the correct
// positions.
func TestLightVsFastVsFullChainHeads(t *testing.T) {
+ testLightVsFastVsFullChainHeads(t, rawdb.HashScheme)
+ testLightVsFastVsFullChainHeads(t, rawdb.PathScheme)
+}
+
+func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
// Configure and generate a sample block chain
var (
gendb = rawdb.NewMemoryDatabase()
@@ -757,7 +833,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
Alloc: GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(gendb)
+ triedb = trie.NewDatabase(gendb, nil)
+ genesis = gspec.MustCommit(gendb, triedb)
)
height := uint64(1024)
blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil, true)
@@ -773,7 +850,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
- gspec.MustCommit(db)
+ gspec.MustCommit(db, trie.NewDatabase(db, nil))
return db, func() { os.RemoveAll(dir) }
}
// Configure a subchain to roll back
@@ -793,12 +870,14 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
}
}
+
// Import the chain as an archive node and ensure all pointers are updated
archiveDb, delfn := makeDb()
defer delfn()
archiveCaching := *defaultCacheConfig
archiveCaching.TrieDirtyDisabled = true
+ archiveCaching.StateScheme = scheme
archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if n, err := archive.InsertChain(blocks, nil); err != nil {
@@ -813,7 +892,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb, delfn := makeDb()
defer delfn()
- fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer fast.Stop()
headers := make([]*types.Header, len(blocks))
@@ -833,7 +912,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
	// Import the chain as an ancient-first node and ensure all pointers are updated
ancientDb, delfn := makeDb()
defer delfn()
- ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
@@ -852,7 +931,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a light node and ensure all pointers are updated
lightDb, delfn := makeDb()
defer delfn()
- light, _ := NewBlockChain(lightDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
@@ -865,6 +944,11 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Tests that chain reorganisations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
+ testChainTxReorgs(t, rawdb.HashScheme)
+ testChainTxReorgs(t, rawdb.PathScheme)
+}
+
+func testChainTxReorgs(t *testing.T, scheme string) {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
@@ -882,10 +966,10 @@ func TestChainTxReorgs(t *testing.T) {
addr3: {Balance: big.NewInt(1000000000000000)},
},
}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
signer = types.LatestSigner(gspec.Config)
)
-
// Create two transactions shared between the chains:
// - postponed: transaction included at a later block in the forked chain
// - swapped: transaction included at the same block number in the forked chain
@@ -921,7 +1005,7 @@ func TestChainTxReorgs(t *testing.T) {
}
}, true)
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if i, err := blockchain.InsertChain(chain, nil); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@@ -980,6 +1064,11 @@ func TestChainTxReorgs(t *testing.T) {
}
func TestLogReorgs(t *testing.T) {
+ testLogReorgs(t, rawdb.HashScheme)
+ testLogReorgs(t, rawdb.PathScheme)
+}
+
+func testLogReorgs(t *testing.T, scheme string) {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
@@ -987,11 +1076,11 @@ func TestLogReorgs(t *testing.T) {
// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
signer = types.LatestSigner(gspec.Config)
)
-
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
rmLogsCh := make(chan RemovedLogsEvent)
@@ -1036,15 +1125,20 @@ var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd
// This test checks that log events and RemovedLogsEvent are sent
// when the chain reorganizes.
func TestLogRebirth(t *testing.T) {
+ testLogRebirth(t, rawdb.HashScheme)
+ testLogRebirth(t, rawdb.PathScheme)
+}
+
+func testLogRebirth(t *testing.T, scheme string) {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
db = rawdb.NewMemoryDatabase()
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, nil))
signer = types.LatestSigner(gspec.Config)
engine = ethash.NewFaker()
- blockchain, _ = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
)
defer blockchain.Stop()
@@ -1100,14 +1194,19 @@ func TestLogRebirth(t *testing.T) {
// This test is a variation of TestLogRebirth. It verifies that log events are emitted
// when a side chain containing log events overtakes the canonical chain.
func TestSideLogRebirth(t *testing.T) {
+ testSideLogRebirth(t, rawdb.HashScheme)
+ testSideLogRebirth(t, rawdb.PathScheme)
+}
+
+func testSideLogRebirth(t *testing.T, scheme string) {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
db = rawdb.NewMemoryDatabase()
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, nil))
signer = types.LatestSigner(gspec.Config)
- blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
@@ -1170,6 +1269,11 @@ func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan Re
}
func TestReorgSideEvent(t *testing.T) {
+ testReorgSideEvent(t, rawdb.HashScheme)
+ testReorgSideEvent(t, rawdb.PathScheme)
+}
+
+func testReorgSideEvent(t *testing.T, scheme string) {
var (
db = rawdb.NewMemoryDatabase()
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -1178,11 +1282,11 @@ func TestReorgSideEvent(t *testing.T) {
Config: params.TestChainConfig,
Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
signer = types.LatestSigner(gspec.Config)
)
-
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {}, true)
@@ -1254,7 +1358,12 @@ done:
// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
- _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
+ testCanonicalBlockRetrieval(t, rawdb.HashScheme)
+ testCanonicalBlockRetrieval(t, rawdb.PathScheme)
+}
+
+func testCanonicalBlockRetrieval(t *testing.T, scheme string) {
+ _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
@@ -1300,6 +1409,11 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
}
func TestEIP155Transition(t *testing.T) {
+ testEIP155Transition(t, rawdb.HashScheme)
+ testEIP155Transition(t, rawdb.PathScheme)
+}
+
+func testEIP155Transition(t *testing.T, scheme string) {
// Configure and generate a sample block chain
var (
db = rawdb.NewMemoryDatabase()
@@ -1311,10 +1425,10 @@ func TestEIP155Transition(t *testing.T) {
			Config:  &params.ChainConfig{ChainID: big.NewInt(1), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)},
Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
)
-
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 4, func(i int, block *BlockGen) {
@@ -1403,6 +1517,11 @@ func TestEIP155Transition(t *testing.T) {
}
func TestEIP161AccountRemoval(t *testing.T) {
+ testEIP161AccountRemoval(t, rawdb.HashScheme)
+ testEIP161AccountRemoval(t, rawdb.PathScheme)
+}
+
+func testEIP161AccountRemoval(t *testing.T, scheme string) {
// Configure and generate a sample block chain
var (
db = rawdb.NewMemoryDatabase()
@@ -1420,9 +1539,10 @@ func TestEIP161AccountRemoval(t *testing.T) {
},
Alloc: GenesisAlloc{address: {Balance: funds}},
}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
)
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, block *BlockGen) {
@@ -1475,11 +1595,16 @@ func TestEIP161AccountRemoval(t *testing.T) {
//
// https://github.com/ethereum/go-ethereum/pull/15941
func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
+ testBlockchainHeaderchainReorgConsistency(t, rawdb.HashScheme)
+ testBlockchainHeaderchainReorgConsistency(t, rawdb.PathScheme)
+}
+
+func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) {
// Generate a canonical chain to act as the main dataset
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, nil))
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }, true)
// Generate a bunch of fork blocks, each side forking from the canonical chain
@@ -1495,10 +1620,10 @@ func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
// Import the canonical and fork chain side by side, verifying the current block
// and current header consistency
diskdb := rawdb.NewMemoryDatabase()
- (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb, trie.NewDatabase(diskdb, newDbConfig(scheme)))
gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1525,7 +1650,7 @@ func TestTrieForkGC(t *testing.T) {
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }, true)
// Generate a bunch of fork blocks, each side forking from the canonical chain
@@ -1540,7 +1665,7 @@ func TestTrieForkGC(t *testing.T) {
}
// Import the canonical and fork chain side by side, forcing the trie cache to cache both
diskdb := rawdb.NewMemoryDatabase()
- (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb, trie.NewDatabase(diskdb, newDbConfig(rawdb.HashScheme)))
gspec := &Genesis{Config: params.TestChainConfig}
chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
@@ -1557,8 +1682,8 @@ func TestTrieForkGC(t *testing.T) {
}
	// Dereference all the recent tries and ensure no past trie is left in memory
for i := 0; i < DefaultTriesInMemory; i++ {
- chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
- chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
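+		// The trie database is now reached through the chain's own TrieDB
+		// accessor instead of poking through stateCache.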
+ chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
+ chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
}
if nodes, _ := chain.TrieDB().Size(); nodes > 0 {
t.Fatalf("stale tries still alive after garbase collection")
@@ -1568,11 +1693,16 @@ func TestTrieForkGC(t *testing.T) {
// Tests that doing large reorgs works even if the state associated with the
// forking point is not available any more.
func TestLargeReorgTrieGC(t *testing.T) {
+ testLargeReorgTrieGC(t, rawdb.HashScheme)
+ testLargeReorgTrieGC(t, rawdb.PathScheme)
+}
+
+func testLargeReorgTrieGC(t *testing.T, scheme string) {
// Generate the original common chain segment and the two competing forks
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, nil))
shared, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }, true)
original, _ := GenerateChain(params.TestChainConfig, shared[len(shared)-1], engine, db, 2*DefaultTriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }, true)
@@ -1580,10 +1710,12 @@ func TestLargeReorgTrieGC(t *testing.T) {
// Import the shared chain and the original canonical one
diskdb := rawdb.NewMemoryDatabase()
- (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb, trie.NewDatabase(diskdb, newDbConfig(scheme)))
gspec := &Genesis{Config: params.TestChainConfig}
+ db, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+ defer db.Close()
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1594,7 +1726,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
t.Fatalf("failed to insert original chain: %v", err)
}
// Ensure that the state associated with the forking point is pruned away
- if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
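+	// HasState works under both schemes, unlike the hash-keyed Node lookup it
+	// replaces, which has no path-scheme equivalent.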
+ if chain.HasState(shared[len(shared)-1].Root()) {
t.Fatalf("common-but-old ancestor still cache")
}
// Import the competitor chain without exceeding the canonical's TD and ensure
@@ -1603,7 +1735,7 @@ func TestLargeReorgTrieGC(t *testing.T) {
t.Fatalf("failed to insert competitor chain: %v", err)
}
for i, block := range competitor[:len(competitor)-2] {
- if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
+ if chain.HasState(block.Root()) {
t.Fatalf("competitor %d: low TD chain became processed", i)
}
}
@@ -1612,14 +1744,30 @@ func TestLargeReorgTrieGC(t *testing.T) {
if _, err := chain.InsertChain(competitor[len(competitor)-2:], nil); err != nil {
t.Fatalf("failed to finalize competitor chain: %v", err)
}
- for i, block := range competitor[:len(competitor)-DefaultTriesInMemory] {
- if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
+	// The path-based trie database keeps 128 diff layers plus one disk layer,
+	// so 129 recent states stay available; the hash-based scheme keeps 128.
+	states := 128
+	if scheme == rawdb.PathScheme {
+		states++
+	}
+ for i, block := range competitor[:len(competitor)-states] {
+ if chain.HasState(block.Root()) {
+ t.Fatalf("competitor %d: unexpected competing chain state", i)
+ }
+ }
+ for i, block := range competitor[len(competitor)-states:] {
+ if !chain.HasState(block.Root()) {
t.Fatalf("competitor %d: competing chain state missing", i)
}
}
}
func TestBlockchainRecovery(t *testing.T) {
+ testBlockchainRecovery(t, rawdb.HashScheme)
+ testBlockchainRecovery(t, rawdb.PathScheme)
+}
+
+func testBlockchainRecovery(t *testing.T, scheme string) {
// Configure and generate a sample block chain
var (
gendb = rawdb.NewMemoryDatabase()
@@ -1627,7 +1775,8 @@ func TestBlockchainRecovery(t *testing.T) {
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(1000000000)
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
- genesis = gspec.MustCommit(gendb)
+ triedb = trie.NewDatabase(gendb, nil)
+ genesis = gspec.MustCommit(gendb, triedb)
)
height := uint64(1024)
blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil, true)
@@ -1643,8 +1792,8 @@ func TestBlockchainRecovery(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
- gspec.MustCommit(ancientDb)
- ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ gspec.MustCommit(ancientDb, trie.NewDatabase(ancientDb, nil))
+ ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -1664,7 +1813,7 @@ func TestBlockchainRecovery(t *testing.T) {
rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
// Reopen broken blockchain again
- ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancient.Stop()
if num := ancient.CurrentBlock().NumberU64(); num != 0 {
t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
@@ -1679,8 +1828,13 @@ func TestBlockchainRecovery(t *testing.T) {
// This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain.
func TestInsertReceiptChainRollback(t *testing.T) {
+ testInsertReceiptChainRollback(t, rawdb.HashScheme)
+ testInsertReceiptChainRollback(t, rawdb.PathScheme)
+}
+
+func testInsertReceiptChainRollback(t *testing.T, scheme string) {
// Generate forked chain. The returned BlockChain object is used to process the side chain blocks.
- tmpChain, sideblocks, canonblocks, err := getLongAndShortChains()
+ tmpChain, sideblocks, canonblocks, err := getLongAndShortChains(scheme)
if err != nil {
t.Fatal(err)
}
@@ -1715,8 +1869,8 @@ func TestInsertReceiptChainRollback(t *testing.T) {
t.Fatalf("failed to create temp freezer db: %v", err)
}
gspec := Genesis{Config: params.AllEthashProtocolChanges, BaseFee: big.NewInt(params.InitialBaseFee)}
- gspec.MustCommit(ancientDb)
- ancientChain, _ := NewBlockChain(ancientDb, nil, &gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ gspec.MustCommit(ancientDb, trie.NewDatabase(ancientDb, nil))
+ ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), &gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer ancientChain.Stop()
// Import the canonical header chain.
@@ -1761,10 +1915,15 @@ func TestInsertReceiptChainRollback(t *testing.T) {
// - https://github.com/ethereum/go-ethereum/issues/18977
// - https://github.com/ethereum/go-ethereum/pull/18988
func TestLowDiffLongChain(t *testing.T) {
+ testLowDiffLongChain(t, rawdb.HashScheme)
+ testLowDiffLongChain(t, rawdb.PathScheme)
+}
+
+func testLowDiffLongChain(t *testing.T, scheme string) {
// Generate a canonical chain to act as the main dataset
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
// We must use a pretty long chain to ensure that the fork doesn't overtake us
// until after at least 128 blocks post tip
@@ -1774,11 +1933,12 @@ func TestLowDiffLongChain(t *testing.T) {
}, true)
// Import the canonical chain
- diskdb := rawdb.NewMemoryDatabase()
- (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
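+	// As in testLargeReorgTrieGC, a freezer-backed database replaces the plain
+	// memory one so the path scheme can persist its state history (assumption).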
+ diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
+ defer diskdb.Close()
+ (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb, trie.NewDatabase(diskdb, newDbConfig(rawdb.HashScheme)))
gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -1819,12 +1979,12 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
// Generate a canonical chain to act as the main dataset
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
// Generate and import the canonical chain
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*DefaultTriesInMemory, nil, true)
diskdb := rawdb.NewMemoryDatabase()
- (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb, trie.NewDatabase(diskdb, newDbConfig(rawdb.HashScheme)))
gspec := &Genesis{Config: params.TestChainConfig}
chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
@@ -1893,15 +2053,24 @@ func TestPrunedImportSide(t *testing.T) {
testSideImport(t, 1, -10)
}
-func TestInsertKnownHeaders(t *testing.T) { testInsertKnownChainData(t, "headers") }
-func TestInsertKnownReceiptChain(t *testing.T) { testInsertKnownChainData(t, "receipts") }
-func TestInsertKnownBlocks(t *testing.T) { testInsertKnownChainData(t, "blocks") }
+func TestInsertKnownHeaders(t *testing.T) {
+ testInsertKnownChainData(t, "headers", rawdb.HashScheme)
+ testInsertKnownChainData(t, "headers", rawdb.PathScheme)
+}
+func TestInsertKnownReceiptChain(t *testing.T) {
+ testInsertKnownChainData(t, "receipts", rawdb.HashScheme)
+ testInsertKnownChainData(t, "receipts", rawdb.PathScheme)
+}
+func TestInsertKnownBlocks(t *testing.T) {
+ testInsertKnownChainData(t, "blocks", rawdb.HashScheme)
+ testInsertKnownChainData(t, "blocks", rawdb.PathScheme)
+}
-func testInsertKnownChainData(t *testing.T, typ string) {
+func testInsertKnownChainData(t *testing.T, typ string, scheme string) {
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, nil))
blocks, receipts := GenerateChain(params.TestChainConfig, genesis, engine, db, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }, true)
// A longer chain but total difficulty is lower.
@@ -1921,11 +2090,11 @@ func testInsertKnownChainData(t *testing.T, typ string) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
- (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(chaindb)
+ (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(chaindb, trie.NewDatabase(chaindb, nil))
defer os.RemoveAll(dir)
gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(chaindb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2020,11 +2189,11 @@ func testInsertKnownChainData(t *testing.T, typ string) {
}
// getLongAndShortChains returns two chains: A is longer, B is heavier.
-func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyChain []*types.Block, err error) {
+func getLongAndShortChains(scheme string) (bc *BlockChain, longChain []*types.Block, heavyChain []*types.Block, err error) {
// Generate a canonical chain to act as the main dataset
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, nil))
// Generate and import the canonical chain,
	// offsetting the time to keep the difficulty low
@@ -2032,10 +2201,10 @@ func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyCha
b.SetCoinbase(common.Address{1})
}, true)
diskdb := rawdb.NewMemoryDatabase()
- (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb, trie.NewDatabase(diskdb, nil))
gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
}
@@ -2081,7 +2250,12 @@ func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyCha
// 3. Then there should be no canon mapping for the block at height X
// 4. The forked block should still be retrievable by hash
func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
- chain, canonblocks, sideblocks, err := getLongAndShortChains()
+ testReorgToShorterRemovesCanonMapping(t, rawdb.HashScheme)
+ testReorgToShorterRemovesCanonMapping(t, rawdb.PathScheme)
+}
+
+func testReorgToShorterRemovesCanonMapping(t *testing.T, scheme string) {
+ chain, canonblocks, sideblocks, err := getLongAndShortChains(scheme)
if err != nil {
t.Fatal(err)
}
@@ -2117,7 +2291,12 @@ func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
// as TestReorgToShorterRemovesCanonMapping, but applied on headerchain
// imports -- that is, for fast sync
func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
- chain, canonblocks, sideblocks, err := getLongAndShortChains()
+ testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.HashScheme)
+ testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.PathScheme)
+}
+
+func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme string) {
+ chain, canonblocks, sideblocks, err := getLongAndShortChains(scheme)
if err != nil {
t.Fatal(err)
}
@@ -2169,7 +2348,8 @@ func TestTransactionIndices(t *testing.T) {
Alloc: GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(gendb)
+ triedb = trie.NewDatabase(gendb, nil)
+ genesis = gspec.MustCommit(gendb, triedb)
signer = types.LatestSigner(gspec.Config)
)
height := uint64(128)
@@ -2224,7 +2404,7 @@ func TestTransactionIndices(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
- gspec.MustCommit(ancientDb)
+ gspec.MustCommit(ancientDb, trie.NewDatabase(ancientDb, nil))
// Import all blocks into ancient db
l := uint64(0)
@@ -2252,7 +2432,7 @@ func TestTransactionIndices(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
- gspec.MustCommit(ancientDb)
+ gspec.MustCommit(ancientDb, trie.NewDatabase(ancientDb, nil))
chain, err = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
@@ -2272,7 +2452,7 @@ func TestTransactionIndices(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
- gspec.MustCommit(ancientDb)
+ gspec.MustCommit(ancientDb, trie.NewDatabase(ancientDb, nil))
limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
tails := []uint64{0, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */, 69 /* 132 - 64 + 1 */, 0}
@@ -2287,8 +2467,12 @@ func TestTransactionIndices(t *testing.T) {
chain.Stop()
}
}
-
func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
+ testSkipStaleTxIndicesInFastSync(t, rawdb.HashScheme)
+ testSkipStaleTxIndicesInFastSync(t, rawdb.PathScheme)
+}
+
+func testSkipStaleTxIndicesInFastSync(t *testing.T, scheme string) {
// Configure and generate a sample block chain
var (
gendb = rawdb.NewMemoryDatabase()
@@ -2296,7 +2480,7 @@ func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(100000000000000000)
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
- genesis = gspec.MustCommit(gendb)
+ genesis = gspec.MustCommit(gendb, trie.NewDatabase(gendb, nil))
signer = types.LatestSigner(gspec.Config)
)
height := uint64(128)
@@ -2351,11 +2535,14 @@ func TestSkipStaleTxIndicesInFastSync(t *testing.T) {
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
}
- gspec.MustCommit(ancientDb)
+ triedb := trie.NewDatabase(ancientDb, nil)
+ gspec.MustCommit(ancientDb, triedb)
+
+ defer ancientDb.Close()
// Import all blocks into ancient db, only HEAD-32 indices are kept.
l := uint64(32)
- chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
+ chain, err := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2396,7 +2583,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
// Generate the original common chain segment and the two competing forks
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
blockGenerator := func(i int, block *BlockGen) {
block.SetCoinbase(common.Address{1})
@@ -2417,7 +2604,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
for i := 0; i < b.N; i++ {
// Import the shared chain and the original canonical one
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
+ gspec.MustCommit(diskdb, trie.NewDatabase(diskdb, newDbConfig(rawdb.HashScheme)))
chain, err := NewBlockChain(diskdb, nil, &gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
@@ -2492,17 +2679,22 @@ func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
// 2. Downloader starts to sync again
// 3. The blocks fetched are all known and canonical blocks
func TestSideImportPrunedBlocks(t *testing.T) {
+ testSideImportPrunedBlocks(t, rawdb.HashScheme)
+ testSideImportPrunedBlocks(t, rawdb.PathScheme)
+}
+
+func testSideImportPrunedBlocks(t *testing.T, scheme string) {
// Generate a canonical chain to act as the main dataset
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, nil))
// Generate and import the canonical chain
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*DefaultTriesInMemory, nil, true)
diskdb := rawdb.NewMemoryDatabase()
- (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb)
+ (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb, trie.NewDatabase(diskdb, nil))
gspec := &Genesis{Config: params.TestChainConfig}
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2510,14 +2702,20 @@ func TestSideImportPrunedBlocks(t *testing.T) {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
- lastPrunedIndex := len(blocks) - DefaultTriesInMemory - 1
+	// The path-based trie database keeps 128 diff layers plus one disk layer,
+	// so 129 recent states stay available; the hash-based scheme keeps 128.
+ states := DefaultTriesInMemory
+ if scheme == rawdb.PathScheme {
+ states = DefaultTriesInMemory + 1
+ }
+ lastPrunedIndex := len(blocks) - states - 1
lastPrunedBlock := blocks[lastPrunedIndex]
// Verify pruning of lastPrunedBlock
if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
}
- firstNonPrunedBlock := blocks[len(blocks)-DefaultTriesInMemory]
+ firstNonPrunedBlock := blocks[len(blocks)-states]
// Verify firstNonPrunedBlock is not pruned
if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
@@ -2539,6 +2737,11 @@ func TestSideImportPrunedBlocks(t *testing.T) {
// each transaction, so this works ok. The rework accumulated writes in memory
// first, but the journal wiped the entire state object on create-revert.
func TestDeleteCreateRevert(t *testing.T) {
+ testDeleteCreateRevert(t, rawdb.HashScheme)
+ testDeleteCreateRevert(t, rawdb.PathScheme)
+}
+
+func testDeleteCreateRevert(t *testing.T, scheme string) {
var (
aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
@@ -2578,7 +2781,8 @@ func TestDeleteCreateRevert(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
)
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 1, func(i int, b *BlockGen) {
@@ -2594,9 +2798,9 @@ func TestDeleteCreateRevert(t *testing.T) {
}, true)
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
+ gspec.MustCommit(diskdb, trie.NewDatabase(diskdb, nil))
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -2613,6 +2817,11 @@ func TestDeleteCreateRevert(t *testing.T) {
// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
// and then the new slots exist
func TestDeleteRecreateSlots(t *testing.T) {
+ testDeleteRecreateSlots(t, rawdb.HashScheme)
+ testDeleteRecreateSlots(t, rawdb.PathScheme)
+}
+
+func testDeleteRecreateSlots(t *testing.T, scheme string) {
var (
// Generate a canonical chain to act as the main dataset
engine = ethash.NewFaker()
@@ -2693,7 +2902,8 @@ func TestDeleteRecreateSlots(t *testing.T) {
},
},
}
- genesis := gspec.MustCommit(db)
+ triedb := trie.NewDatabase(db, nil)
+ genesis := gspec.MustCommit(db, triedb)
blocks, _ := GenerateChain(&chainConfig, genesis, engine, db, 1, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
@@ -2707,9 +2917,9 @@ func TestDeleteRecreateSlots(t *testing.T) {
b.AddTx(tx)
}, true)
// Import the canonical chain
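+	// The generation database is no longer needed; close it so only the fresh
+	// disk database stays live for the import below.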
+ db.Close()
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
Debug: true,
Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
@@ -2742,6 +2952,11 @@ func TestDeleteRecreateSlots(t *testing.T) {
// regular value-transfer
// Expected outcome is that _all_ slots are cleared from A
func TestDeleteRecreateAccount(t *testing.T) {
+ testDeleteRecreateAccount(t, rawdb.HashScheme)
+ testDeleteRecreateAccount(t, rawdb.PathScheme)
+}
+
+func testDeleteRecreateAccount(t *testing.T, scheme string) {
var (
// Generate a canonical chain to act as the main dataset
engine = ethash.NewFaker()
@@ -2775,7 +2990,7 @@ func TestDeleteRecreateAccount(t *testing.T) {
},
},
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
blocks, _ := GenerateChain(&chainConfig, genesis, engine, db, 1, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
@@ -2790,8 +3005,8 @@ func TestDeleteRecreateAccount(t *testing.T) {
}, true)
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{
+ gspec.MustCommit(diskdb, trie.NewDatabase(diskdb, nil))
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
Debug: true,
Tracer: logger.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
@@ -2820,6 +3035,11 @@ func TestDeleteRecreateAccount(t *testing.T) {
// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
// and then the new slots exist
func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
+ testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.HashScheme)
+ testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.PathScheme)
+}
+
+func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
var (
// Generate a canonical chain to act as the main dataset
engine = ethash.NewFaker()
@@ -2901,7 +3121,9 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
},
},
}
- genesis := gspec.MustCommit(db)
+ triedb := trie.NewDatabase(db, nil)
+ genesis := gspec.MustCommit(db, triedb)
+
var nonce uint64
type expectation struct {
@@ -2965,8 +3187,10 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
}, true)
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{
+ triedb = trie.NewDatabase(diskdb, nil)
+ gspec.MustCommit(diskdb, triedb)
+
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
@@ -3023,6 +3247,11 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
// We need to either roll back the snapDestructs, or not place it into snapDestructs
// in the first place.
func TestInitThenFailCreateContract(t *testing.T) {
+ testInitThenFailCreateContract(t, rawdb.HashScheme)
+ testInitThenFailCreateContract(t, rawdb.PathScheme)
+}
+
+func testInitThenFailCreateContract(t *testing.T, scheme string) {
var (
// Generate a canonical chain to act as the main dataset
engine = ethash.NewFaker()
@@ -3085,7 +3314,9 @@ func TestInitThenFailCreateContract(t *testing.T) {
},
},
}
- genesis := gspec.MustCommit(db)
+ triedb := trie.NewDatabase(db, nil)
+ genesis := gspec.MustCommit(db, triedb)
+
nonce := uint64(0)
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 4, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{1})
@@ -3098,8 +3329,9 @@ func TestInitThenFailCreateContract(t *testing.T) {
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{
+ triedb = trie.NewDatabase(diskdb, nil)
+ gspec.MustCommit(diskdb, triedb)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{
//Debug: true,
//Tracer: vm.NewJSONLogger(nil, os.Stdout),
}, nil, nil)
@@ -3135,6 +3367,11 @@ func TestInitThenFailCreateContract(t *testing.T) {
// checking that the gas usage of a hot SLOAD and a cold SLOAD are calculated
// correctly.
func TestEIP2718Transition(t *testing.T) {
+ testEIP2718Transition(t, rawdb.HashScheme)
+ testEIP2718Transition(t, rawdb.PathScheme)
+}
+
+func testEIP2718Transition(t *testing.T, scheme string) {
var (
aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
@@ -3163,7 +3400,8 @@ func TestEIP2718Transition(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
)
blocks, _ := GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, b *BlockGen) {
@@ -3187,9 +3425,9 @@ func TestEIP2718Transition(t *testing.T) {
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
+ gspec.MustCommit(diskdb, trie.NewDatabase(diskdb, nil))
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3218,6 +3456,11 @@ func TestEIP2718Transition(t *testing.T) {
// gasFeeCap - gasTipCap < baseFee.
// 6. Legacy transactions behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap).
func TestEIP1559Transition(t *testing.T) {
+ testEIP1559Transition(t, rawdb.HashScheme)
+ testEIP1559Transition(t, rawdb.PathScheme)
+}
+
+func testEIP1559Transition(t *testing.T, scheme string) {
var (
aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
@@ -3253,7 +3496,9 @@ func TestEIP1559Transition(t *testing.T) {
gspec.Config.BerlinBlock = common.Big0
gspec.Config.LondonBlock = common.Big0
- genesis := gspec.MustCommit(db)
+ triedb := trie.NewDatabase(db, nil)
+ genesis := gspec.MustCommit(db, triedb)
+
signer := types.LatestSigner(gspec.Config)
blocks, _ := GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, b *BlockGen) {
@@ -3282,9 +3527,10 @@ func TestEIP1559Transition(t *testing.T) {
}, true)
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
+ triedb = trie.NewDatabase(diskdb, nil)
+ gspec.MustCommit(diskdb, triedb)
- chain, err := NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3362,6 +3608,11 @@ func TestEIP1559Transition(t *testing.T) {
}
func TestSponsoredTxTransitionBeforeMiko(t *testing.T) {
+ testSponsoredTxTransitionBeforeMiko(t, rawdb.HashScheme)
+ testSponsoredTxTransitionBeforeMiko(t, rawdb.PathScheme)
+}
+
+func testSponsoredTxTransitionBeforeMiko(t *testing.T, scheme string) {
var chainConfig params.ChainConfig
chainConfig.HomesteadBlock = common.Big0
@@ -3385,8 +3636,8 @@ func TestSponsoredTxTransitionBeforeMiko(t *testing.T) {
gspec := &Genesis{
Config: &chainConfig,
}
- genesis := gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -3428,6 +3679,10 @@ func TestSponsoredTxTransitionBeforeMiko(t *testing.T) {
}
func TestSponsoredTxTransition(t *testing.T) {
+ testSponsoredTxTransition(t, rawdb.HashScheme)
+ testSponsoredTxTransition(t, rawdb.PathScheme)
+}
+func testSponsoredTxTransition(t *testing.T, scheme string) {
var chainConfig params.ChainConfig
chainConfig.HomesteadBlock = common.Big0
@@ -3464,8 +3719,8 @@ func TestSponsoredTxTransition(t *testing.T) {
adminAddr: {Balance: math.BigPow(10, 18)},
},
}
- genesis := gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -3577,6 +3832,7 @@ func TestSponsoredTxTransition(t *testing.T) {
	// 5. Sender does not have sufficient funds
gasFee := new(big.Int).Mul(innerTx.GasFeeCap, new(big.Int).SetUint64(innerTx.Gas))
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, nil))
blocks, _ := GenerateChain(&chainConfig, genesis, engine, db, 1, func(i int, bg *BlockGen) {
tx, err := types.SignTx(types.NewTransaction(0, payerAddr, gasFee, params.TxGas, bg.header.BaseFee, nil), mikoSigner, adminKey)
if err != nil {
@@ -3683,6 +3939,11 @@ func TestSponsoredTxTransition(t *testing.T) {
// TestTransientStorageReset ensures the transient storage is wiped correctly
// between transactions.
func TestTransientStorageReset(t *testing.T) {
+ testTransientStorageReset(t, rawdb.HashScheme)
+ testTransientStorageReset(t, rawdb.PathScheme)
+}
+
+func testTransientStorageReset(t *testing.T, scheme string) {
var (
engine = ethash.NewFaker()
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@@ -3754,7 +4015,7 @@ func TestTransientStorageReset(t *testing.T) {
})
// Initialize the blockchain with 1153 enabled.
- chain, err := NewBlockChain(db, nil, gspec, nil, engine, vmConfig, nil, nil)
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vmConfig, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3775,6 +4036,11 @@ func TestTransientStorageReset(t *testing.T) {
}
func TestEIP3651(t *testing.T) {
+ testEIP3651(t, rawdb.HashScheme)
+ testEIP3651(t, rawdb.PathScheme)
+}
+
+func testEIP3651(t *testing.T, scheme string) {
var (
aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
@@ -3845,7 +4111,7 @@ func TestEIP3651(t *testing.T) {
b.AddTx(tx)
})
- chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, nil, nil)
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, nil, nil)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
@@ -3934,6 +4200,11 @@ func randBlob() (*kzg4844.Blob, *kzg4844.Commitment, *kzg4844.Proof) {
}
func TestInsertChainWithSidecars(t *testing.T) {
+ testInsertChainWithSidecars(t, rawdb.HashScheme)
+ testInsertChainWithSidecars(t, rawdb.PathScheme)
+}
+
+func testInsertChainWithSidecars(t *testing.T, scheme string) {
privateKey, _ := crypto.GenerateKey()
address := crypto.PubkeyToAddress(privateKey.PublicKey)
chainConfig := params.TestChainConfig
@@ -3948,8 +4219,10 @@ func TestInsertChainWithSidecars(t *testing.T) {
},
},
}
- genesis := gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ triedb := trie.NewDatabase(db, nil)
+ gspec.MustCommit(db, triedb)
+
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -3999,6 +4272,7 @@ func TestInsertChainWithSidecars(t *testing.T) {
t.Fatal(err)
}
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
blocks, _ := GenerateChain(chainConfig, genesis, engine, db, 1, func(i int, bg *BlockGen) {
bg.AddTx(tx1)
bg.AddTx(tx2)
@@ -4033,8 +4307,11 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db = rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
+
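+	// Close the previous chain's trie database before reopening on a fresh
+	// database; the path-scheme triedb is assumed to hold resources that need
+	// an explicit Close.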
+ chain.triedb.Close()
+ chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4057,8 +4334,11 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db = rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ triedb = trie.NewDatabase(db, nil)
+
+ chain.triedb.Close()
+ genesis = gspec.MustCommit(db, triedb)
+ chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4105,8 +4385,11 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db = rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
+
+ chain.triedb.Close()
+ chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4167,8 +4450,8 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db := rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ gspec.MustCommit(db, trie.NewDatabase(db, nil))
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4201,8 +4484,9 @@ func TestInsertChainWithSidecars(t *testing.T) {
// Reset database
db = rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ triedb = trie.NewDatabase(db, nil)
+ genesis = gspec.MustCommit(db, triedb)
+ chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4234,6 +4518,11 @@ func TestInsertChainWithSidecars(t *testing.T) {
}
func TestSidecarsPruning(t *testing.T) {
+ testSidecarsPruning(t, rawdb.HashScheme)
+ testSidecarsPruning(t, rawdb.PathScheme)
+}
+
+func testSidecarsPruning(t *testing.T, scheme string) {
var prunePeriod uint64 = 1000
privateKey, _ := crypto.GenerateKey()
address := crypto.PubkeyToAddress(privateKey.PublicKey)
@@ -4249,8 +4538,8 @@ func TestSidecarsPruning(t *testing.T) {
},
},
}
- genesis := gspec.MustCommit(db)
- chain, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
+ chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create blockchain, err %s", err)
}
@@ -4324,3 +4613,113 @@ func TestSidecarsPruning(t *testing.T) {
}
}
}
+
+func TestDeleteThenCreate(t *testing.T) {
+ var (
+ engine = ethash.NewFaker()
+ key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ address = crypto.PubkeyToAddress(key.PublicKey)
+ factoryAddr = crypto.CreateAddress(address, 0)
+ funds = big.NewInt(1000000000000000)
+ )
+ /*
+ contract Factory {
+ function deploy(bytes memory code) public {
+ address addr;
+ assembly {
+ addr := create2(0, add(code, 0x20), mload(code), 0)
+ if iszero(extcodesize(addr)) {
+ revert(0, 0)
+ }
+ }
+ }
+ }
+ */
+ factoryBIN := common.Hex2Bytes("608060405234801561001057600080fd5b50610241806100206000396000f3fe608060405234801561001057600080fd5b506004361061002a5760003560e01c80627743601461002f575b600080fd5b610049600480360381019061004491906100d8565b61004b565b005b6000808251602084016000f59050803b61006457600080fd5b5050565b600061007b61007684610146565b610121565b905082815260208101848484011115610097576100966101eb565b5b6100a2848285610177565b509392505050565b600082601f8301126100bf576100be6101e6565b5b81356100cf848260208601610068565b91505092915050565b6000602082840312156100ee576100ed6101f5565b5b600082013567ffffffffffffffff81111561010c5761010b6101f0565b5b610118848285016100aa565b91505092915050565b600061012b61013c565b90506101378282610186565b919050565b6000604051905090565b600067ffffffffffffffff821115610161576101606101b7565b5b61016a826101fa565b9050602081019050919050565b82818337600083830152505050565b61018f826101fa565b810181811067ffffffffffffffff821117156101ae576101ad6101b7565b5b80604052505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f830116905091905056fea2646970667358221220ea8b35ed310d03b6b3deef166941140b4d9e90ea2c92f6b41eb441daf49a59c364736f6c63430008070033")
+
+ /*
+ contract C {
+ uint256 value;
+ constructor() {
+ value = 100;
+ }
+ function destruct() public payable {
+ selfdestruct(payable(msg.sender));
+ }
+ receive() payable external {}
+ }
+ */
+ contractABI := common.Hex2Bytes("6080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c63430008070033")
+ contractAddr := crypto.CreateAddress2(factoryAddr, [32]byte{}, crypto.Keccak256(contractABI))
+
+ gspec := &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{
+ address: {Balance: funds},
+ },
+ }
+ nonce := uint64(0)
+ signer := types.HomesteadSigner{}
+ _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) {
+ fee := big.NewInt(1)
+ if b.header.BaseFee != nil {
+ fee = b.header.BaseFee
+ }
+ b.SetCoinbase(common.Address{1})
+
+ // Block 1
+ if i == 0 {
+ tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
+ Nonce: nonce,
+ GasPrice: new(big.Int).Set(fee),
+ Gas: 500000,
+ Data: factoryBIN,
+ })
+ nonce++
+ b.AddTx(tx)
+
+ data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000")
+ tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
+ Nonce: nonce,
+ GasPrice: new(big.Int).Set(fee),
+ Gas: 500000,
+ To: &factoryAddr,
+ Data: data,
+ })
+ b.AddTx(tx)
+ nonce++
+ } else {
+ // Block 2
+ tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
+ Nonce: nonce,
+ GasPrice: new(big.Int).Set(fee),
+ Gas: 500000,
+ To: &contractAddr,
+ Data: common.Hex2Bytes("2b68b9c6"), // destruct
+ })
+ nonce++
+ b.AddTx(tx)
+
+ data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000")
+ tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
+ Nonce: nonce,
+ GasPrice: new(big.Int).Set(fee),
+ Gas: 500000,
+ To: &factoryAddr, // re-creation
+ Data: data,
+ })
+ b.AddTx(tx)
+ nonce++
+ }
+ })
+ // Import the canonical chain
+ chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
+ if err != nil {
+ t.Fatalf("failed to create tester chain: %v", err)
+ }
+ for _, block := range blocks {
+ if _, err := chain.InsertChain([]*types.Block{block}, nil); err != nil {
+ t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
+ }
+ }
+}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index d0e20302a8..08f82b157d 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -21,6 +21,7 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -264,7 +265,7 @@ func generateChain(
}
blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
chainreader := newFakeChainReader(config, db)
- genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
+ genblock := func(i int, parent *types.Block, triedb *trie.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
b.header = makeHeader(chainreader, parent, statedb, b.engine)
@@ -323,7 +324,7 @@ func generateChain(
panic(fmt.Sprintf("state write error: %v", err))
}
if flushDisk {
- if err := statedb.Database().TrieDB().Commit(root, false); err != nil {
+ if err := triedb.Commit(root, false); err != nil {
panic(fmt.Sprintf("trie write error: %v", err))
}
}
@@ -331,14 +332,16 @@ func generateChain(
}
return nil, nil
}
- // Create an ephemeral database
- database := state.NewDatabase(db)
+ // Forcibly use hash-based state scheme for retaining all nodes in disk.
+ triedb := trie.NewDatabase(db, trie.HashDefaults)
+ defer triedb.Close()
+
for i := 0; i < n; i++ {
- statedb, err := state.New(parent.Root(), database, nil)
+ statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, triedb), nil)
if err != nil {
panic(err)
}
- block, receipt := genblock(i, parent, statedb)
+ block, receipt := genblock(i, parent, triedb, statedb)
// Prepare Blob receipt
var blobGasPrice *big.Int
@@ -362,7 +365,10 @@ func generateChain(
// then generate chain on top.
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
db := rawdb.NewMemoryDatabase()
- genesis.MustCommit(db)
+ triedb := trie.NewDatabase(db, trie.HashDefaults)
+
+ defer triedb.Close()
+ genesis.MustCommit(db, triedb)
blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen, true)
return db, blocks, receipts
}
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 22b59e4754..adf2476bc3 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
func ExampleGenerateChain() {
@@ -44,7 +45,7 @@ func ExampleGenerateChain() {
Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)},
Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
// This call generates a chain of 5 blocks. The function runs for
// each block and adds different features to gen based on the
@@ -79,7 +80,7 @@ func ExampleGenerateChain() {
}, true)
// Import the chain. This runs all block validation rules.
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer blockchain.Stop()
if i, err := blockchain.InsertChain(chain, nil); err != nil {
diff --git a/core/dao_test.go b/core/dao_test.go
index 2fa5b4e26c..0c862a9c11 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -34,18 +34,14 @@ func TestDAOForkRangeExtradata(t *testing.T) {
chainConfig.HomesteadBlock = big.NewInt(0)
// Generate a common prefix for both pro-forkers and non-forkers
- db := rawdb.NewMemoryDatabase()
gspec := &Genesis{
BaseFee: big.NewInt(params.InitialBaseFee),
Config: &chainConfig,
}
- genesis := gspec.MustCommit(db)
- prefix, _ := GenerateChain(&chainConfig, genesis, ethash.NewFaker(), db, int(forkBlock.Int64()-1), func(i int, gen *BlockGen) {}, true)
+ db, prefix, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(forkBlock.Int64()-1), func(i int, gen *BlockGen) {})
// Create the concurrent, conflicting two nodes
proDb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(proDb)
-
proConf := *params.NonActivatedConfig
proConf.HomesteadBlock = big.NewInt(0)
proConf.DAOForkBlock = forkBlock
@@ -54,13 +50,10 @@ func TestDAOForkRangeExtradata(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee),
Config: &proConf,
}
-
proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer proBc.Stop()
conDb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(conDb)
-
conConf := *params.NonActivatedConfig
conConf.HomesteadBlock = big.NewInt(0)
conConf.DAOForkBlock = forkBlock
@@ -69,7 +62,6 @@ func TestDAOForkRangeExtradata(t *testing.T) {
BaseFee: big.NewInt(params.InitialBaseFee),
Config: &conConf,
}
-
conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer conBc.Stop()
@@ -82,10 +74,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks
for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ {
// Create a pro-fork block, and try to feed into the no-fork chain
- db = rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
- defer bc.Stop()
+ bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
for j := 0; j < len(blocks)/2; j++ {
@@ -94,7 +83,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
+ if err := bc.triedb.Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -107,9 +96,8 @@ func TestDAOForkRangeExtradata(t *testing.T) {
t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err)
}
// Create a no-fork block, and try to feed into the pro-fork chain
- db = rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
@@ -119,7 +107,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
+ if err := bc.triedb.Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -133,9 +121,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
}
}
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
- db = rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()))
@@ -145,7 +131,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
+ if err := bc.triedb.Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -153,9 +139,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err)
}
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
- db = rawdb.NewMemoryDatabase()
- gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()))
@@ -165,7 +149,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
+ if err := bc.triedb.Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
diff --git a/core/error.go b/core/error.go
index ca1724b9a6..a36db136e9 100644
--- a/core/error.go
+++ b/core/error.go
@@ -1,3 +1,4 @@
+
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
diff --git a/core/genesis.go b/core/genesis.go
index ef3e3014aa..d1c8ff94fe 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -272,10 +272,12 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen
}
return genesis.Config, block.Hash(), nil
}
- // We have the genesis block in database(perhaps in ancient database)
- // but the corresponding state is missing.
+ // The genesis block is present(perhaps in ancient database) while the
+ // state database is not initialized yet. It can happen that the node
+ // is initialized with an external ancient store. Commit genesis state
+ // in this case.
header := rawdb.ReadHeader(db, stored, 0)
- if _, err := state.New(header.Root, state.NewDatabaseWithNodeDB(db, triedb), nil); err != nil {
+ if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) {
if genesis == nil {
genesis = DefaultGenesisBlock()
}
@@ -449,8 +451,8 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block
// MustCommit writes the genesis block and state to db, panicking on error.
// The block is committed as the canonical head block.
-func (g *Genesis) MustCommit(db ethdb.Database) *types.Block {
- block, err := g.Commit(db, trie.NewDatabase(db))
+func (g *Genesis) MustCommit(db ethdb.Database, triedb *trie.Database) *types.Block {
+ block, err := g.Commit(db, triedb)
if err != nil {
panic(err)
}
diff --git a/core/genesis_test.go b/core/genesis_test.go
index aced782f3e..9fb0a988e8 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -30,18 +30,23 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)
func TestInvalidCliqueConfig(t *testing.T) {
block := DefaultGoerliGenesisBlock()
block.ExtraData = []byte{}
db := rawdb.NewMemoryDatabase()
- if _, err := block.Commit(db, trie.NewDatabase(db)); err == nil {
+ if _, err := block.Commit(db, trie.NewDatabase(db, nil)); err == nil {
t.Fatal("Expected error on invalid clique config")
}
}
-
func TestSetupGenesis(t *testing.T) {
+ testSetupGenesis(t, rawdb.HashScheme)
+ testSetupGenesis(t, rawdb.PathScheme)
+}
+
+func testSetupGenesis(t *testing.T, scheme string) {
var (
customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50")
customg = Genesis{
@@ -63,7 +68,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "genesis without ChainConfig",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlock(db, trie.NewDatabase(db), new(Genesis), false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), new(Genesis), false)
},
wantErr: errGenesisNoConfig,
wantConfig: params.AllEthashProtocolChanges,
@@ -71,7 +76,7 @@ func TestSetupGenesis(t *testing.T) {
{
name: "no block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlock(db, trie.NewDatabase(db), nil, false)
+ return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil, false)
},
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
@@ -79,8 +84,9 @@ func TestSetupGenesis(t *testing.T) {
{
name: "mainnet block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- DefaultGenesisBlock().MustCommit(db)
- return SetupGenesisBlock(db, trie.NewDatabase(db), nil, false)
+ tdb := newDbConfig(scheme)
+ DefaultGenesisBlock().MustCommit(db, trie.NewDatabase(db, tdb))
+ return SetupGenesisBlock(db, trie.NewDatabase(db, tdb), nil, false)
},
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
@@ -88,8 +94,9 @@ func TestSetupGenesis(t *testing.T) {
{
name: "custom block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- customg.MustCommit(db)
- return SetupGenesisBlock(db, trie.NewDatabase(db), nil, false)
+ tdb := newDbConfig(scheme)
+ customg.MustCommit(db, trie.NewDatabase(db, tdb))
+ return SetupGenesisBlock(db, trie.NewDatabase(db, tdb), nil, false)
},
wantHash: customghash,
wantConfig: customg.Config,
@@ -97,8 +104,9 @@ func TestSetupGenesis(t *testing.T) {
{
name: "custom block in DB, genesis == ropsten",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- customg.MustCommit(db)
- return SetupGenesisBlock(db, trie.NewDatabase(db), DefaultRopstenGenesisBlock(), false)
+ tdb := newDbConfig(scheme)
+ customg.MustCommit(db, trie.NewDatabase(db, tdb))
+ return SetupGenesisBlock(db, trie.NewDatabase(db, tdb), DefaultRopstenGenesisBlock(), false)
},
wantErr: &GenesisMismatchError{Stored: customghash, New: params.RopstenGenesisHash},
wantHash: params.RopstenGenesisHash,
@@ -107,8 +115,9 @@ func TestSetupGenesis(t *testing.T) {
{
name: "compatible config in DB",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- oldcustomg.MustCommit(db)
- return SetupGenesisBlock(db, trie.NewDatabase(db), &customg, false)
+ tdb := newDbConfig(scheme)
+ oldcustomg.MustCommit(db, trie.NewDatabase(db, tdb))
+ return SetupGenesisBlock(db, trie.NewDatabase(db, tdb), &customg, false)
},
wantHash: customghash,
wantConfig: customg.Config,
@@ -118,16 +127,17 @@ func TestSetupGenesis(t *testing.T) {
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
// Commit the 'old' genesis block with Homestead transition at #2.
// Advance to block #4, past the homestead transition block of customg.
- genesis := oldcustomg.MustCommit(db)
+ tdb := trie.NewDatabase(db, newDbConfig(scheme))
+ oldcustomg.MustCommit(db, tdb)
- bc, _ := NewBlockChain(db, nil, &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
+ bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
defer bc.Stop()
- blocks, _ := GenerateChain(oldcustomg.Config, genesis, ethash.NewFaker(), db, 4, nil, true)
+ _, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil)
bc.InsertChain(blocks, nil)
bc.CurrentBlock()
// This should return a compatibility error.
- return SetupGenesisBlock(db, trie.NewDatabase(db), &customg, false)
+ return SetupGenesisBlock(db, tdb, &customg, false)
},
wantHash: customghash,
wantConfig: customg.Config,
@@ -177,7 +187,8 @@ func TestGenesisHashes(t *testing.T) {
{DefaultSepoliaGenesisBlock(), params.SepoliaGenesisHash},
} {
// Test via MustCommit
- if have := c.genesis.MustCommit(rawdb.NewMemoryDatabase()).Hash(); have != c.want {
+ db := rawdb.NewMemoryDatabase()
+ if have := c.genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)).Hash(); have != c.want {
t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
}
// Test via ToBlock
@@ -195,7 +206,7 @@ func TestGenesis_Commit(t *testing.T) {
}
db := rawdb.NewMemoryDatabase()
- genesisBlock := genesis.MustCommit(db)
+ genesisBlock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
if genesis.Difficulty != nil {
t.Fatalf("assumption wrong")
}
@@ -243,3 +254,10 @@ func TestReadWriteGenesisAlloc(t *testing.T) {
}
}
}
+
+func newDbConfig(scheme string) *trie.Config {
+ if scheme == rawdb.HashScheme {
+ return trie.HashDefaults
+ }
+ return &trie.Config{PathDB: pathdb.Defaults}
+}
diff --git a/core/headerchain_test.go b/core/headerchain_test.go
index 3dd7d8c624..2163926fb5 100644
--- a/core/headerchain_test.go
+++ b/core/headerchain_test.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
func verifyUnbrokenCanonchain(hc *HeaderChain) error {
@@ -72,7 +73,7 @@ func TestHeaderInsertion(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
gspec = &Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.InitialBaseFee)}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, nil))
)
hc, err := NewHeaderChain(db, params.AllEthashProtocolChanges, ethash.NewFaker(), func() bool { return false })
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index 978220f15c..4706156602 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -36,7 +36,7 @@ import (
//
// Now this scheme is still kept for backward compatibility, and it will be used
// for archive node and some other tries(e.g. light trie).
-const HashScheme = "hashScheme"
+const HashScheme = "hash"
// PathScheme is the new path-based state scheme with which trie nodes are stored
// in the disk with node path as the database key. This scheme will only store one
@@ -44,7 +44,7 @@ const HashScheme = "hashScheme"
// is native. At the same time, this scheme will put adjacent trie nodes in the same
// area of the disk with good data locality property. But this scheme needs to rely
// on extra state diffs to survive deep reorg.
-const PathScheme = "pathScheme"
+const PathScheme = "path"
// hasher is used to compute the sha256 hash of the provided data.
type hasher struct{ sha crypto.KeccakState }
@@ -208,9 +208,12 @@ func ReadTrieNode(db ethdb.KeyValueReader, owner common.Hash, path []byte, hash
} else {
blob, nHash = ReadStorageTrieNode(db, owner, path)
}
- if nHash != hash {
+ if len(blob) == 0 {
return nil
}
+ if nHash != hash {
+ return nil // Exists but not match
+ }
return blob
default:
panic(fmt.Sprintf("Unknown scheme %v", scheme))
@@ -262,3 +265,25 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has
panic(fmt.Sprintf("Unknown scheme %v", scheme))
}
}
+
+// ReadStateScheme reads the state scheme of persistent state, or none
+// if the state is not present in database.
+func ReadStateScheme(db ethdb.Reader) string {
+ // Check if state in path-based scheme is present
+ blob, _ := ReadAccountTrieNode(db, nil)
+ if len(blob) != 0 {
+ return PathScheme
+ }
+ // In a hash-based scheme, the genesis state is consistently stored
+ // on the disk. To assess the scheme of the persistent state, it
+ // suffices to inspect the scheme of the genesis state.
+ header := ReadHeader(db, ReadCanonicalHash(db, 0), 0)
+ if header == nil {
+ return "" // empty datadir
+ }
+ blob = ReadLegacyTrieNode(db, header.Root)
+ if len(blob) == 0 {
+ return "" // no state in disk
+ }
+ return HashScheme
+}
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
index b0f507cdd5..2773d3611a 100644
--- a/core/rawdb/ancient_scheme.go
+++ b/core/rawdb/ancient_scheme.go
@@ -51,9 +51,9 @@ const (
namespace = "eth/db/state"
)
-// stateHistoryFreezerNoSnappy configures whether compression is disabled for the stateHistory.
+// stateFreezerNoSnappy configures whether compression is disabled for the stateHistory.
// https://github.com/golang/snappy, Reason for splititng files for looking up in archive mode easily.
-var stateHistoryFreezerNoSnappy = map[string]bool{
+var stateFreezerNoSnappy = map[string]bool{
stateHistoryMeta: true,
stateHistoryAccountIndex: false,
stateHistoryStorageIndex: false,
@@ -80,9 +80,9 @@ var (
// freezers the collections of all builtin freezers.
var freezers = []string{chainFreezerName}
-// NewStateHistoryFreezer initializes the freezer for state history.
-func NewStateHistoryFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) {
+// NewStateFreezer initializes the freezer for state history.
+func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) {
return NewResettableFreezer(
filepath.Join(ancientDir, stateFreezerName), namespace, readOnly,
- stateHistoryTableSize, stateHistoryFreezerNoSnappy)
+ stateHistoryTableSize, stateFreezerNoSnappy)
}
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
index 5fe2423694..2a88ae5c18 100644
--- a/core/rawdb/ancient_utils.go
+++ b/core/rawdb/ancient_utils.go
@@ -50,39 +50,60 @@ func (info *freezerInfo) size() common.StorageSize {
return total
}
+func inspect(name string, order map[string]bool, reader ethdb.AncientReader) (freezerInfo, error) {
+ info := freezerInfo{name: name}
+ for t := range order {
+ size, err := reader.AncientSize(t)
+ if err != nil {
+ return freezerInfo{}, err
+ }
+ info.sizes = append(info.sizes, tableSize{name: t, size: common.StorageSize(size)})
+ }
+ // Retrieve the number of last stored item
+ ancients, err := reader.Ancients()
+ if err != nil {
+ return freezerInfo{}, err
+ }
+ info.head = ancients - 1
+
+ // Retrieve the number of first stored item
+ tail, err := reader.Tail()
+ if err != nil {
+ return freezerInfo{}, err
+ }
+ info.tail = tail
+ return info, nil
+}
+
// inspectFreezers inspects all freezers registered in the system.
func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
- var (
- infos []freezerInfo
- )
+ var infos []freezerInfo
for _, freezer := range freezers {
switch freezer {
- case chainFreezerName: // We only support chain freezer for now.
- // Chain ancient store is a bit special. It's always opened along
- // with a key-value store, inspect the chain store directly.
- info := freezerInfo{name: freezer}
- // Retrieve storage size of every contained table.
- for table := range chainFreezerNoSnappy {
- size, err := db.AncientSize(table)
- if err != nil {
- return nil, err
- }
- info.sizes = append(info.sizes, tableSize{name: table, size: common.StorageSize(size)})
+ case chainFreezerName:
+ info, err := inspect(chainFreezerName, chainFreezerNoSnappy, db)
+ if err != nil {
+ return nil, err
}
- // Retrieve the number of last stored item
- ancients, err := db.Ancients()
+ infos = append(infos, info)
+
+ case stateFreezerName:
+ datadir, err := db.AncientDatadir()
if err != nil {
return nil, err
}
- info.head = ancients - 1
+ f, err := NewStateFreezer(datadir, true)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
- // Retrieve the number of first stored item
- tail, err := db.Tail()
+ info, err := inspect(stateFreezerName, stateFreezerNoSnappy, f)
if err != nil {
return nil, err
}
- info.tail = tail
infos = append(infos, info)
+
default:
return nil, fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index cf0cc15096..522fa33ca7 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -446,7 +446,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
tds stat
numHashPairings stat
hashNumPairings stat
- tries stat
+ legacyTries stat
+ stateLookups stat
+ accountTries stat
+ storageTries stat
codes stat
txLookups stat
accountSnaps stat
@@ -487,8 +490,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
numHashPairings.Add(size)
case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
hashNumPairings.Add(size)
- case len(key) == common.HashLength:
- tries.Add(size)
+ case IsLegacyTrieNode(key, it.Value()):
+ legacyTries.Add(size)
+ case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
+ stateLookups.Add(size)
+ case IsAccountTrieNode(key):
+ accountTries.Add(size)
+ case IsStorageTrieNode(key):
+ storageTries.Add(size)
case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
codes.Add(size)
case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
@@ -526,7 +535,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, highestFinalityVoteKey, storeInternalTxsEnabledKey,
- snapshotSyncStatusKey,
+ snapshotSyncStatusKey, persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey,
} {
if bytes.Equal(key, meta) {
metadata.Add(size)
@@ -556,7 +565,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
- {"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
+ {"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
+ {"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
+ {"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()},
+ {"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()},
{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 6c2afc1d1c..84b2bc8e5f 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -270,9 +270,10 @@ func IsLegacyTrieNode(key []byte, val []byte) bool {
return bytes.Equal(key, crypto.Keccak256(val))
}
-// IsAccountTrieNode reports whether a provided database entry is an account
-// trie node in path-based state scheme.
-func IsAccountTrieNode(key []byte) (bool, []byte) {
+// ResolveAccountTrieNodeKey reports whether a provided database entry is an
+// account trie node in path-based state scheme, and returns the resolved
+// node path if so.
+func ResolveAccountTrieNodeKey(key []byte) (bool, []byte) {
if !bytes.HasPrefix(key, trieNodeAccountPrefix) {
return false, nil
}
@@ -285,9 +286,17 @@ func IsAccountTrieNode(key []byte) (bool, []byte) {
return true, key[len(trieNodeAccountPrefix):]
}
-// IsStorageTrieNode reports whether a provided database entry is a storage
+// IsAccountTrieNode reports whether a provided database entry is an account
// trie node in path-based state scheme.
-func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
+func IsAccountTrieNode(key []byte) bool {
+ ok, _ := ResolveAccountTrieNodeKey(key)
+ return ok
+}
+
+// ResolveStorageTrieNode reports whether a provided database entry is a storage
+// trie node in path-based state scheme, and returns the resolved account hash
+// and node path if so.
+func ResolveStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
if !bytes.HasPrefix(key, trieNodeStoragePrefix) {
return false, common.Hash{}, nil
}
@@ -304,6 +313,13 @@ func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:]
}
+// IsStorageTrieNode reports whether a provided database entry is a storage
+// trie node in path-based state scheme.
+func IsStorageTrieNode(key []byte) bool {
+ ok, _, _ := ResolveStorageTrieNode(key)
+ return ok
+}
+
// stateIDKey = stateIDPrefix + root (32 bytes)
func stateIDKey(root common.Hash) []byte {
return append(stateIDPrefix, root.Bytes()...)
diff --git a/core/rawdb/schema_test.go b/core/rawdb/schema_test.go
index a1009b3bd2..06b017d41c 100644
--- a/core/rawdb/schema_test.go
+++ b/core/rawdb/schema_test.go
@@ -78,7 +78,7 @@ func TestIsLegacyTrieNode(t *testing.T) {
}
}
-func TestIsAccountTrieNode(t *testing.T) {
+func TestResolveAccountTrieNodeKey(t *testing.T) {
tests := []struct {
name string
inputKey []byte
@@ -137,14 +137,14 @@ func TestIsAccountTrieNode(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- if check, key := IsAccountTrieNode(test.inputKey); check != test.expectedCheck || !bytes.Equal(key, test.expectedKey) {
+ if check, key := ResolveAccountTrieNodeKey(test.inputKey); check != test.expectedCheck || !bytes.Equal(key, test.expectedKey) {
t.Errorf("expected %v, %v, got %v, %v", test.expectedCheck, test.expectedKey, check, key)
}
})
}
}
-func TestIsStorageTrieNode(t *testing.T) {
+func TestResolveStorageTrieNode(t *testing.T) {
tests := []struct {
name string
inputKey []byte
@@ -219,7 +219,7 @@ func TestIsStorageTrieNode(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- if check, hash, key := IsStorageTrieNode(test.inputKey); check != test.expectedCheck || !bytes.Equal(key, test.expectedKey) || hash != test.expectedHash {
+ if check, hash, key := ResolveStorageTrieNode(test.inputKey); check != test.expectedCheck || !bytes.Equal(key, test.expectedKey) || hash != test.expectedHash {
t.Errorf("expected %v, %v, %v, got %v, %v, %v", test.expectedCheck, test.expectedHash, test.expectedKey, check, hash, key)
}
})
diff --git a/core/rlp_test.go b/core/rlp_test.go
index bf30eff24f..ba4a9266fd 100644
--- a/core/rlp_test.go
+++ b/core/rlp_test.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)
@@ -45,7 +46,7 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block {
Config: params.TestChainConfig,
Alloc: GenesisAlloc{address: {Balance: funds}},
}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
)
// We need to generate as many blocks +1 as uncles
diff --git a/core/state/database.go b/core/state/database.go
index 6991e85843..01c7c51f26 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -97,7 +97,7 @@ type Trie interface {
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key.
- NodeIterator(startKey []byte) trie.NodeIterator
+ NodeIterator(startKey []byte) (trie.NodeIterator, error)
// Prove constructs a Merkle proof for key. The result contains all encoded nodes
// on the path to the value at key. The value itself is also included in the last
@@ -122,7 +122,7 @@ func NewDatabase(db ethdb.Database) Database {
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
csc, _ := lru.New[common.Hash, int](codeSizeCacheSize)
return &cachingDB{
- triedb: trie.NewDatabaseWithConfig(db, config),
+ triedb: trie.NewDatabase(db, config),
codeSizeCache: csc,
codeCache: fastcache.New(codeCacheSize),
}
diff --git a/core/state/dump.go b/core/state/dump.go
index caa0061ce7..e9891960f9 100644
--- a/core/state/dump.go
+++ b/core/state/dump.go
@@ -138,8 +138,11 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
)
log.Info("Trie dumping started", "root", s.trie.Hash())
c.OnRoot(s.trie.Hash())
-
- it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
+ trieIt, err := s.trie.NodeIterator(conf.Start)
+ if err != nil {
+ return nil
+ }
+ it := trie.NewIterator(trieIt)
for it.Next() {
var data types.StateAccount
if err := rlp.DecodeBytes(it.Value, &data); err != nil {
@@ -166,9 +169,15 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []
if !conf.SkipCode {
account.Code = obj.Code()
}
+
if !conf.SkipStorage {
account.Storage = make(map[common.Hash]string)
- storageIt := trie.NewIterator(obj.getTrie().NodeIterator(nil))
+ trieIt, err := obj.getTrie().NodeIterator(nil)
+ if err != nil {
+ log.Error("Failed to create trie iterator", "err", err)
+ continue
+ }
+ storageIt := trie.NewIterator(trieIt)
for storageIt.Next() {
_, content, _, err := rlp.Split(storageIt.Value)
if err != nil {
diff --git a/core/state/iterator.go b/core/state/iterator.go
index ba7efd4653..653d566f85 100644
--- a/core/state/iterator.go
+++ b/core/state/iterator.go
@@ -74,8 +74,12 @@ func (it *NodeIterator) step() error {
return nil
}
// Initialize the iterator if we've just started
+ var err error
if it.stateIt == nil {
- it.stateIt = it.state.trie.NodeIterator(nil)
+ it.stateIt, err = it.state.trie.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
}
// If we had data nodes previously, we surely have at least state nodes
if it.dataIt != nil {
@@ -113,7 +117,10 @@ func (it *NodeIterator) step() error {
if err != nil {
return err
}
- it.dataIt = dataTrie.NodeIterator(nil)
+ it.dataIt, err = dataTrie.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
if !it.dataIt.Next(true) {
it.dataIt = nil
}
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
index 24b192c26c..2419c9175c 100644
--- a/core/state/iterator_test.go
+++ b/core/state/iterator_test.go
@@ -26,9 +26,14 @@ import (
// Tests that the node iterator indeed walks over the entire database contents.
func TestNodeIteratorCoverage(t *testing.T) {
+ testNodeIteratorCoverage(t, rawdb.HashScheme)
+ testNodeIteratorCoverage(t, rawdb.PathScheme)
+}
+
+func testNodeIteratorCoverage(t *testing.T, scheme string) {
// Create some arbitrary test state to iterate
- db, sdb, root, _ := makeTestState()
- sdb.TrieDB().Commit(root, false)
+ db, sdb, ndb, root, _ := makeTestState(scheme)
+ ndb.Commit(root, false)
state, err := New(root, sdb, nil)
if err != nil {
@@ -48,7 +53,7 @@ func TestNodeIteratorCoverage(t *testing.T) {
)
it := db.NewIterator(nil, nil)
for it.Next() {
- ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value())
+ ok, hash := isTrieNode(scheme, it.Key(), it.Value())
if !ok {
continue
}
@@ -90,11 +95,11 @@ func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) {
return true, common.BytesToHash(key)
}
} else {
- ok, _ := rawdb.IsAccountTrieNode(key)
+ ok := rawdb.IsAccountTrieNode(key)
if ok {
return true, crypto.Keccak256Hash(val)
}
- ok, _, _ = rawdb.IsStorageTrieNode(key)
+ ok = rawdb.IsStorageTrieNode(key)
if ok {
return true, crypto.Keccak256Hash(val)
}
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index f46eb66990..05825cc3ad 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -88,7 +88,9 @@ func NewPruner(db ethdb.Database, datadir string, bloomSize uint64) (*Pruner, er
if headBlock == nil {
return nil, errors.New("Failed to load head block")
}
- snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false)
+ // Offline pruning is only supported in legacy hash based scheme.
+ triedb := trie.NewDatabase(db, trie.HashDefaults)
+ snaptree, err := snapshot.New(db, triedb, 256, headBlock.Root(), false, false, false)
if err != nil {
return nil, err // The relevant snapshot(s) might not exist
}
@@ -355,7 +357,9 @@ func RecoverPruning(datadir string, db ethdb.Database) error {
// - The state HEAD is rewound already because of multiple incomplete `prune-state`
// In this case, even the state HEAD is not exactly matched with snapshot, it
// still feasible to recover the pruning correctly.
- snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true)
+ // Offline pruning is only supported in legacy hash based scheme.
+ triedb := trie.NewDatabase(db, trie.HashDefaults)
+ snaptree, err := snapshot.New(db, triedb, 256, headBlock.Root(), false, false, true)
if err != nil {
return err // The relevant snapshot(s) might not exist
}
@@ -397,11 +401,14 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if genesis == nil {
return errors.New("missing genesis block")
}
- t, err := trie.NewSecure(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db))
+ t, err := trie.NewSecure(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db, trie.HashDefaults))
+ if err != nil {
+ return err
+ }
+ accIter, err := t.NodeIterator(nil)
if err != nil {
return err
}
- accIter := t.NodeIterator(nil)
for accIter.Next(true) {
hash := accIter.Hash()
@@ -417,11 +424,14 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
return err
}
if acc.Root != emptyRoot {
- storageTrie, err := trie.NewSecure(trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root), trie.NewDatabase(db))
+ storageTrie, err := trie.NewSecure(trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root), trie.NewDatabase(db, trie.HashDefaults))
+ if err != nil {
+ return err
+ }
+ storageIter, err := storageTrie.NodeIterator(nil)
if err != nil {
return err
}
- storageIter := storageTrie.NodeIterator(nil)
for storageIter.Next(true) {
hash := storageIter.Hash()
if hash != (common.Hash{}) {
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 80e3d61bba..d8ecba8746 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -432,8 +432,9 @@ func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string,
var resolver trie.NodeResolver
if len(result.keys) > 0 {
mdb := rawdb.NewMemoryDatabase()
- tdb := trie.NewDatabase(mdb)
+ tdb := trie.NewDatabase(mdb, trie.HashDefaults)
snapTrie := trie.NewEmpty(tdb)
+ defer tdb.Close()
for i, key := range result.keys {
snapTrie.Update(key, result.vals[i])
}
@@ -459,7 +460,7 @@ func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string,
var (
trieMore bool
- nodeIt = tr.NodeIterator(origin)
+ nodeIt = tr.MustNodeIterator(origin)
iter = trie.NewIterator(nodeIt)
kvkeys, kvvals = result.keys, result.vals
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 08d83c46f6..caf13e42bb 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -30,6 +30,8 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -45,10 +47,15 @@ func hashData(input []byte) common.Hash {
// Tests that snapshot generation from an empty database.
func TestGeneration(t *testing.T) {
+ testGeneration(t, rawdb.HashScheme)
+ testGeneration(t, rawdb.PathScheme)
+}
+
+func testGeneration(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- var helper = newHelper()
+ var helper = newHelper(scheme)
stRoot := helper.makeStorageTrie(common.Hash{}, common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false)
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
@@ -79,10 +86,15 @@ func TestGeneration(t *testing.T) {
// Tests that snapshot generation with existent flat state.
func TestGenerateExistentState(t *testing.T) {
+ testGenerateExistentState(t, rawdb.HashScheme)
+ testGenerateExistentState(t, rawdb.PathScheme)
+}
+
+func testGenerateExistentState(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- var helper = newHelper()
+ var helper = newHelper(scheme)
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()})
@@ -143,9 +155,15 @@ type testHelper struct {
nodes *trienode.MergedNodeSet
}
-func newHelper() *testHelper {
+func newHelper(scheme string) *testHelper {
diskdb := rawdb.NewMemoryDatabase()
- triedb := trie.NewDatabase(diskdb)
+ config := &trie.Config{}
+ if scheme == rawdb.PathScheme {
+ config.PathDB = &pathdb.Config{} // disable caching
+ } else {
+ config.HashDB = &hashdb.Config{} // disable caching
+ }
+ triedb := trie.NewDatabase(diskdb, config)
accTrie, _ := trie.NewSecure(trie.StateTrieID(common.Hash{}), triedb)
return &testHelper{
diskdb: diskdb,
@@ -228,7 +246,12 @@ func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) {
// - extra slots in the middle
// - extra slots in the end
func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
- helper := newHelper()
+ testGenerateExistentStateWithWrongStorage(t, rawdb.HashScheme)
+ testGenerateExistentStateWithWrongStorage(t, rawdb.PathScheme)
+}
+
+func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) {
+ helper := newHelper(scheme)
// Account one, empty root but non-empty database
helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()})
@@ -320,7 +343,13 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) {
// - wrong accounts
// - extra accounts
func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
- helper := newHelper()
+
+ testGenerateExistentStateWithWrongAccounts(t, rawdb.HashScheme)
+ testGenerateExistentStateWithWrongAccounts(t, rawdb.PathScheme)
+}
+
+func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) {
+ helper := newHelper(scheme)
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
@@ -375,10 +404,16 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) {
// Tests that snapshot generation errors out correctly in case of a missing trie
// node in the account trie.
func TestGenerateCorruptAccountTrie(t *testing.T) {
+
+ testGenerateCorruptAccountTrie(t, rawdb.HashScheme)
+ testGenerateCorruptAccountTrie(t, rawdb.PathScheme)
+}
+
+func testGenerateCorruptAccountTrie(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// without any storage slots to keep the test smaller.
- helper := newHelper()
+ helper := newHelper(scheme)
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074
helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: emptyRoot, CodeHash: emptyCode.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7
@@ -386,9 +421,11 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
- // Delete an account trie leaf and ensure the generator chokes
- helper.triedb.Commit(root, false)
- helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
+ // Delete an account trie node and ensure the generator chokes
+ targetPath := []byte{0xc}
+ targetHash := common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7")
+
+ rawdb.DeleteTrieNode(helper.diskdb, common.Hash{}, targetPath, targetHash, scheme)
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
@@ -409,10 +446,19 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
// trie node for a storage trie. It's similar to internal corruption but it is
// handled differently inside the generator.
func TestGenerateMissingStorageTrie(t *testing.T) {
+ testGenerateMissingStorageTrie(t, rawdb.HashScheme)
+ testGenerateMissingStorageTrie(t, rawdb.PathScheme)
+}
+
+func testGenerateMissingStorageTrie(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- helper := newHelper()
+ var (
+ acc1 = hashData([]byte("acc-1"))
+ acc3 = hashData([]byte("acc-3"))
+ helper = newHelper(scheme)
+ )
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
@@ -421,8 +467,9 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
root := helper.Commit()
- // Delete a storage trie root and ensure the generator chokes
- helper.diskdb.Delete(stRoot.Bytes())
+ // Delete storage trie root of account one and three.
+ rawdb.DeleteTrieNode(helper.diskdb, acc1, nil, stRoot, scheme)
+ rawdb.DeleteTrieNode(helper.diskdb, acc3, nil, stRoot, scheme)
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
@@ -442,10 +489,16 @@ func TestGenerateMissingStorageTrie(t *testing.T) {
// Tests that snapshot generation errors out correctly in case of a missing trie
// node in a storage trie.
func TestGenerateCorruptStorageTrie(t *testing.T) {
+
+ testGenerateCorruptStorageTrie(t, rawdb.HashScheme)
+ testGenerateCorruptStorageTrie(t, rawdb.PathScheme)
+}
+
+func testGenerateCorruptStorageTrie(t *testing.T, scheme string) {
// We can't use statedb to make a test trie (circular dependency), so make
// a fake one manually. We're going with a small account trie of 3 accounts,
// two of which also has the same 3-slot storage trie attached.
- helper := newHelper()
+ helper := newHelper(scheme)
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67
helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e
@@ -454,8 +507,11 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: emptyCode.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2
root := helper.Commit()
- // Delete a storage trie leaf and ensure the generator chokes
- helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes())
+ // Delete a node in the storage trie.
+ targetPath := []byte{0x4}
+ targetHash := common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371")
+ rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-1")), targetPath, targetHash, scheme)
+ rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-3")), targetPath, targetHash, scheme)
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
select {
@@ -474,7 +530,12 @@ func TestGenerateCorruptStorageTrie(t *testing.T) {
// Tests that snapshot generation when an extra account with storage exists in the snap state.
func TestGenerateWithExtraAccounts(t *testing.T) {
- helper := newHelper()
+ testGenerateWithExtraAccounts(t, rawdb.HashScheme)
+ testGenerateWithExtraAccounts(t, rawdb.PathScheme)
+}
+
+func testGenerateWithExtraAccounts(t *testing.T, scheme string) {
+ helper := newHelper(scheme)
{
// Account one in the trie
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
@@ -542,10 +603,15 @@ func enableLogging() {
// Tests that snapshot generation when an extra account with storage exists in the snap state.
func TestGenerateWithManyExtraAccounts(t *testing.T) {
+ testGenerateWithManyExtraAccounts(t, rawdb.HashScheme)
+ testGenerateWithManyExtraAccounts(t, rawdb.PathScheme)
+}
+
+func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) {
if false {
enableLogging()
}
- helper := newHelper()
+ helper := newHelper(scheme)
{
// Account one in the trie
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte("acc-1")),
@@ -599,11 +665,16 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) {
// So in trie, we iterate 2 entries 0x03, 0x07. We create the 0x07 in the database and abort the procedure, because the trie is exhausted.
// But in the database, we still have the stale storage slots 0x04, 0x05. They are not iterated yet, but the procedure is finished.
func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
+ testGenerateWithExtraBeforeAndAfter(t, rawdb.HashScheme)
+ testGenerateWithExtraBeforeAndAfter(t, rawdb.PathScheme)
+}
+
+func testGenerateWithExtraBeforeAndAfter(t *testing.T, scheme string) {
accountCheckRange = 3
if false {
enableLogging()
}
- helper := newHelper()
+ helper := newHelper(scheme)
{
acc := &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
@@ -636,11 +707,16 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) {
// TestGenerateWithMalformedSnapdata tests what happes if we have some junk
// in the snapshot database, which cannot be parsed back to an account
func TestGenerateWithMalformedSnapdata(t *testing.T) {
+ testGenerateWithMalformedSnapdata(t, rawdb.HashScheme)
+ testGenerateWithMalformedSnapdata(t, rawdb.PathScheme)
+}
+
+func testGenerateWithMalformedSnapdata(t *testing.T, scheme string) {
accountCheckRange = 3
if false {
enableLogging()
}
- helper := newHelper()
+ helper := newHelper(scheme)
{
acc := &types.StateAccount{Balance: big.NewInt(1), Root: emptyRoot, CodeHash: emptyCode.Bytes()}
val, _ := rlp.EncodeToBytes(acc)
@@ -673,10 +749,15 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) {
}
func TestGenerateFromEmptySnap(t *testing.T) {
+ testGenerateFromEmptySnap(t, rawdb.HashScheme)
+ testGenerateFromEmptySnap(t, rawdb.PathScheme)
+}
+
+func testGenerateFromEmptySnap(t *testing.T, scheme string) {
//enableLogging()
accountCheckRange = 10
storageCheckRange = 20
- helper := newHelper()
+ helper := newHelper(scheme)
// Add 1K accounts to the trie
for i := 0; i < 400; i++ {
stRoot := helper.makeStorageTrie(common.Hash{}, hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true)
@@ -708,8 +789,13 @@ func TestGenerateFromEmptySnap(t *testing.T) {
// This hits a case where the snap verification passes, but there are more elements in the trie
// which we must also add.
func TestGenerateWithIncompleteStorage(t *testing.T) {
+ testGenerateWithIncompleteStorage(t, rawdb.HashScheme)
+ testGenerateWithIncompleteStorage(t, rawdb.PathScheme)
+}
+
+func testGenerateWithIncompleteStorage(t *testing.T, scheme string) {
storageCheckRange = 4
- helper := newHelper()
+ helper := newHelper(scheme)
stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"}
stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"}
// We add 8 accounts, each one is missing exactly one of the storage slots. This means
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 881509f474..a66e87de00 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
@@ -41,7 +42,8 @@ func newStateEnv() *stateEnv {
func TestDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
- sdb, _ := New(common.Hash{}, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil)
+ tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+ sdb, _ := New(types.EmptyRootHash, tdb, nil)
s := &stateEnv{db: db, state: sdb}
// generate a few entries
@@ -56,8 +58,10 @@ func TestDump(t *testing.T) {
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
s.state.Commit(0, false)
+ root, _ := s.state.Commit(0, false)
// check that DumpToCollector contains the state objects that are in trie
+ s.state, _ = New(root, tdb, nil)
got := string(s.state.Dump(nil))
want := `{
"root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2",
diff --git a/core/state/statedb.go b/core/state/statedb.go
index b4d66f0687..1d5ff3b369 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -64,6 +64,11 @@ func (n *proofList) Delete(key []byte) error {
// nested states. It's the general query interface to retrieve:
// * Contracts
// * Accounts
+//
+// Once the state is committed, tries cached in stateDB (including account
+// trie, storage tries) will no longer be functional. A new state instance
+// must be created with new root and updated database for accessing post-
+// commit states.
type StateDB struct {
db Database
prefetcher *triePrefetcher
@@ -707,15 +712,19 @@ func (s *StateDB) CreateAccount(addr common.Address) {
}
}
-func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
- so := db.getStateObject(addr)
+func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error {
+ so := s.getStateObject(addr)
if so == nil {
return nil
}
- it := trie.NewIterator(so.getTrie().NodeIterator(nil))
+ trieIt, err := so.getTrie().NodeIterator(nil)
+ if err != nil {
+ return err
+ }
+ it := trie.NewIterator(trieIt)
for it.Next() {
- key := common.BytesToHash(db.trie.GetKey(it.Key))
+ key := common.BytesToHash(s.trie.GetKey(it.Key))
if value, dirty := so.dirtyStorage[key]; dirty {
if !cb(key, value) {
return nil
@@ -997,7 +1006,10 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
if err != nil {
return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
}
- it := tr.NodeIterator(nil)
+ it, err := tr.NodeIterator(nil)
+ if err != nil {
+ return false, nil, nil, fmt.Errorf("failed to create iterator, err: %w", err)
+ }
var (
set = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
@@ -1137,6 +1149,10 @@ func (s *StateDB) clearJournalAndRefund() {
//
// The associated block number of the state transition is also provided
// for more chain context.
+// Once the state is committed, tries cached in stateDB (including account
+// trie, storage tries) will no longer be functional. A new state instance
+// must be created with new root and updated database for accessing post-
+// commit states.
func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) {
if s.dbErr != nil {
return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@@ -1151,8 +1167,9 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
storageTrieNodesUpdated int
storageTrieNodesDeleted int
nodes = trienode.NewMergedNodeSet()
+ codeWriter = s.db.TrieDB().DiskDB().NewBatch()
)
- codeWriter := s.db.TrieDB().DiskDB().NewBatch()
+
// Handle all state deletions first
incomplete, err := s.handleDestruction(nodes)
if err != nil {
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index bea297d0f9..1bef564ed9 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -179,7 +179,7 @@ func (test *stateTest) run() bool {
storageList = append(storageList, copy2DSet(states.Storages))
}
disk = rawdb.NewMemoryDatabase()
- tdb = trie.NewDatabaseWithConfig(disk, &trie.Config{OnCommit: onCommit})
+ tdb = trie.NewDatabase(disk, &trie.Config{OnCommit: onCommit})
sdb = NewDatabaseWithNodeDB(disk, tdb)
byzantium = rand.Intn(2) == 0
)
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index aa08ebb926..841702f98c 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie"
)
// Tests that updating a state trie does not leak any database writes prior to
@@ -518,7 +519,8 @@ func TestCopyOfCopy(t *testing.T) {
//
// See https://github.com/ethereum/go-ethereum/issues/20106.
func TestCopyCommitCopy(t *testing.T) {
- state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ tdb := NewDatabase(rawdb.NewMemoryDatabase())
+ state, _ := New(types.EmptyRootHash, tdb, nil)
// Create an account and check if the retrieved balance is correct
addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
@@ -556,19 +558,6 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
- copyOne.Commit(0, false)
- if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
- t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyOne.GetState(addr, skey); val != sval {
- t.Fatalf("first copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyOne.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("first copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
- }
// Copy the copy and check the balance once more
copyTwo := copyOne.Copy()
if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
@@ -580,8 +569,23 @@ func TestCopyCommitCopy(t *testing.T) {
if val := copyTwo.GetState(addr, skey); val != sval {
t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval)
}
- if val := copyTwo.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
+ if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ // Commit state, ensure states can be loaded from disk
+ root, _ := state.Commit(0, false)
+ state, _ = New(root, tdb, nil)
+ if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+ t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
+ }
+ if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ t.Fatalf("state post-commit code mismatch: have %x, want %x", code, []byte("hello"))
+ }
+ if val := state.GetState(addr, skey); val != sval {
+ t.Fatalf("state post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ if val := state.GetCommittedState(addr, skey); val != sval {
+ t.Fatalf("state post-commit committed storage slot mismatch: have %x, want %x", val, sval)
}
}
@@ -641,19 +645,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
- copyTwo.Commit(0, false)
- if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
- t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42)
- }
- if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
- t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello"))
- }
- if val := copyTwo.GetState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval)
- }
- if val := copyTwo.GetCommittedState(addr, skey); val != sval {
- t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval)
- }
+
// Copy the copy-copy and check the balance once more
copyThree := copyTwo.Copy()
if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
@@ -665,11 +657,60 @@ func TestCopyCopyCommitCopy(t *testing.T) {
if val := copyThree.GetState(addr, skey); val != sval {
t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval)
}
- if val := copyThree.GetCommittedState(addr, skey); val != sval {
+ if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) {
t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval)
}
}
+// TestCommitCopy tests the copy from a committed state is not functional.
+func TestCommitCopy(t *testing.T) {
+ state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil)
+
+ // Create an account and check if the retrieved balance is correct
+ addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe")
+ skey := common.HexToHash("aaa")
+ sval := common.HexToHash("bbb")
+
+ state.SetBalance(addr, big.NewInt(42)) // Change the account trie
+ state.SetCode(addr, []byte("hello")) // Change an external metadata
+ state.SetState(addr, skey, sval) // Change the storage trie
+
+ if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 {
+ t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42)
+ }
+ if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) {
+ t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello"))
+ }
+ if val := state.GetState(addr, skey); val != sval {
+ t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval)
+ }
+ if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
+ }
+ // Copy the committed state database, the copied one is not functional.
+ state.Commit(0, true)
+ copied := state.Copy()
+ if balance := copied.GetBalance(addr); balance.Cmp(big.NewInt(0)) != 0 {
+ t.Fatalf("unexpected balance: have %v", balance)
+ }
+ if code := copied.GetCode(addr); code != nil {
+ t.Fatalf("unexpected code: have %x", code)
+ }
+ if val := copied.GetState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("unexpected storage slot: have %x", val)
+ }
+ if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) {
+ t.Fatalf("unexpected storage slot: have %x", val)
+ }
+ // Should compare based on the error message.
+ if !strings.Contains(copied.Error().Error(), trie.ErrCommitted.Error()) {
+ t.Fatalf("unexpected state error, %v", copied.Error())
+ }
+ // if !errors.Is(copied.Error(), trie.ErrCommitted) {
+ // t.Fatalf("unexpected state error, %v", copied.Error())
+ // }
+}
+
// TestDeleteCreateRevert tests a weird state transition corner case that we hit
// while changing the internals of StateDB. The workflow is that a contract is
// self-destructed, then in a follow-up transaction (but same block) it's created
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 30742ed3b2..559f37044c 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -29,6 +29,8 @@ import (
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)
// testAccount is the data associated with an account used by the state tests.
@@ -40,24 +42,28 @@ type testAccount struct {
}
// makeTestState create a sample test state to test node-wise reconstruction.
-func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
+func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, common.Hash, []*testAccount) {
// Create an empty state
+ config := &trie.Config{Preimages: true}
+ if scheme == rawdb.PathScheme {
+ config.PathDB = pathdb.Defaults
+ } else {
+ config.HashDB = hashdb.Defaults
+ }
db := rawdb.NewMemoryDatabase()
- sdb := NewDatabase(db)
- state, _ := New(common.Hash{}, sdb, nil)
+ nodeDb := trie.NewDatabase(db, config)
+ sdb := NewDatabaseWithNodeDB(db, nodeDb)
+ state, _ := New(types.EmptyRootHash, sdb, nil)
// Fill it with some arbitrary data
var accounts []*testAccount
for i := byte(0); i < 96; i++ {
obj := state.GetOrNewStateObject(common.BytesToAddress([]byte{i}))
acc := &testAccount{address: common.BytesToAddress([]byte{i})}
-
obj.AddBalance(big.NewInt(int64(11 * i)))
acc.balance = big.NewInt(int64(11 * i))
-
obj.SetNonce(uint64(42 * i))
acc.nonce = uint64(42 * i)
-
if i%3 == 0 {
obj.SetCode(crypto.Keccak256Hash([]byte{i, i, i, i, i}), []byte{i, i, i, i, i})
acc.code = []byte{i, i, i, i, i}
@@ -68,24 +74,27 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) {
obj.SetState(hash, hash)
}
}
- state.updateStateObject(obj)
accounts = append(accounts, acc)
}
root, _ := state.Commit(0, false)
// Return the generated state
- return db, sdb, root, accounts
+ return db, sdb, nodeDb, root, accounts
}
// checkStateAccounts cross references a reconstructed state with an expected
// account array.
-func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) {
+func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root common.Hash, accounts []*testAccount) {
+ var config trie.Config
+ if scheme == rawdb.PathScheme {
+ config.PathDB = pathdb.Defaults
+ }
// Check root availability and state contents
- state, err := New(root, NewDatabase(db), nil)
+ state, err := New(root, NewDatabaseWithConfig(db, &config), nil)
if err != nil {
t.Fatalf("failed to create state trie at %x: %v", root, err)
}
- if err := checkStateConsistency(db, root); err != nil {
+ if err := checkStateConsistency(db, scheme, root); err != nil {
t.Fatalf("inconsistent state trie at %x: %v", root, err)
}
for i, acc := range accounts {
@@ -102,27 +111,35 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accou
}
// checkTrieConsistency checks that all nodes in a (sub-)trie are indeed present.
-func checkTrieConsistency(db ethdb.Database, root common.Hash) error {
+func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error {
+ config := &trie.Config{Preimages: true}
+ if scheme == rawdb.PathScheme {
+ config.PathDB = pathdb.Defaults
+ }
if v, _ := db.Get(root[:]); v == nil {
return nil // Consider a non existent state consistent.
}
- trie, err := trie.New(trie.StateTrieID(root), trie.NewDatabase(db))
+ trie, err := trie.New(trie.StateTrieID(root), trie.NewDatabase(db, config))
if err != nil {
return err
}
- it := trie.NodeIterator(nil)
+ it := trie.MustNodeIterator(nil)
for it.Next(true) {
}
return it.Error()
}
// checkStateConsistency checks that all data of a state root is present.
-func checkStateConsistency(db ethdb.Database, root common.Hash) error {
- // Create and iterate a state trie rooted in a sub-node
- if _, err := db.Get(root.Bytes()); err != nil {
- return nil // Consider a non existent state consistent.
+func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) error {
+ config := &trie.Config{Preimages: true}
+ if scheme == rawdb.PathScheme {
+ config.PathDB = pathdb.Defaults
}
- state, err := New(root, NewDatabase(db), nil)
+ // // Create and iterate a state trie rooted in a sub-node
+ // if _, err := db.Get(root.Bytes()); err != nil {
+ // return err // Consider a non existent state consistent.
+ // }
+ state, err := New(root, NewDatabaseWithConfig(db, config), nil)
if err != nil {
return err
}
@@ -134,9 +151,14 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error {
// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
- db := trie.NewDatabase(rawdb.NewMemoryDatabase())
- empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
- sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil, db.Scheme())
+ dbA := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ dbB := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{PathDB: pathdb.Defaults})
+
+ sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil, dbA.Scheme())
+ if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
+ t.Errorf("content requested for empty state: %v, %v, %v", nodes, paths, codes)
+ }
+ sync = NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil, dbB.Scheme())
if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes)
}
@@ -145,22 +167,29 @@ func TestEmptyStateSync(t *testing.T) {
// Tests that given a root hash, a state can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
func TestIterativeStateSyncIndividual(t *testing.T) {
- testIterativeStateSync(t, 1, false, false)
+
+ testIterativeStateSync(t, 1, false, false, rawdb.HashScheme)
+ testIterativeStateSync(t, 1, false, false, rawdb.PathScheme)
}
func TestIterativeStateSyncBatched(t *testing.T) {
- testIterativeStateSync(t, 100, false, false)
+ testIterativeStateSync(t, 100, false, false, rawdb.HashScheme)
+ testIterativeStateSync(t, 100, false, false, rawdb.PathScheme)
}
func TestIterativeStateSyncIndividualFromDisk(t *testing.T) {
- testIterativeStateSync(t, 1, true, false)
+ testIterativeStateSync(t, 1, true, false, rawdb.HashScheme)
+ testIterativeStateSync(t, 1, true, false, rawdb.PathScheme)
}
func TestIterativeStateSyncBatchedFromDisk(t *testing.T) {
- testIterativeStateSync(t, 100, true, false)
+ testIterativeStateSync(t, 100, true, false, rawdb.HashScheme)
+ testIterativeStateSync(t, 100, true, false, rawdb.PathScheme)
}
func TestIterativeStateSyncIndividualByPath(t *testing.T) {
- testIterativeStateSync(t, 1, false, true)
+ testIterativeStateSync(t, 1, false, true, rawdb.HashScheme)
+ testIterativeStateSync(t, 1, false, true, rawdb.PathScheme)
}
func TestIterativeStateSyncBatchedByPath(t *testing.T) {
- testIterativeStateSync(t, 100, false, true)
+ testIterativeStateSync(t, 100, false, true, rawdb.HashScheme)
+ testIterativeStateSync(t, 100, false, true, rawdb.PathScheme)
}
// stateElement represents the element in the state trie(bytecode or trie node).
@@ -171,17 +200,17 @@ type stateElement struct {
syncPath trie.SyncPath
}
-func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
+func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, scheme string) {
// Create a random state to copy
- _, srcDb, srcRoot, srcAccounts := makeTestState()
+ _, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme)
if commit {
- srcDb.TrieDB().Commit(srcRoot, false)
+ ndb.Commit(srcRoot, false)
}
- srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), srcDb.TrieDB())
+ srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), ndb)
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, ndb.Scheme())
var (
nodeElements []stateElement
@@ -196,9 +225,11 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
})
}
for i := 0; i < len(codes); i++ {
- codeElements = append(codeElements, stateElement{
- code: codes[i],
- })
+ codeElements = append(codeElements, stateElement{code: codes[i]})
+ }
+ reader, err := ndb.Reader(srcRoot)
+ if err != nil {
+ t.Fatalf("failed to create reader for root %x: %v", srcRoot, err)
}
for len(nodeElements)+len(codeElements) > 0 {
var (
@@ -225,7 +256,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
if err := rlp.DecodeBytes(srcTrie.Get(node.syncPath[0]), &acc); err != nil {
t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err)
}
- stTrie, err := trie.New(trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root), srcDb.TrieDB())
+ stTrie, err := trie.New(trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root), ndb)
if err != nil {
t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
}
@@ -236,7 +267,9 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data}
}
} else {
- data, err := srcDb.TrieDB().Node(node.hash)
+ owner, inner := trie.ResolvePath([]byte(node.path))
+
+ data, err := reader.Node(owner, inner, node.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for key %v", []byte(node.path))
}
@@ -276,18 +309,23 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
}
}
// Cross check that the two states are in sync
- checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
+ checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedStateSync(t *testing.T) {
+ testIterativeDelayedStateSync(t, rawdb.HashScheme)
+ testIterativeDelayedStateSync(t, rawdb.PathScheme)
+}
+
+func testIterativeDelayedStateSync(t *testing.T, scheme string) {
// Create a random state to copy
- _, srcDb, srcRoot, srcAccounts := makeTestState()
+ _, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme)
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, ndb.Scheme())
var (
nodeElements []stateElement
@@ -302,10 +340,13 @@ func TestIterativeDelayedStateSync(t *testing.T) {
})
}
for i := 0; i < len(codes); i++ {
- codeElements = append(codeElements, stateElement{
- code: codes[i],
- })
+ codeElements = append(codeElements, stateElement{code: codes[i]})
+ }
+ reader, err := ndb.Reader(srcRoot)
+ if err != nil {
+ t.Fatalf("failed to create reader for root %x: %v", srcRoot, err)
}
+
for len(nodeElements)+len(codeElements) > 0 {
// Sync only half of the scheduled nodes
var nodeProcessd int
@@ -330,7 +371,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1)
for i, element := range nodeElements[:len(nodeResults)] {
owner, inner := trie.ResolvePath([]byte(element.path))
- data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve contract bytecode for %x", element.code)
}
@@ -366,22 +407,28 @@ func TestIterativeDelayedStateSync(t *testing.T) {
}
}
// Cross check that the two states are in sync
- checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
+ checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts)
}
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
-func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) }
-func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomStateSync(t, 100) }
+func TestIterativeRandomStateSyncIndividual(t *testing.T) {
+ testIterativeRandomStateSync(t, 1, rawdb.HashScheme)
+ testIterativeRandomStateSync(t, 1, rawdb.PathScheme)
+}
+func TestIterativeRandomStateSyncBatched(t *testing.T) {
+ testIterativeRandomStateSync(t, 100, rawdb.HashScheme)
+ testIterativeRandomStateSync(t, 100, rawdb.PathScheme)
+}
-func testIterativeRandomStateSync(t *testing.T, count int) {
+func testIterativeRandomStateSync(t *testing.T, count int, scheme string) {
// Create a random state to copy
- _, srcDb, srcRoot, srcAccounts := makeTestState()
+ _, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme)
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, ndb.Scheme())
nodeQueue := make(map[string]stateElement)
codeQueue := make(map[common.Hash]struct{})
@@ -396,6 +443,10 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
for _, hash := range codes {
codeQueue[hash] = struct{}{}
}
+ reader, err := ndb.Reader(srcRoot)
+ if err != nil {
+ t.Fatalf("failed to create reader for root %x: %v", srcRoot, err)
+ }
for len(nodeQueue)+len(codeQueue) > 0 {
// Fetch all the queued nodes in a random order
if len(codeQueue) > 0 {
@@ -417,7 +468,7 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
for path, element := range nodeQueue {
owner, inner := trie.ResolvePath([]byte(element.path))
- data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x %v %v", element.hash, []byte(element.path), element.path)
}
@@ -451,18 +502,23 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
}
}
// Cross check that the two states are in sync
- checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
+ checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (Even those randomly), others sent only later.
func TestIterativeRandomDelayedStateSync(t *testing.T) {
+ testIterativeRandomDelayedStateSync(t, rawdb.HashScheme)
+ testIterativeRandomDelayedStateSync(t, rawdb.PathScheme)
+}
+
+func testIterativeRandomDelayedStateSync(t *testing.T, scheme string) {
// Create a random state to copy
- _, srcDb, srcRoot, srcAccounts := makeTestState()
+ _, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme)
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
- sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, srcDb.TrieDB().Scheme())
+ sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil, ndb.Scheme())
nodeQueue := make(map[string]stateElement)
codeQueue := make(map[common.Hash]struct{})
@@ -477,6 +533,10 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
for _, hash := range codes {
codeQueue[hash] = struct{}{}
}
+ reader, err := ndb.Reader(srcRoot)
+ if err != nil {
+ t.Fatalf("failed to create reader for root %x: %v", srcRoot, err)
+ }
for len(nodeQueue)+len(codeQueue) > 0 {
// Sync only half of the scheduled nodes, even those in random order
if len(codeQueue) > 0 {
@@ -506,7 +566,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
delete(nodeQueue, path)
owner, inner := trie.ResolvePath([]byte(element.path))
- data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x", element.hash)
}
@@ -542,14 +602,19 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
}
}
// Cross check that the two states are in sync
- checkStateAccounts(t, dstDb, srcRoot, srcAccounts)
+ checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts)
}
// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
func TestIncompleteStateSync(t *testing.T) {
+ testIncompleteStateSync(t, rawdb.HashScheme)
+ testIncompleteStateSync(t, rawdb.PathScheme)
+}
+
+func testIncompleteStateSync(t *testing.T, scheme string) {
// Create a random state to copy
- db, srcDb, srcRoot, srcAccounts := makeTestState()
+ db, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme)
// isCodeLookup to save some hashing
var isCode = make(map[common.Hash]struct{})
@@ -559,7 +624,7 @@ func TestIncompleteStateSync(t *testing.T) {
}
}
isCode[common.BytesToHash(emptyCodeHash)] = struct{}{}
- checkTrieConsistency(db, srcRoot)
+ checkTrieConsistency(db, ndb.Scheme(), srcRoot)
// Create a destination state and sync with the scheduler
dstDb := rawdb.NewMemoryDatabase()
@@ -570,6 +635,10 @@ func TestIncompleteStateSync(t *testing.T) {
addedPaths []string
addedHashes []common.Hash
)
+ reader, err := ndb.Reader(srcRoot)
+ if err != nil {
+ t.Fatalf("state is not available %x", srcRoot)
+ }
nodeQueue := make(map[string]stateElement)
codeQueue := make(map[common.Hash]struct{})
paths, nodes, codes := sched.Missing(1)
@@ -583,6 +652,9 @@ func TestIncompleteStateSync(t *testing.T) {
for _, hash := range codes {
codeQueue[hash] = struct{}{}
}
+ if err != nil {
+ t.Fatalf("failed to create reader for root %x: %v", srcRoot, err)
+ }
for len(nodeQueue)+len(codeQueue) > 0 {
// Fetch a batch of state nodes
if len(codeQueue) > 0 {
@@ -607,7 +679,7 @@ func TestIncompleteStateSync(t *testing.T) {
results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
for path, element := range nodeQueue {
owner, inner := trie.ResolvePath([]byte(element.path))
- data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x", element.hash)
}
@@ -635,7 +707,7 @@ func TestIncompleteStateSync(t *testing.T) {
for _, root := range nodehashes {
// Can't use checkStateConsistency here because subtrie keys may have odd
// length and crash in LeafKey.
- if err := checkTrieConsistency(dstDb, root); err != nil {
+ if err := checkTrieConsistency(dstDb, scheme, root); err != nil {
t.Fatalf("state inconsistent: %v", err)
}
}
@@ -658,12 +730,12 @@ func TestIncompleteStateSync(t *testing.T) {
for _, node := range addedCodes {
val := rawdb.ReadCode(dstDb, node)
rawdb.DeleteCode(dstDb, node)
- if err := checkStateConsistency(dstDb, srcRoot); err == nil {
+ if err := checkStateConsistency(dstDb, ndb.Scheme(), srcRoot); err == nil {
t.Errorf("trie inconsistency not caught, missing: %x", node)
}
rawdb.WriteCode(dstDb, node, val)
}
- scheme := srcDb.TrieDB().Scheme()
+
for i, path := range addedPaths {
owner, inner := trie.ResolvePath([]byte(path))
hash := addedHashes[i]
@@ -672,8 +744,10 @@ func TestIncompleteStateSync(t *testing.T) {
t.Error("missing trie node")
}
rawdb.DeleteTrieNode(dstDb, owner, inner, hash, scheme)
- if err := checkStateConsistency(dstDb, srcRoot); err == nil {
+
+ if err := checkStateConsistency(dstDb, ndb.Scheme(), srcRoot); err == nil {
t.Errorf("trie inconsistency not caught, missing: %v", path)
+
}
rawdb.WriteTrieNode(dstDb, owner, inner, hash, val, scheme)
}
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index 30fdf55606..c4556bd622 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -106,7 +106,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
@@ -241,7 +241,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
@@ -281,7 +281,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
@@ -335,7 +335,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
tooBigInitCode = [params.MaxInitCodeSize + 1]byte{}
smallInitCode = [320]byte{}
@@ -398,7 +398,7 @@ func TestStateProcessorErrors(t *testing.T) {
},
},
}
- genesis = gspec.MustCommit(db)
+ genesis = gspec.MustCommit(db, trie.NewDatabase(db, newDbConfig(rawdb.HashScheme)))
blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
defer blockchain.Stop()
@@ -496,7 +496,8 @@ func TestBlobTxStateTransition(t *testing.T) {
Alloc: GenesisAlloc{addr: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(gendb)
+ triedb = trie.NewDatabase(gendb, nil)
+ genesis = gspec.MustCommit(gendb, triedb)
signer = types.LatestSigner(gspec.Config)
)
gspec.Config.ConsortiumV2Block = common.Big0
@@ -574,7 +575,8 @@ func TestBaseFee(t *testing.T) {
Alloc: GenesisAlloc{addr: {Balance: big.NewInt(int64(initialFund))}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(gendb)
+ triedb = trie.NewDatabase(gendb, nil)
+ genesis = gspec.MustCommit(gendb, triedb)
signer = types.LatestSigner(gspec.Config)
)
gspec.Config.ConsortiumV2Block = common.Big0
diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go
index 44726c9cbb..b9df0c1e84 100644
--- a/core/types/hashing_test.go
+++ b/core/types/hashing_test.go
@@ -39,7 +39,7 @@ func TestDeriveSha(t *testing.T) {
t.Fatal(err)
}
for len(txs) < 1000 {
- exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())))
+ exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(txs, trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp)
@@ -86,7 +86,7 @@ func BenchmarkDeriveSha200(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())))
+ exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
}
})
@@ -107,7 +107,7 @@ func TestFuzzDeriveSha(t *testing.T) {
rndSeed := mrand.Int()
for i := 0; i < 10; i++ {
seed := rndSeed + i
- exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())))
+ exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
printList(newDummy(seed))
@@ -135,7 +135,7 @@ func TestDerivableList(t *testing.T) {
},
}
for i, tc := range tcs[1:] {
- exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())))
+ exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("case %d: got %x exp %x", i, got, exp)
diff --git a/core/vote/vote_pool_test.go b/core/vote/vote_pool_test.go
index 67dce2b77b..3c049d4e2c 100644
--- a/core/vote/vote_pool_test.go
+++ b/core/vote/vote_pool_test.go
@@ -15,6 +15,7 @@ import (
wallet "github.com/ethereum/go-ethereum/accounts/bls"
"github.com/ethereum/go-ethereum/crypto/bls"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/google/uuid"
keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4"
@@ -100,7 +101,7 @@ func testVotePool(t *testing.T, isValidRules bool) {
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
mux := new(event.TypeMux)
@@ -396,7 +397,7 @@ func TestVotePoolDosProtection(t *testing.T) {
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 25, nil, true)
@@ -523,7 +524,7 @@ func TestVotePoolWrongTargetNumber(t *testing.T) {
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
bs, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1, nil, true)
diff --git a/eth/api.go b/eth/api.go
index 29558cdedf..01c59e2f1e 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -470,7 +470,12 @@ func (api *PrivateDebugAPI) StorageRangeAt(ctx context.Context, blockHash common
}
func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) {
- it := trie.NewIterator(st.NodeIterator(start))
+
+ trieIt, err := st.NodeIterator(start)
+ if err != nil {
+ return StorageRangeResult{}, err
+ }
+ it := trie.NewIterator(trieIt)
result := StorageRangeResult{Storage: storageMap{}}
for i := 0; i < maxResult && it.Next(); i++ {
_, content, _, err := rlp.Split(it.Value)
@@ -551,7 +556,7 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc
if startBlock.Number().Uint64() >= endBlock.Number().Uint64() {
return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64())
}
- triedb := api.eth.BlockChain().StateCache().TrieDB()
+ triedb := api.eth.BlockChain().TrieDB()
oldTrie, err := trie.NewSecure(trie.StateTrieID(startBlock.Root()), triedb)
if err != nil {
@@ -561,7 +566,15 @@ func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Bloc
if err != nil {
return nil, err
}
- diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))
+ oldIt, err := oldTrie.NodeIterator([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ newIt, err := newTrie.NodeIterator([]byte{})
+ if err != nil {
+ return nil, err
+ }
+ diff, _ := trie.NewDifferenceIterator(oldIt, newIt)
iter := trie.NewIterator(diff)
var dirty []common.Address
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 93637e7c17..b7e4b1c2ae 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -417,7 +417,7 @@ func (b *EthAPIBackend) StartMining(threads int) error {
}
func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) {
- return b.eth.StateAtBlock(ctx, block, reexec, base, checkLive, preferDisk)
+ return b.eth.stateAtBlock(ctx, block, reexec, base, checkLive, preferDisk)
}
func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) {
diff --git a/eth/api_test.go b/eth/api_test.go
index 86812fce5b..01b5d1b2b3 100644
--- a/eth/api_test.go
+++ b/eth/api_test.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/trie"
)
@@ -67,34 +68,34 @@ func TestAccountRange(t *testing.T) {
t.Parallel()
var (
- statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true})
- state, _ = state.New(common.Hash{}, statedb, nil)
- addrs = [AccountRangeMaxResults * 2]common.Address{}
- m = map[common.Address]bool{}
+ statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true})
+ sdb, _ = state.New(types.EmptyRootHash, statedb, nil)
+ addrs = [AccountRangeMaxResults * 2]common.Address{}
+ m = map[common.Address]bool{}
)
for i := range addrs {
hash := common.HexToHash(fmt.Sprintf("%x", i))
addr := common.BytesToAddress(crypto.Keccak256Hash(hash.Bytes()).Bytes())
addrs[i] = addr
- state.SetBalance(addrs[i], big.NewInt(1))
+ sdb.SetBalance(addrs[i], big.NewInt(1))
if _, ok := m[addr]; ok {
t.Fatalf("bad")
} else {
m[addr] = true
}
}
- state.Commit(0, true)
- root := state.IntermediateRoot(true)
+ root, _ := sdb.Commit(0, true)
+ sdb, _ = state.New(root, statedb, nil)
trie, err := statedb.OpenTrie(root)
if err != nil {
t.Fatal(err)
}
- accountRangeTest(t, &trie, state, common.Hash{}, AccountRangeMaxResults/2, AccountRangeMaxResults/2)
+ accountRangeTest(t, &trie, sdb, common.Hash{}, AccountRangeMaxResults/2, AccountRangeMaxResults/2)
// test pagination
- firstResult := accountRangeTest(t, &trie, state, common.Hash{}, AccountRangeMaxResults, AccountRangeMaxResults)
- secondResult := accountRangeTest(t, &trie, state, common.BytesToHash(firstResult.Next), AccountRangeMaxResults, AccountRangeMaxResults)
+ firstResult := accountRangeTest(t, &trie, sdb, common.Hash{}, AccountRangeMaxResults, AccountRangeMaxResults)
+ secondResult := accountRangeTest(t, &trie, sdb, common.BytesToHash(firstResult.Next), AccountRangeMaxResults, AccountRangeMaxResults)
hList := make(resultHash, 0)
for addr1 := range firstResult.Accounts {
@@ -112,7 +113,7 @@ func TestAccountRange(t *testing.T) {
// set and get an even split between the first and second sets.
sort.Sort(hList)
middleH := hList[AccountRangeMaxResults/2]
- middleResult := accountRangeTest(t, &trie, state, middleH, AccountRangeMaxResults, AccountRangeMaxResults)
+ middleResult := accountRangeTest(t, &trie, sdb, middleH, AccountRangeMaxResults, AccountRangeMaxResults)
missing, infirst, insecond := 0, 0, 0
for h := range middleResult.Accounts {
if _, ok := firstResult.Accounts[h]; ok {
@@ -141,8 +142,9 @@ func TestEmptyAccountRange(t *testing.T) {
statedb = state.NewDatabase(rawdb.NewMemoryDatabase())
st, _ = state.New(common.Hash{}, statedb, nil)
)
+ // Commit(although nothing to flush) and re-init the statedb
st.Commit(0, true)
- st.IntermediateRoot(true)
+ st, _ = state.New(types.EmptyRootHash, statedb, nil)
results := st.IteratorDump(&state.DumpConfig{
SkipCode: true,
SkipStorage: true,
diff --git a/eth/backend.go b/eth/backend.go
index 3850b94458..12cdafeb3d 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -141,8 +141,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
return nil, err
}
- if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil {
- log.Error("Failed to recover state", "error", err)
+ // Recover the pruning data only in hash scheme
+ if config.StateScheme == rawdb.HashScheme {
+ if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil {
+ log.Error("Failed to recover state", "error", err)
+ }
}
eth := &Ethereum{
config: config,
@@ -199,9 +202,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
SnapshotLimit: config.SnapshotCache,
Preimages: config.Preimages,
TriesInMemory: config.TriesInMemory,
+ StateHistory: config.StateHistory,
+ StateScheme: config.StateScheme,
}
)
- eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, config.OverrideArrowGlacier, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
+ eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, config.OverrideArrowGlacier, eth.engine, vmConfig, eth.shouldPreserve, &config.TransactionHistory)
chainConfig := eth.blockchain.Config()
genesisHash := eth.blockchain.Genesis().Hash()
if err != nil {
@@ -599,7 +604,7 @@ func (s *Ethereum) StartMining(threads int) error {
}
// If mining is started, we can disable the transaction rejection mechanism
// introduced to speed sync times.
- atomic.StoreUint32(&s.handler.acceptTxs, 1)
+ s.handler.enableSyncedFeatures()
go s.miner.Start(eb)
}
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index bc66329217..990a1d30ba 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
var (
@@ -57,7 +58,7 @@ func generateTestChain() (*core.Genesis, []*types.Block) {
}
gblock := genesis.ToBlock()
engine := ethash.NewFaker()
- genesis.MustCommit(db)
+ genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
blocks, _ := core.GenerateChain(config, gblock, engine, db, 10, generate, true)
blocks = append([]*types.Block{gblock}, blocks...)
return genesis, blocks
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 49804de0fa..8c660d0e86 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -88,7 +88,7 @@ func newTester() *downloadTester {
}
tester.stateDb = rawdb.NewMemoryDatabase()
tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
- tester.triedb = trie.NewDatabase(tester.stateDb)
+ tester.triedb = trie.NewDatabase(tester.stateDb, nil)
tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer, tester.verifyBlobHeader)
return tester
@@ -234,7 +234,7 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
// For now only check that the state trie is correct
if block := dl.GetBlockByHash(hash); block != nil {
- _, err := trie.NewSecure(trie.StateTrieID(block.Root()), trie.NewDatabase(dl.stateDb))
+ _, err := trie.NewSecure(trie.StateTrieID(block.Root()), trie.NewDatabase(dl.stateDb, nil))
return err
}
return fmt.Errorf("non existent block: %x", hash[:4])
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index 4e7f818135..c50ed367b0 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -298,7 +298,7 @@ type codeTask struct {
func newStateSync(d *Downloader, root common.Hash) *stateSync {
// Hack the node scheme here. It's a dead code is not used
// by light client at all. Just aim for passing tests.
- scheme := trie.NewDatabase(rawdb.NewMemoryDatabase()).Scheme()
+ scheme := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil).Scheme()
return &stateSync{
d: d,
root: root,
diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go
index 01ebca97a4..2f081a080f 100644
--- a/eth/downloader/testchain_test.go
+++ b/eth/downloader/testchain_test.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
// Test chain parameters.
@@ -40,7 +41,7 @@ var (
Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- testGenesis = testGspec.MustCommit(testDB)
+ testGenesis = testGspec.MustCommit(testDB, trie.NewDatabase(testDB, nil))
)
// The common prefix of all test chains:
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index ac257d8a68..d0e8d22093 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/eth/downloader"
@@ -78,6 +79,9 @@ var Defaults = Config{
},
NetworkId: 1,
TxLookupLimit: 2350000,
+ TransactionHistory: 2350000,
+ StateHistory: params.FullImmutabilityThreshold,
+ StateScheme: rawdb.HashScheme,
LightPeers: 100,
UltraLightFraction: 75,
DatabaseCache: 512,
@@ -141,8 +145,13 @@ type Config struct {
NoPruning bool // Whether to disable pruning and flush everything to disk
NoPrefetch bool // Whether to disable prefetching and only load state on demand
+ // Deprecated, use 'TransactionHistory' instead.
TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
+ TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
+ StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved.
+ StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top
+
// Whitelist of required block number -> hash values to accept
Whitelist map[uint64]common.Hash `toml:"-"`
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 0af7eeced0..3f86095dae 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -27,6 +27,9 @@ func (c Config) MarshalTOML() (interface{}, error) {
NoPruning bool
NoPrefetch bool
TxLookupLimit uint64 `toml:",omitempty"`
+ TransactionHistory uint64 `toml:",omitempty"`
+ StateHistory uint64 `toml:",omitempty"`
+ StateScheme string `toml:",omitempty"`
Whitelist map[uint64]common.Hash `toml:"-"`
LightServ int `toml:",omitempty"`
LightIngress int `toml:",omitempty"`
@@ -69,6 +72,9 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.NoPruning = c.NoPruning
enc.NoPrefetch = c.NoPrefetch
enc.TxLookupLimit = c.TxLookupLimit
+ enc.TransactionHistory = c.TransactionHistory
+ enc.StateHistory = c.StateHistory
+ enc.StateScheme = c.StateScheme
enc.Whitelist = c.Whitelist
enc.LightServ = c.LightServ
enc.LightIngress = c.LightIngress
@@ -115,6 +121,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
NoPruning *bool
NoPrefetch *bool
TxLookupLimit *uint64 `toml:",omitempty"`
+ TransactionHistory *uint64 `toml:",omitempty"`
+ StateHistory *uint64 `toml:",omitempty"`
+ StateScheme *string `toml:",omitempty"`
Whitelist map[uint64]common.Hash `toml:"-"`
LightServ *int `toml:",omitempty"`
LightIngress *int `toml:",omitempty"`
@@ -176,6 +185,15 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.TxLookupLimit != nil {
c.TxLookupLimit = *dec.TxLookupLimit
}
+ if dec.TransactionHistory != nil {
+ c.TransactionHistory = *dec.TransactionHistory
+ }
+ if dec.StateHistory != nil {
+ c.StateHistory = *dec.StateHistory
+ }
+ if dec.StateScheme != nil {
+ c.StateScheme = *dec.StateScheme
+ }
if dec.Whitelist != nil {
c.Whitelist = dec.Whitelist
}
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go
index ebb025d9a5..bfe5269b11 100644
--- a/eth/fetcher/block_fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -42,7 +42,7 @@ var (
Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(testdb)
+ genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, nil))
unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil))
)
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index d30c9f9592..9aa34d9a98 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -38,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
)
var (
@@ -164,7 +165,7 @@ func TestFinalizedBlockSubscription(t *testing.T) {
db = rawdb.NewMemoryDatabase()
backend = &testBackend{db: db}
api = NewPublicFilterAPI(backend, false, deadline)
- genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, nil))
chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}, true)
chainEvents = []core.ChainEvent{}
)
@@ -225,7 +226,7 @@ func TestBlockSubscription(t *testing.T) {
db = rawdb.NewMemoryDatabase()
backend = &testBackend{db: db}
api = NewPublicFilterAPI(backend, false, deadline)
- genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
+ genesis = (&core.Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db, trie.NewDatabase(db, nil))
chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}, true)
chainEvents = []core.ChainEvent{}
)
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index e79b3dc9a0..4cd9961006 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
func makeReceipt(addr common.Address) *types.Receipt {
@@ -65,7 +66,7 @@ func BenchmarkFilters(b *testing.B) {
)
defer db.Close()
- gspec.MustCommit(db)
+ gspec.MustCommit(db, trie.NewDatabase(db, nil))
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 100010, func(i int, gen *core.BlockGen) {
switch i {
@@ -132,7 +133,7 @@ func TestFilters(t *testing.T) {
)
defer db.Close()
- gspec.MustCommit(db)
+ gspec.MustCommit(db, trie.NewDatabase(db, nil))
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) {
switch i {
diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go
index e35f2eab20..cc3ca5a04e 100644
--- a/eth/gasprice/gasprice_test.go
+++ b/eth/gasprice/gasprice_test.go
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
)
const testHead = 32
@@ -110,7 +111,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
config.ArrowGlacierBlock = londonBlock
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
// Generate testing blocks
blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, testHead+1, func(i int, b *core.BlockGen) {
@@ -141,7 +142,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
}, true)
// Construct testing chain
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb)
+ gspec.MustCommit(diskdb, trie.NewDatabase(diskdb, nil))
chain, err := core.NewBlockChain(diskdb, &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create local chain, %v", err)
diff --git a/eth/handler.go b/eth/handler.go
index 29efa79309..c6353e650e 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vote"
@@ -44,6 +45,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"golang.org/x/crypto/sha3"
)
@@ -241,7 +243,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
}
n, err := h.chain.InsertChain(blocks, sidecars)
if err == nil {
- atomic.StoreUint32(&h.acceptTxs, 1) // Mark initial sync done on any fetcher import
+ h.enableSyncedFeatures() // Mark initial sync done on any fetcher import
}
return n, err
}
@@ -700,3 +702,12 @@ func (h *handler) voteBroadcastLoop() {
}
}
}
+
+// enableSyncedFeatures enables the post-sync functionalities when the initial
+// sync is finished.
+func (h *handler) enableSyncedFeatures() {
+ atomic.StoreUint32(&h.acceptTxs, 1)
+ if h.chain.TrieDB().Scheme() == rawdb.PathScheme {
+ h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize)
+ }
+}
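
The intent of the new helper: during snap sync the path database's aggregation buffer is kept large to batch trie writes, and once the node is in sync it drops back to `pathdb.DefaultBufferSize` so less unflushed state is at risk on a crash. A hedged sketch of that policy follows; only `Scheme`, `SetBufferSize` and `pathdb.DefaultBufferSize` appear in the patch, the during-sync doubling is an assumption for illustration.

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

// tuneSyncBuffer sketches the policy behind enableSyncedFeatures: batch
// writes with a big buffer while syncing, shrink it once the chain is live.
func tuneSyncBuffer(triedb *trie.Database, syncing bool) {
	if triedb.Scheme() != rawdb.PathScheme {
		return // the hash scheme has no such aggregation buffer to tune
	}
	if syncing {
		triedb.SetBufferSize(2 * pathdb.DefaultBufferSize) // assumed sizing
	} else {
		triedb.SetBufferSize(pathdb.DefaultBufferSize)
	}
}
```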
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index da85e829dc..73f16a4771 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -117,8 +117,8 @@ func testForkIDSplit(t *testing.T, protocol uint) {
gspecNoFork = &core.Genesis{Config: configNoFork}
gspecProFork = &core.Genesis{Config: configProFork}
- genesisNoFork = gspecNoFork.MustCommit(dbNoFork)
- genesisProFork = gspecProFork.MustCommit(doFork)
+ genesisNoFork = gspecNoFork.MustCommit(dbNoFork, trie.NewDatabase(dbNoFork, nil))
+ genesisProFork = gspecProFork.MustCommit(doFork, trie.NewDatabase(doFork, nil))
chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil, nil)
chainProFork, _ = core.NewBlockChain(doFork, nil, gspecProFork, nil, engine, vm.Config{}, nil, nil)
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 137cd52e97..58dc896a37 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/holiman/uint256"
)
@@ -147,7 +148,7 @@ func newTestHandlerWithBlocks(blocks int) *testHandler {
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
}
- gspec.MustCommit(db)
+ gspec.MustCommit(db, trie.NewDatabase(db, nil))
chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
@@ -222,7 +223,7 @@ func newTestHandlerWithBlocks100(blocks int) (*testHandler, []*types.BlobTxSidec
},
},
}
- gspec.MustCommit(db)
+ gspec.MustCommit(db, trie.NewDatabase(db, nil))
chain, err := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
panic(err)
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index a038316347..56f465774e 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -70,7 +70,7 @@ func newTestBackendWithGenerator(blocks int, generator func(int, *core.BlockGen)
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}},
}
- gspec.MustCommit(db)
+ gspec.MustCommit(db, trie.NewDatabase(db, nil))
chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index 8444f56be3..95743577fb 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -21,6 +21,7 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
@@ -202,6 +203,10 @@ func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error {
}
func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer) [][]byte {
+ // Requesting nodes by hash is not supported in the path-based scheme.
+ if backend.Chain().TrieDB().Scheme() == rawdb.PathScheme {
+ return nil
+ }
// Gather state data until the fetch or network limits are reached
var (
bytes int
@@ -217,7 +222,7 @@ func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer
// Only lookup the trie node if there's chance that we actually have it
continue
}
- entry, err := backend.Chain().TrieNode(hash)
+ entry, err := backend.Chain().TrieDB().Node(hash)
if len(entry) == 0 || err != nil {
// Read the contract code with prefix only to save unnecessary lookups.
entry, err = backend.Chain().ContractCodeWithPrefix(hash)
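
The guard above exists because the two schemes key nodes differently: the hash scheme can resolve a node from its hash alone, while the path scheme keys nodes by trie path, so a bare hash is unanswerable. A sketch of the resulting serving rule, using only calls visible in this patch:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

// nodeByHash mirrors the rule in answerGetNodeDataQuery: a hash-keyed node
// lookup is only answerable when the backing store keys nodes by hash.
// Under the path scheme the peer simply gets an empty response.
func nodeByHash(triedb *trie.Database, hash common.Hash) ([]byte, error) {
	if triedb.Scheme() == rawdb.PathScheme {
		return nil, nil
	}
	return triedb.Node(hash)
}
```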
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index 235340fa96..0cc6f0d1de 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -165,7 +165,7 @@ func handleMessage(backend Backend, peer *Peer) error {
req.Bytes = softResponseLimit
}
// Retrieve the requested state and bail out if non existent
- tr, err := trie.New(trie.StateTrieID(req.Root), backend.Chain().StateCache().TrieDB())
+ tr, err := trie.New(trie.StateTrieID(req.Root), backend.Chain().TrieDB())
if err != nil {
return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
}
@@ -315,7 +315,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if origin != (common.Hash{}) || abort {
// Request started at a non-zero hash or was capped prematurely, add
// the endpoint Merkle proofs
- accTrie, err := trie.New(trie.StateTrieID(req.Root), backend.Chain().StateCache().TrieDB())
+ accTrie, err := trie.New(trie.StateTrieID(req.Root), backend.Chain().TrieDB())
if err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
@@ -323,7 +323,7 @@ func handleMessage(backend Backend, peer *Peer) error {
if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
- stTrie, err := trie.New(trie.StorageTrieID(req.Root, account, acc.Root), backend.Chain().StateCache().TrieDB())
+ stTrie, err := trie.New(trie.StorageTrieID(req.Root, account, acc.Root), backend.Chain().TrieDB())
if err != nil {
return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})
}
@@ -428,7 +428,7 @@ func handleMessage(backend Backend, peer *Peer) error {
req.Bytes = softResponseLimit
}
// Make sure we have the state associated with the request
- triedb := backend.Chain().StateCache().TrieDB()
+ triedb := backend.Chain().TrieDB()
accTrie, err := trie.NewSecure(trie.StateTrieID(req.Root), triedb)
if err != nil {
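
The snap handler now reads tries straight from the chain's trie database rather than detouring through the state cache; every open follows the same ID-plus-database pattern. A condensed sketch (root, account and storage root are placeholders taken from a request in the real code):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// openTries shows the ID-plus-database pattern now used by the snap handler.
func openTries(triedb *trie.Database, root, account, storageRoot common.Hash) (*trie.Trie, *trie.Trie, error) {
	accTrie, err := trie.New(trie.StateTrieID(root), triedb)
	if err != nil {
		return nil, nil, err
	}
	stTrie, err := trie.New(trie.StorageTrieID(root, account, storageRoot), triedb)
	if err != nil {
		return nil, nil, err
	}
	return accTrie, stTrie, nil
}
```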
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index ecfc76fe9e..fcf3373740 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -560,6 +561,11 @@ func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Has
// the remote side does not do any follow-up requests
func TestSyncBloatedProof(t *testing.T) {
t.Parallel()
+ testSyncBloatedProof(t, rawdb.HashScheme)
+ testSyncBloatedProof(t, rawdb.PathScheme)
+}
+
+func testSyncBloatedProof(t *testing.T, scheme string) {
var (
once sync.Once
@@ -571,7 +577,7 @@ func TestSyncBloatedProof(t *testing.T) {
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 100)
source := newTestPeer("source", t, term)
source.accountTrie = sourceAccountTrie
source.accountValues = elems
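
From here on, every sync test in this file is split into a named wrapper and a scheme-parameterized body, so each scenario runs under both node schemes. The pattern, shown once in isolation (the names are illustrative, not part of the patch):

```go
package example

import (
	"testing"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

// TestExample shows the wrapper/body split applied to every sync test in
// this file: the named test fans out to a body parameterized by scheme.
func TestExample(t *testing.T) {
	t.Parallel()
	testExample(t, rawdb.HashScheme)
	testExample(t, rawdb.PathScheme)
}

func testExample(t *testing.T, scheme string) {
	// A real body threads scheme into the trie builders, e.g.
	// makeAccountTrieNoStorage(scheme, 100), and into verifyTrie.
	_ = scheme
}
```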
@@ -639,6 +645,11 @@ func setupSyncer(scheme string, peers ...*testPeer) *Syncer {
func TestSync(t *testing.T) {
t.Parallel()
+ testSync(t, rawdb.HashScheme)
+ testSync(t, rawdb.PathScheme)
+}
+
+func testSync(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -648,7 +659,7 @@ func TestSync(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -660,14 +671,18 @@ func TestSync(t *testing.T) {
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a
// panic within the prover
func TestSyncTinyTriePanic(t *testing.T) {
t.Parallel()
+ testSyncTinyTriePanic(t, rawdb.HashScheme)
+ testSyncTinyTriePanic(t, rawdb.PathScheme)
+}
+func testSyncTinyTriePanic(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -677,7 +692,7 @@ func TestSyncTinyTriePanic(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 1)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -691,13 +706,17 @@ func TestSyncTinyTriePanic(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSync tests a basic sync with multiple peers
func TestMultiSync(t *testing.T) {
t.Parallel()
+ testMultiSync(t, rawdb.HashScheme)
+ testMultiSync(t, rawdb.PathScheme)
+}
+func testMultiSync(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -707,7 +726,7 @@ func TestMultiSync(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -721,13 +740,19 @@ func TestMultiSync(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorage tests basic sync using accounts + storage + code
func TestSyncWithStorage(t *testing.T) {
t.Parallel()
+ testSyncWithStorage(t, rawdb.HashScheme)
+ testSyncWithStorage(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorage(t *testing.T, scheme string) {
+
var (
once sync.Once
cancel = make(chan struct{})
@@ -737,7 +762,8 @@ func TestSyncWithStorage(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)
+ // Create 3 accounts, each with 3000 storage slots; attach code to the accounts and add no boundary elements.
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -753,13 +779,17 @@ func TestSyncWithStorage(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
func TestMultiSyncManyUseless(t *testing.T) {
t.Parallel()
+ testMultiSyncManyUseless(t, rawdb.HashScheme)
+ testMultiSyncManyUseless(t, rawdb.PathScheme)
+}
+func testMultiSyncManyUseless(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -769,7 +799,7 @@ func TestMultiSyncManyUseless(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -801,11 +831,17 @@ func TestMultiSyncManyUseless(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
+ t.Parallel()
+ testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme)
+ testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme)
+}
+
+func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -815,7 +851,7 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -853,11 +889,17 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all
func TestMultiSyncManyUnresponsive(t *testing.T) {
+ t.Parallel()
+ testMultiSyncManyUnresponsive(t, rawdb.HashScheme)
+ testMultiSyncManyUnresponsive(t, rawdb.PathScheme)
+}
+
+func testMultiSyncManyUnresponsive(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -867,7 +909,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -888,7 +930,8 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
return source
}
- syncer := setupSyncer(nodeScheme,
+ syncer := setupSyncer(
+ nodeScheme,
mkSource("full", true, true, true),
mkSource("noAccounts", false, true, true),
mkSource("noStorage", true, false, true),
@@ -902,7 +945,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
func checkStall(t *testing.T, term func()) chan struct{} {
@@ -923,7 +966,11 @@ func checkStall(t *testing.T, term func()) chan struct{} {
// account trie has a few boundary elements.
func TestSyncBoundaryAccountTrie(t *testing.T) {
t.Parallel()
+ testSyncBoundaryAccountTrie(t, rawdb.HashScheme)
+ testSyncBoundaryAccountTrie(t, rawdb.PathScheme)
+}
+func testSyncBoundaryAccountTrie(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -933,7 +980,7 @@ func TestSyncBoundaryAccountTrie(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000)
+ nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -950,14 +997,18 @@ func TestSyncBoundaryAccountTrie(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is
// consistently returning very small results
func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
t.Parallel()
+ testSyncNoStorageAndOneCappedPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme)
+}
+func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -967,7 +1018,7 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 3000)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -991,14 +1042,18 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver
// code requests properly.
func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
t.Parallel()
+ testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme)
+}
+func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1008,7 +1063,7 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 3000)
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1030,12 +1085,16 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
t.Parallel()
+ testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme)
+}
+func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1045,7 +1104,7 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 3000)
mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1067,14 +1126,18 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes
// one by one
func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
t.Parallel()
+ testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme)
+ testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme)
+}
+func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1084,7 +1147,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 3000)
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1116,14 +1179,18 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {
if threshold := 100; counter > threshold {
t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter)
}
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the
// storage trie has a few boundary elements.
func TestSyncBoundaryStorageTrie(t *testing.T) {
t.Parallel()
+ testSyncBoundaryStorageTrie(t, rawdb.HashScheme)
+ testSyncBoundaryStorageTrie(t, rawdb.PathScheme)
+}
+func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1133,7 +1200,7 @@ func TestSyncBoundaryStorageTrie(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1152,14 +1219,18 @@ func TestSyncBoundaryStorageTrie(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is
// consistently returning very small results
func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
t.Parallel()
+ testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme)
+ testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme)
+}
+func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1169,7 +1240,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false)
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
@@ -1193,14 +1264,19 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is
// sometimes sending bad proofs
func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
t.Parallel()
+ testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme)
+ testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1210,7 +1286,7 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1233,12 +1309,16 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
t.Parallel()
+ testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme)
+ testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme)
+}
+func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1248,7 +1328,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false)
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
@@ -1270,7 +1350,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
t.Fatalf("sync failed: %v", err)
}
close(done)
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
// TestSyncWithStorage tests basic sync using accounts + storage + code, against
@@ -1279,6 +1359,11 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) {
// did not mark the account for healing.
func TestSyncWithStorageMisbehavingProve(t *testing.T) {
t.Parallel()
+ testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme)
+ testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme)
+}
+
+func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
var (
once sync.Once
cancel = make(chan struct{})
@@ -1288,7 +1373,7 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)
+ nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1303,7 +1388,7 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) {
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}
type kv struct {
@@ -1356,10 +1441,10 @@ func getCodeByHash(hash common.Hash) []byte {
}
// makeAccountTrieNoStorage spits out a trie, along with the leaves
-func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
+func makeAccountTrieNoStorage(scheme string, n int) (string, *trie.Trie, entrySlice) {
// Create an empty trie
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries entrySlice
)
@@ -1391,12 +1476,12 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
// makeBoundaryAccountTrie constructs an account trie. Instead of filling
// accounts normally, this function will fill a few accounts which have
// boundary hash.
-func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
+func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
)
// Initialize boundaries
@@ -1451,9 +1536,9 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account
// has a unique storage set. When code is true, a random code hash is attached to each account.
-func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
+func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries entrySlice
storageRoots = make(map[common.Hash]common.Hash)
@@ -1504,9 +1589,9 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
}
// makeAccountTrieWithStorage spits out a trie, along with the leaves
-func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
+func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool) (string, *trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries entrySlice
storageRoots = make(map[common.Hash]common.Hash)
@@ -1562,7 +1647,8 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
}
for i := uint64(1); i <= uint64(accounts); i++ {
key := key32(i)
- trie, err := trie.New(trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)]), db)
+ id := trie.StorageTrieID(root, common.BytesToHash(key), storageRoots[common.BytesToHash(key)])
+ trie, err := trie.New(id, db)
if err != nil {
panic(err)
}
@@ -1575,7 +1661,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
// that tries are unique.
func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) {
- trie, _ := trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
+ trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
var entries entrySlice
for i := uint64(1); i <= n; i++ {
// store 'x' at slot 'x'
@@ -1601,7 +1687,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
var (
entries entrySlice
boundaries []common.Hash
- trie, _ = trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
+ trie, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
)
// Initialize boundaries
var next common.Hash
@@ -1645,15 +1731,15 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
return root, nodes, entries
}
-func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
+func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
- triedb := trie.NewDatabase(rawdb.NewDatabase(db))
+ triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
accTrie, err := trie.New(trie.StateTrieID(root), triedb)
if err != nil {
t.Fatal(err)
}
accounts, slots := 0, 0
- accIt := trie.NewIterator(accTrie.NodeIterator(nil))
+ accIt := trie.NewIterator(accTrie.MustNodeIterator(nil))
for accIt.Next() {
var acc struct {
Nonce uint64
@@ -1665,12 +1751,14 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
log.Crit("Invalid account encountered during snapshot creation", "err", err)
}
accounts++
- if acc.Root != emptyRoot {
- storeTrie, err := trie.NewSecure(trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root), triedb)
+ if acc.Root != types.EmptyRootHash {
+ id := trie.StorageTrieID(root, common.BytesToHash(accIt.Key), acc.Root)
+ storeTrie, err := trie.NewSecure(id, triedb)
if err != nil {
t.Fatal(err)
}
- storeIt := trie.NewIterator(storeTrie.NodeIterator(nil))
+
+ storeIt := trie.NewIterator(storeTrie.MustNodeIterator(nil))
for storeIt.Next() {
slots++
}
@@ -1688,6 +1776,12 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
// TestSyncAccountPerformance tests how efficient the snap algo is at minimizing
// state healing
func TestSyncAccountPerformance(t *testing.T) {
+ t.Parallel()
+ testSyncAccountPerformance(t, rawdb.HashScheme)
+ testSyncAccountPerformance(t, rawdb.PathScheme)
+}
+
+func testSyncAccountPerformance(t *testing.T, scheme string) {
// Set the account concurrency to 1. This _should_ result in the
// range root becoming correct, and there should be no healing needed
defer func(old int) { accountConcurrency = old }(accountConcurrency)
@@ -1702,7 +1796,7 @@ func TestSyncAccountPerformance(t *testing.T) {
})
}
)
- nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+ nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 100)
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
@@ -1715,7 +1809,7 @@ func TestSyncAccountPerformance(t *testing.T) {
if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
t.Fatalf("sync failed: %v", err)
}
- verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
+ verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
// The trie root will always be requested, since it is added when the snap
// sync cycle starts. When popping the queue, we do not look it up again.
// Doing so would bring this number down to zero in this artificial testcase,
@@ -1775,3 +1869,10 @@ func TestSlotEstimation(t *testing.T) {
}
}
}
+
+func newDbConfig(scheme string) *trie.Config {
+ if scheme == rawdb.HashScheme {
+ return &trie.Config{}
+ }
+ return &trie.Config{PathDB: pathdb.Defaults}
+}
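
A short usage sketch of the helper: paired with `trie.NewDatabase`, it gives tests an in-memory trie database for either scheme. This assumes the same test package, since `newDbConfig` is unexported.

```go
// newSchemeDatabase is a sketch showing how the tests above pair
// newDbConfig with trie.NewDatabase; it assumes the same package.
func newSchemeDatabase(scheme string) *trie.Database {
	return trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
}
```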
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 06bbfdbd46..c4cac850bf 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/consortium"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -38,38 +39,24 @@ import (
// for releasing state.
var noopReleaser = tracers.StateReleaseFunc(func() {})
-// StateAtBlock retrieves the state database associated with a certain block.
-// If no state is locally available for the given block, a number of blocks
-// are attempted to be reexecuted to generate the desired state. The optional
-// base layer statedb can be passed then it's regarded as the statedb of the
-// parent block.
-// Parameters:
-// - block: The block for which we want the state (== state at the stateRoot of the parent)
-// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state
-// - base: If the caller is tracing multiple blocks, the caller can provide the parent state
-// continuously from the callsite.
-// - checklive: if true, then the live 'blockchain' state database is used. If the caller want to
-// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid
-// storing trash persistently
-// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided,
-// it would be preferrable to start from a fresh state, if we have it on disk.
-func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) {
+func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) {
var (
current *types.Block
database state.Database
+ triedb *trie.Database
report = true
origin = block.NumberU64()
)
// The state is only for reading purposes, check the state presence in
// live database.
- if checkLive {
+ if readOnly {
// The state is available in live database, create a reference
// on top to prevent garbage collection and return a release
// function to deref it.
- statedb, err = eth.blockchain.StateAt(block.Root())
- if err == nil {
+ if statedb, err = eth.blockchain.StateAt(block.Root()); err == nil {
+ eth.blockchain.TrieDB().Reference(block.Root(), common.Hash{})
return statedb, func() {
- statedb.Database().TrieDB().Dereference(block.Root())
+ eth.blockchain.TrieDB().Dereference(block.Root())
}, nil
}
}
@@ -80,27 +67,32 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe
if preferDisk {
// Create an ephemeral trie.Database for isolating the live one. Otherwise
// the internal junks created by tracing will be persisted into the disk.
- database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16})
+ // TODO(rjl493456442), clean cache is disabled to prevent memory leak,
+ // please re-enable it for better performance.
+ database = state.NewDatabaseWithConfig(eth.chainDb, trie.HashDefaults)
if statedb, err = state.New(block.Root(), database, nil); err == nil {
log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number())
return statedb, noopReleaser, nil
}
}
// The optional base statedb is given, mark the start point as parent block
- statedb, database, report = base, base.Database(), false
+ statedb, database, triedb, report = base, base.Database(), base.Database().TrieDB(), false
current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
} else {
- // Otherwise try to reexec blocks until we find a state or reach our limit
+ // Otherwise, try to reexec blocks until we find a state or reach our limit
current = block
// Create an ephemeral trie.Database for isolating the live one. Otherwise
// the internal junks created by tracing will be persisted into the disk.
- database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16})
+ // TODO(rjl493456442), clean cache is disabled to prevent memory leak,
+ // please re-enable it for better performance.
+ triedb = trie.NewDatabase(eth.chainDb, trie.HashDefaults)
+ database = state.NewDatabaseWithNodeDB(eth.chainDb, triedb)
// If we didn't check the live database, do check state over ephemeral database,
// otherwise we would rewind past a persisted block (specific corner case is
// chain tracing from the genesis).
- if !checkLive {
+ if !readOnly {
statedb, err = state.New(current.Root(), database, nil)
if err == nil {
return statedb, noopReleaser, nil
@@ -108,6 +100,9 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe
}
// Database does not have the state for the given block, try to regenerate
for i := uint64(0); i < reexec; i++ {
+ if err := ctx.Err(); err != nil {
+ return nil, nil, err
+ }
if current.NumberU64() == 0 {
return nil, nil, errors.New("genesis state is missing")
}
@@ -168,17 +163,58 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe
}
// Hold the state reference and also drop the parent state
// to prevent accumulating too many nodes in memory.
- database.TrieDB().Reference(root, common.Hash{})
+ triedb.Reference(root, common.Hash{})
if parent != (common.Hash{}) {
- database.TrieDB().Dereference(parent)
+ triedb.Dereference(parent)
}
parent = root
}
if report {
- nodes, imgs := database.TrieDB().Size()
+ nodes, imgs := triedb.Size()
log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs)
}
- return statedb, func() { database.TrieDB().Dereference(block.Root()) }, nil
+ return statedb, func() { triedb.Dereference(block.Root()) }, nil
+}
+
+func (eth *Ethereum) pathState(block *types.Block) (*state.StateDB, func(), error) {
+ // Check if the requested state is available in the live chain.
+ statedb, err := eth.blockchain.StateAt(block.Root())
+ if err == nil {
+ return statedb, noopReleaser, nil
+ }
+ // TODO: historic state is not supported in the path-based scheme.
+ // A full archive node in pbss will be implemented by relying on
+ // state history, but that needs more work on top.
+ return nil, nil, errors.New("historical state not available in path scheme yet")
+}
+
+// stateAtBlock retrieves the state database associated with a certain block.
+// If no state is locally available for the given block, a number of blocks
+// are attempted to be reexecuted to generate the desired state. The optional
+// base layer statedb can be provided which is regarded as the statedb of the
+// parent block.
+//
+// An additional release function will be returned if the requested state is
+// available. Release is expected to be invoked when the returned state is no
+// longer needed. Its purpose is to prevent resource leaking, though it can
+// be a no-op in some cases.
+//
+// Parameters:
+// - block: The block for which we want the state (state = block.Root)
+// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state
+// - base: If the caller is tracing multiple blocks, the caller can provide the parent
+// state continuously from the callsite.
+// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should
+// be made by the caller, e.g. performing Commit or other 'save-to-disk' changes.
+// Otherwise, the trash generated by the caller may be persisted permanently.
+// - preferDisk: This arg can be used by the caller to signal that even though the 'base' is
+// provided, it would be preferable to start from a fresh state, if we have it
+// on disk.
+func (eth *Ethereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) {
+ if eth.blockchain.TrieDB().Scheme() == rawdb.HashScheme {
+ return eth.hashState(ctx, block, reexec, base, readOnly, preferDisk)
+ }
+ return eth.pathState(block)
}
// stateAtTransaction returns the execution environment of a certain transaction.
@@ -194,7 +230,7 @@ func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block,
}
// Lookup the statedb of parent block from the live database,
// otherwise regenerate it on the flight.
- statedb, release, err := eth.StateAtBlock(ctx, parent, reexec, nil, true, false)
+ statedb, release, err := eth.stateAtBlock(ctx, parent, reexec, nil, true, false)
if err != nil {
return nil, vm.BlockContext{}, nil, nil, err
}
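
For callers the contract is: obtain the state, use it, then invoke release exactly once. A hedged sketch of the calling pattern, assuming it sits in the eth package alongside the code above; `traceWithState` is a hypothetical helper and reexec=128 an illustrative bound.

```go
// traceWithState is a hypothetical caller, not part of this patch.
// readOnly=true uses the live database, so no commits may be made.
func (eth *Ethereum) traceWithState(ctx context.Context, block *types.Block) error {
	statedb, release, err := eth.stateAtBlock(ctx, block, 128, nil, true, false)
	if err != nil {
		return err
	}
	defer release() // drops the trie reference taken for block.Root()
	_ = statedb     // ... run the tracer against statedb here ...
	return nil
}
```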
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index c6d98e0704..0cc8a85cbe 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -85,7 +85,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i
TrieDirtyDisabled: true, // Archive mode
}
- _, _, genesisErr := core.SetupGenesisBlockWithOverride(backend.chaindb, trie.NewDatabase(backend.chaindb), gspec, nil, true)
+ _, _, genesisErr := core.SetupGenesisBlockWithOverride(backend.chaindb, trie.NewDatabase(backend.chaindb, nil), gspec, nil, true)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
t.Fatal(genesisErr.Error())
}
diff --git a/eth/tracers/internal/tracetest/calltrace2_test.go b/eth/tracers/internal/tracetest/calltrace2_test.go
index d0559e2804..c854828885 100644
--- a/eth/tracers/internal/tracetest/calltrace2_test.go
+++ b/eth/tracers/internal/tracetest/calltrace2_test.go
@@ -162,8 +162,9 @@ func testCallTracer2(tracerName string, dirPath string, t *testing.T) {
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
- _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
)
+ defer triedb.Close()
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
@@ -265,7 +266,8 @@ func benchTracer2(tracerName string, test *callTracer2Test, b *testing.B) {
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ defer triedb.Close()
b.ReportAllocs()
b.ResetTimer()
@@ -335,7 +337,8 @@ func TestZeroValueToNotExitCall2(t *testing.T) {
Balance: big.NewInt(500000000000000),
},
}
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false, rawdb.HashScheme)
+ defer triedb.Close()
// Create the tracer, the EVM environment and run it
tracer, err := tracers.DefaultDirectory.New("callTracer2", new(tracers.Context), nil)
if err != nil {
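
The tracer tests below all adapt to the same `tests.MakePreState` change: it now takes a scheme and additionally returns the trie database, which the caller owns and must close. A condensed sketch (the middle return value is discarded here, as in the diffs):

```go
package example

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/tests"
)

// newPrestate shows the reworked tests.MakePreState contract: the caller
// picks the scheme and owns the returned trie database.
func newPrestate(alloc core.GenesisAlloc) (release func()) {
	triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false, rawdb.HashScheme)
	_ = statedb // run the scenario against statedb, then release
	return func() { triedb.Close() } // call sites typically `defer` this
}
```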
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 1b3c121a89..233c1eb8e7 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -137,8 +137,9 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
GasLimit: uint64(test.Context.GasLimit),
BaseFee: test.Genesis.BaseFee,
}
- _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
)
+ defer triedb.Close()
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
@@ -237,7 +238,8 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ defer triedb.Close()
b.ReportAllocs()
b.ResetTimer()
@@ -343,7 +345,7 @@ func TestInternals(t *testing.T) {
want: `{"0x0000000000000000000000000000000000000000":{"balance":"0x0"},"0x000000000000000000000000000000000000feed":{"balance":"0x1c6bf52640350"},"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"}}`,
},
} {
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
core.GenesisAlloc{
to: core.GenesisAccount{
Code: tc.code,
@@ -351,7 +353,8 @@ func TestInternals(t *testing.T) {
origin: core.GenesisAccount{
Balance: big.NewInt(500000000000000),
},
- }, false)
+ }, false, rawdb.HashScheme)
+ defer triedb.Close()
evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer})
msg := types.NewMessage(origin, &to, 0, big.NewInt(0), 50000, big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, false, nil, nil)
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(msg.Gas()))
diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go
index 10cf1ecdcd..f2e9d741ff 100644
--- a/eth/tracers/internal/tracetest/flat_calltrace_test.go
+++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go
@@ -100,7 +100,8 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ defer triedb.Close()
// Create the tracer, the EVM environment and run it
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index db97c6e63d..579eb9fe71 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -108,8 +108,9 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
GasLimit: uint64(test.Context.GasLimit),
BaseFee: test.Genesis.BaseFee,
}
- _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
+ triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
)
+ defer triedb.Close()
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index 277cb471c6..f08f156612 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -79,7 +79,9 @@ func BenchmarkTransactionTrace(b *testing.B) {
Code: []byte{},
Balance: big.NewInt(500000000000000),
}
- _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
+ triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false, rawdb.HashScheme)
+ defer triedb.Close()
+
// Create the tracer, the EVM environment and run it
tracer := logger.NewStructLogger(&logger.Config{
Debug: false,
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 0a33a593e3..3c63cab01d 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -40,6 +40,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/holiman/uint256"
)
@@ -287,7 +288,7 @@ func generateTestChain() ([]*types.Block, [][]*types.BlobTxSidecar, [][]common.H
blobTxHashes = append(blobTxHashes, []common.Hash{})
}
}
- gblock := genesis.MustCommit(db)
+ gblock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
engine := ethash.NewFaker()
blocks, _ := core.GenerateChain(genesis.Config, gblock, engine, db, 2, generate, true)
// add genesis blob/sidecars/txhash to the begining of the list
@@ -724,7 +725,7 @@ func testGetBlobSidecars(t *testing.T, chain []*types.Block, blobSidecars [][]*t
wantErr error
}{
"first_block_blob_notfound_by_number": {
- blkNum: chain[1].Number(),
+ blkNum: chain[1].Number(),
},
"first_block_blob_notfound_by_hash": {
blkHash: chain[1].Hash(),
diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go
index a1f0cf385b..2a4954cfb1 100644
--- a/ethclient/gethclient/gethclient_test.go
+++ b/ethclient/gethclient/gethclient_test.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/trie"
)
var (
@@ -82,7 +83,7 @@ func generateTestChain() (*core.Genesis, []*types.Block) {
g.OffsetTime(5)
g.SetExtra([]byte("test"))
}
- gblock := genesis.MustCommit(db)
+ gblock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
engine := ethash.NewFaker()
blocks, _ := core.GenerateChain(config, gblock, engine, db, 1, generate, true)
blocks = append([]*types.Block{gblock}, blocks...)
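
Note the two spellings used across these diffs: some call sites pass `trie.HashDefaults`, others pass `nil`. Judging by their interchangeable use in this patch, both select the hash scheme; a sketch of the two side by side (the nil-config fallback is inferred from usage, not stated by the patch itself):

```go
package example

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// hashSchemeDatabases constructs a hash-scheme trie database both ways.
func hashSchemeDatabases(db ethdb.Database) (*trie.Database, *trie.Database) {
	return trie.NewDatabase(db, nil), trie.NewDatabase(db, trie.HashDefaults)
}
```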
diff --git a/les/client.go b/les/client.go
index 6e8fcbc308..3ea07911f5 100644
--- a/les/client.go
+++ b/les/client.go
@@ -89,7 +89,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb), config.Genesis, config.OverrideArrowGlacier, false)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb, nil), config.Genesis, config.OverrideArrowGlacier, false)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr
}
diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go
index 963d4d9035..fe686ab407 100644
--- a/les/downloader/downloader_test.go
+++ b/les/downloader/downloader_test.go
@@ -229,7 +229,7 @@ func (dl *downloadTester) CurrentFastBlock() *types.Block {
func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {
// For now only check that the state trie is correct
if block := dl.GetBlockByHash(hash); block != nil {
- _, err := trie.NewSecure(trie.StateTrieID(block.Root()), trie.NewDatabase(dl.stateDb))
+ _, err := trie.NewSecure(trie.StateTrieID(block.Root()), trie.NewDatabase(dl.stateDb, nil))
return err
}
return fmt.Errorf("non existent block: %x", hash[:4])
diff --git a/les/downloader/statesync.go b/les/downloader/statesync.go
index 4e7f818135..c50ed367b0 100644
--- a/les/downloader/statesync.go
+++ b/les/downloader/statesync.go
@@ -298,7 +298,7 @@ type codeTask struct {
func newStateSync(d *Downloader, root common.Hash) *stateSync {
// Hack the node scheme here. It's dead code, not used by the
// light client at all; it just aims to pass the tests.
- scheme := trie.NewDatabase(rawdb.NewMemoryDatabase()).Scheme()
+ scheme := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil).Scheme()
return &stateSync{
d: d,
root: root,
diff --git a/les/downloader/testchain_test.go b/les/downloader/testchain_test.go
index 359dc4bad4..41cb789817 100644
--- a/les/downloader/testchain_test.go
+++ b/les/downloader/testchain_test.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
// Test chain parameters.
@@ -40,7 +41,7 @@ var (
Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- testGenesis = gspec.MustCommit(testDB)
+ testGenesis = gspec.MustCommit(testDB, trie.NewDatabase(testDB, nil))
)
// The common prefix of all test chains:
diff --git a/les/fetcher/block_fetcher_test.go b/les/fetcher/block_fetcher_test.go
index a3d320c126..6a64d3933b 100644
--- a/les/fetcher/block_fetcher_test.go
+++ b/les/fetcher/block_fetcher_test.go
@@ -43,7 +43,7 @@ var (
Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(testdb)
+ genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, nil))
unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil))
)
diff --git a/les/handler_test.go b/les/handler_test.go
index 4fa19e9915..61500d99f9 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -406,7 +406,7 @@ func testGetProofs(t *testing.T, protocol int) {
accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {
header := bc.GetHeaderByNumber(i)
- trie, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db))
+ trie, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB())
for _, acc := range accounts {
req := ProofReq{
@@ -457,7 +457,7 @@ func testGetStaleProof(t *testing.T, protocol int) {
var expected []rlp.RawValue
if wantOK {
proofsV2 := light.NewNodeSet()
- t, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db))
+ t, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB())
t.Prove(account, 0, proofsV2)
expected = proofsV2.NodeList()
}
@@ -513,7 +513,7 @@ func testGetCHTProofs(t *testing.T, protocol int) {
AuxData: [][]byte{rlp},
}
root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
- trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
+ trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix), nil))
trie.Prove(key, 0, &proofsV2.Proofs)
// Assemble the requests for the different protocols
requestsV2 := []HelperTrieReq{{
@@ -578,7 +578,7 @@ func testGetBloombitsProofs(t *testing.T, protocol int) {
var proofs HelperTrieResps
root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
- trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))
+ trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix), nil))
trie.Prove(key, 0, &proofs.Proofs)
// Send the proof request and verify the response
diff --git a/les/server_handler.go b/les/server_handler.go
index 9029034a5c..dc2b3d7527 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -392,7 +392,7 @@ func (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie {
if root == (common.Hash{}) {
return nil
}
- trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))
+ trie, _ := trie.New(trie.StateTrieID(root), trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix), nil))
return trie
}
diff --git a/les/test_helper.go b/les/test_helper.go
index bd967b112d..01dcf89132 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -52,6 +52,7 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
var (
@@ -203,7 +204,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
}
oracle *checkpointoracle.CheckpointOracle
)
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
chain, _ := light.NewLightChain(odr, gspec.Config, engine, nil)
if indexers != nil {
checkpointConfig := &params.CheckpointOracleConfig{
@@ -263,7 +264,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
}
oracle *checkpointoracle.CheckpointOracle
)
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
// create a simulation backend and pre-commit several customized block to the database.
simulation := backends.NewSimulatedBackendWithDatabase(db, gspec.Alloc, 100000000)
diff --git a/light/lightchain_test.go b/light/lightchain_test.go
index e9d43d6da0..d9fb159119 100644
--- a/light/lightchain_test.go
+++ b/light/lightchain_test.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
// So we can deterministically seed different blockchains
@@ -55,7 +56,7 @@ func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) [
func newCanonical(n int) (ethdb.Database, *LightChain, error) {
db := rawdb.NewMemoryDatabase()
gspec := core.Genesis{Config: params.TestChainConfig}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
blockchain, _ := NewLightChain(&dummyOdr{db: db, indexerConfig: TestClientIndexerConfig}, gspec.Config, ethash.NewFaker(), nil)
// Create and inject the requested chain
@@ -75,7 +76,7 @@ func newTestLightChain() *LightChain {
Difficulty: big.NewInt(1),
Config: params.TestChainConfig,
}
- gspec.MustCommit(db)
+ gspec.MustCommit(db, trie.NewDatabase(db, nil))
lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFullFaker(), nil)
if err != nil {
panic(err)
diff --git a/light/odr_test.go b/light/odr_test.go
index 1cadad5ec2..6929696cc3 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -82,7 +82,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
req.Receipts = rawdb.ReadRawReceipts(odr.sdb, req.Hash, *number)
}
case *TrieRequest:
- t, _ := trie.New(trie.StorageTrieID(req.Id.StateRoot, common.BytesToHash(req.Id.AccKey), req.Id.Root), trie.NewDatabase(odr.sdb))
+ t, _ := trie.New(trie.StorageTrieID(req.Id.StateRoot, common.BytesToHash(req.Id.AccKey), req.Id.Root), trie.NewDatabase(odr.sdb, nil))
nodes := NewNodeSet()
t.Prove(req.Key, 0, nodes)
req.Proof = nodes
@@ -258,9 +258,9 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(sdb)
+ genesis = gspec.MustCommit(sdb, trie.NewDatabase(sdb, nil))
)
- gspec.MustCommit(ldb)
+ gspec.MustCommit(ldb, trie.NewDatabase(ldb, nil))
// Assemble the test environment
blockchain, _ := core.NewBlockChain(sdb, nil, &gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, 4, testChainGen, true)
diff --git a/light/postprocess.go b/light/postprocess.go
index f43fdb3cb5..4dcc358129 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -149,7 +149,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64, dis
diskdb: db,
odr: odr,
trieTable: trieTable,
- triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
+ triedb: trie.NewDatabase(trieTable, nil), // Use a tiny cache only to keep memory down
trieset: mapset.NewSet(),
sectionSize: size,
disablePruning: disablePruning,
@@ -354,7 +354,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin
diskdb: db,
odr: odr,
trieTable: trieTable,
- triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down
+ triedb: trie.NewDatabase(trieTable, nil), // Use a tiny cache only to keep memory down
trieset: mapset.NewSet(),
parentSize: parentSize,
size: size,
diff --git a/light/trie.go b/light/trie.go
index a09488a4ba..b7c4f311bb 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -151,8 +151,8 @@ func (t *odrTrie) Hash() common.Hash {
return t.trie.Hash()
}
-func (t *odrTrie) NodeIterator(startkey []byte) trie.NodeIterator {
- return newNodeIterator(t, startkey)
+func (t *odrTrie) NodeIterator(startkey []byte) (trie.NodeIterator, error) {
+ return newNodeIterator(t, startkey), nil
}
func (t *odrTrie) GetKey(sha []byte) []byte {
@@ -175,7 +175,7 @@ func (t *odrTrie) do(key []byte, fn func() error) error {
} else {
id = trie.StateTrieID(t.id.StateRoot)
}
- t.trie, err = trie.New(id, trie.NewDatabase(t.db.backend.Database()))
+ t.trie, err = trie.New(id, trie.NewDatabase(t.db.backend.Database(), nil))
}
if err == nil {
err = fn()
@@ -207,7 +207,7 @@ func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator {
} else {
id = trie.StateTrieID(t.id.StateRoot)
}
- t, err := trie.New(id, trie.NewDatabase(t.db.backend.Database()))
+ t, err := trie.New(id, trie.NewDatabase(t.db.backend.Database(), nil))
if err == nil {
it.t.trie = t
}
@@ -215,7 +215,11 @@ func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator {
})
}
it.do(func() error {
- it.NodeIterator = it.t.trie.NodeIterator(startkey)
+ var err error
+ it.NodeIterator, err = it.t.trie.NodeIterator(startkey)
+ if err != nil {
+ return err
+ }
return it.NodeIterator.Error()
})
return it
diff --git a/light/trie_test.go b/light/trie_test.go
index 3e858c8ab7..be05504f02 100644
--- a/light/trie_test.go
+++ b/light/trie_test.go
@@ -42,9 +42,9 @@ func TestNodeIterator(t *testing.T) {
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(fulldb)
+ genesis = gspec.MustCommit(fulldb, trie.NewDatabase(fulldb, nil))
)
- gspec.MustCommit(lightdb)
+ gspec.MustCommit(lightdb, trie.NewDatabase(lightdb, nil))
blockchain, _ := core.NewBlockChain(fulldb, nil, &gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), fulldb, 4, testChainGen, true)
if _, err := blockchain.InsertChain(gchain, nil); err != nil {
@@ -62,8 +62,16 @@ func TestNodeIterator(t *testing.T) {
}
func diffTries(t1, t2 state.Trie) error {
- i1 := trie.NewIterator(t1.NodeIterator(nil))
- i2 := trie.NewIterator(t2.NodeIterator(nil))
+ trieIt1, err := t1.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
+ trieIt2, err := t2.NodeIterator(nil)
+ if err != nil {
+ return err
+ }
+ i1 := trie.NewIterator(trieIt1)
+ i2 := trie.NewIterator(trieIt2)
for i1.Next() && i2.Next() {
if !bytes.Equal(i1.Key, i2.Key) {
spew.Dump(i2)
diff --git a/light/txpool_test.go b/light/txpool_test.go
index 3c377f7b00..8398742689 100644
--- a/light/txpool_test.go
+++ b/light/txpool_test.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
)
type testTxRelay struct {
@@ -88,9 +89,9 @@ func TestTxPool(t *testing.T) {
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(sdb)
+ genesis = gspec.MustCommit(sdb, trie.NewDatabase(sdb, nil))
)
- gspec.MustCommit(ldb)
+ gspec.MustCommit(ldb, trie.NewDatabase(ldb, nil))
// Assemble the test environment
blockchain, _ := core.NewBlockChain(sdb, nil, &gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), sdb, poolTestBlocks, txPoolTestChainGen, true)
diff --git a/miner/miner_test.go b/miner/miner_test.go
index 1b6eeecd66..fa652b27e4 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -251,7 +251,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
if err != nil {
t.Fatalf("can't create new chain %v", err)
}
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(chainDB), nil)
+ statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil)
blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
legacyPool := legacypool.New(testTxPoolConfig, bc.Config(), blockchain)
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 4b150e1a85..11d2f51c4f 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -42,6 +42,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/holiman/uint256"
)
@@ -142,7 +143,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
default:
t.Fatalf("unexpected consensus engine type: %T", engine)
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, trie.NewDatabase(db, nil))
chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, &gspec, nil, engine, vm.Config{}, nil, nil)
legacyPool := legacypool.New(testTxPoolConfig, chainConfig, chain)
@@ -244,7 +245,7 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) {
// This test chain imports the mined blocks.
db2 := rawdb.NewMemoryDatabase()
- b.genesis.MustCommit(db2)
+ b.genesis.MustCommit(db2, trie.NewDatabase(db2, nil))
chain, _ := core.NewBlockChain(db2, nil, b.genesis, nil, engine, vm.Config{}, nil, nil)
defer chain.Stop()
diff --git a/tests/block_test.go b/tests/block_test.go
index 74c7ed8197..baea3615d9 100644
--- a/tests/block_test.go
+++ b/tests/block_test.go
@@ -18,6 +18,8 @@ package tests
import (
"testing"
+
+ "github.com/ethereum/go-ethereum/core/rawdb"
)
func TestBlockchain(t *testing.T) {
@@ -47,12 +49,19 @@ func TestBlockchain(t *testing.T) {
// using 4.6 TGas
bt.skipLoad(`.*randomStatetest94.json.*`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
- if err := bt.checkFailure(t, test.Run(false)); err != nil {
- t.Errorf("test without snapshotter failed: %v", err)
+ if err := bt.checkFailure(t, test.Run(false, rawdb.HashScheme)); err != nil {
+ t.Errorf("test in hash mode without snapshotter failed: %v", err)
}
- if err := bt.checkFailure(t, test.Run(true)); err != nil {
- t.Errorf("test with snapshotter failed: %v", err)
+ if err := bt.checkFailure(t, test.Run(true, rawdb.HashScheme)); err != nil {
+ t.Errorf("test in hash mode with snapshotter failed: %v", err)
}
+ // if err := bt.checkFailure(t, test.Run(false, rawdb.PathScheme)); err != nil {
+ // t.Errorf("test in path mode without snapshotter failed: %v", err)
+ // }
+ // if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme)); err != nil {
+ // t.Errorf("test in path mode with snapshotter failed: %v", err)
+ // }
})
// There is also a LegacyTests folder, containing blockchain tests generated
// prior to Istanbul. However, they are all derived from GeneralStateTests,
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index df955e3d69..227b9bf605 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -37,6 +37,9 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)
// A BlockTest checks handling of entire blocks.
@@ -98,15 +101,26 @@ type btHeaderMarshaling struct {
BaseFeePerGas *math.HexOrDecimal256
}
-func (t *BlockTest) Run(snapshotter bool) error {
+func (t *BlockTest) Run(snapshotter bool, scheme string) error {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
// import pre accounts & construct test genesis block & state root
- db := rawdb.NewMemoryDatabase()
- gblock := t.genesis(config).MustCommit(db)
+ var (
+ db = rawdb.NewMemoryDatabase()
+ tconf = &trie.Config{}
+ )
+ if scheme == rawdb.PathScheme {
+ tconf.PathDB = pathdb.Defaults
+ } else {
+ tconf.HashDB = hashdb.Defaults
+ }
+ triedb := trie.NewDatabase(db, tconf)
+ // Commit genesis state
+ gblock := t.genesis(config).MustCommit(db, triedb)
+ triedb.Close()
if gblock.Hash() != t.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
}
@@ -119,7 +133,7 @@ func (t *BlockTest) Run(snapshotter bool) error {
} else {
engine = ethash.NewShared()
}
- cache := &core.CacheConfig{TrieCleanLimit: 0}
+ cache := &core.CacheConfig{TrieCleanLimit: 0, StateScheme: scheme}
if snapshotter {
cache.SnapshotLimit = 1
cache.SnapshotWait = true
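The scheme switch introduced above is the pattern every call site in this series follows: exactly one of HashDB or PathDB is set on trie.Config. As a sketch, the selection generalizes to a small helper (newTrieDatabase is a hypothetical name, not part of the diff; it assumes the ethdb, rawdb, trie, hashdb and pathdb imports used elsewhere in this patch):

    // Sketch: derive a trie database backend from a state scheme string,
    // mirroring the selection in BlockTest.Run above.
    func newTrieDatabase(db ethdb.Database, scheme string) *trie.Database {
        tconf := &trie.Config{}
        if scheme == rawdb.PathScheme {
            tconf.PathDB = pathdb.Defaults
        } else {
            tconf.HashDB = hashdb.Defaults
        }
        return trie.NewDatabase(db, tconf)
    }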
diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go
index 494979109a..3f48578291 100644
--- a/tests/fuzzers/les/les-fuzzer.go
+++ b/tests/fuzzers/les/les-fuzzer.go
@@ -62,7 +62,7 @@ func makechain() (bc *core.BlockChain, addrHashes, txHashes []common.Hash) {
Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}},
GasLimit: 100000000,
}
- genesis := gspec.MustCommit(db)
+ genesis := gspec.MustCommit(db, nil)
signer := types.HomesteadSigner{}
blocks, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, testChainLen,
func(i int, gen *core.BlockGen) {
@@ -90,8 +90,8 @@ func makechain() (bc *core.BlockChain, addrHashes, txHashes []common.Hash) {
}
func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [][]byte) {
- chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
- bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
for i := 0; i < testChainLen; i++ {
// The element in CHT is <big-endian block number> -> <block hash>
key := make([]byte, 8)
diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
index 5a65152aa8..aea4a47985 100644
--- a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
+++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
@@ -62,7 +62,7 @@ func (f *fuzzer) readInt() uint64 {
}
func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
- trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
size := f.readInt()
// Fill it with some fluff
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 6e728ac2c2..9ade6f2b2f 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -149,10 +149,10 @@ func (f *fuzzer) fuzz() int {
// This spongeDb is used to check the sequence of disk-db-writes
var (
spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA))
+ dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA), nil)
trieA = trie.NewEmpty(dbA)
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB))
+ dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB), nil)
trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme())
})
@@ -237,7 +237,7 @@ func (f *fuzzer) fuzz() int {
panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC))
}
trieA, _ = trie.New(trie.TrieID(rootA), dbA)
- iterA := trieA.NodeIterator(nil)
+ iterA := trieA.MustNodeIterator(nil)
for iterA.Next(true) {
if iterA.Hash() == (common.Hash{}) {
if _, present := nodeset[string(iterA.Path())]; present {
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index dc2e689a56..617d4f7c30 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -145,7 +145,7 @@ func Fuzz(input []byte) int {
func runRandTest(rt randTest) error {
var (
- triedb = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ triedb = trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)
tr = trie.NewEmpty(triedb)
origin = types.EmptyRootHash
values = make(map[string]string) // tracks content of the trie
@@ -185,7 +185,7 @@ func runRandTest(rt randTest) error {
origin = hash
case opItercheckhash:
checktr := trie.NewEmpty(triedb)
- it := trie.NewIterator(tr.NodeIterator(nil))
+ it := trie.NewIterator(tr.MustNodeIterator(nil))
for it.Next() {
checktr.Update(it.Key, it.Value)
}
diff --git a/tests/state_test.go b/tests/state_test.go
index 16975cef31..6eb65350a4 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -25,6 +25,9 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers/logger"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/vm"
)
@@ -68,29 +71,52 @@ func TestState(t *testing.T) {
subtest := subtest
key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
- t.Run(key+"/trie", func(t *testing.T) {
+ t.Run(key+"/hash/trie", func(t *testing.T) {
withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- _, _, err := test.Run(subtest, vmconfig, false)
- if err != nil && len(test.json.Post[subtest.Fork][subtest.Index].ExpectException) > 0 {
- // Ignore expected errors (TODO MariusVanDerWijden check error string)
- return nil
- }
- return st.checkFailure(t, err)
+ var result error
+ test.Run(subtest, vmconfig, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ result = st.checkFailure(t, err)
+ })
+ return result
})
})
- t.Run(key+"/snap", func(t *testing.T) {
+ t.Run(key+"/hash/snap", func(t *testing.T) {
withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- snaps, statedb, err := test.Run(subtest, vmconfig, true)
- if snaps != nil && statedb != nil {
- if _, err := snaps.Journal(statedb.IntermediateRoot(false)); err != nil {
- return err
+ var result error
+ test.Run(subtest, vmconfig, true, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ if snaps != nil && state != nil {
+ if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil {
+ result = err
+ return
+ }
}
- }
- if err != nil && len(test.json.Post[subtest.Fork][subtest.Index].ExpectException) > 0 {
- // Ignore expected errors (TODO MariusVanDerWijden check error string)
- return nil
- }
- return st.checkFailure(t, err)
+ result = st.checkFailure(t, err)
+ })
+ return result
+ })
+ })
+ t.Run(key+"/path/trie", func(t *testing.T) {
+ withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+ var result error
+ test.Run(subtest, vmconfig, false, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ result = st.checkFailure(t, err)
+ })
+ return result
+ })
+ })
+ t.Run(key+"/path/snap", func(t *testing.T) {
+ withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+ var result error
+ test.Run(subtest, vmconfig, true, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
+ if snaps != nil && state != nil {
+ if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil {
+ result = err
+ return
+ }
+ }
+ result = st.checkFailure(t, err)
+ })
+ return result
})
})
}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index a923535181..e3f576718b 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -37,6 +37,9 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"golang.org/x/crypto/sha3"
)
@@ -158,32 +161,41 @@ func (t *StateTest) Subtests() []StateSubtest {
}
// Run executes a specific subtest and verifies the post-state and logs
-func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, error) {
- snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
+func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string, postCheck func(err error, snaps *snapshot.Tree, state *state.StateDB)) (result error) {
+ triedb, snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter, scheme)
if err != nil {
- return snaps, statedb, err
+ return err
}
+ // Invoke the callback at the end of the function for further analysis.
+ defer func() {
+ postCheck(result, snaps, statedb)
+
+ if triedb != nil {
+ triedb.Close()
+ }
+ }()
post := t.json.Post[subtest.Fork][subtest.Index]
// N.B: We need to do this in a two-step process, because the first Commit takes care
// of suicides, and we need to touch the coinbase _after_ it has potentially suicided.
if root != common.Hash(post.Root) {
- return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
+ return fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
}
if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) {
- return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
+ return fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
- return snaps, statedb, nil
+ return nil
}
// RunNoVerify runs a specific subtest and returns the statedb and post-state root
-func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, common.Hash, error) {
+func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB, common.Hash, error) {
config, eips, err := GetChainConfig(subtest.Fork)
if err != nil {
- return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
+ return nil, nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
block := t.genesis(config).ToBlock()
- snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
+ triedb, snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter, scheme)
+ defer triedb.Close()
var baseFee *big.Int
if config.IsLondon(new(big.Int)) {
@@ -197,7 +209,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
post := t.json.Post[subtest.Fork][subtest.Index]
msg, err := t.json.Tx.toMessage(post, baseFee)
if err != nil {
- return nil, nil, common.Hash{}, err
+ return nil, nil, nil, common.Hash{}, err
}
// Try to recover tx with current signer
@@ -205,11 +217,11 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
var ttx types.Transaction
err := ttx.UnmarshalBinary(post.TxBytes)
if err != nil {
- return nil, nil, common.Hash{}, err
+ return nil, nil, nil, common.Hash{}, err
}
if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil {
- return nil, nil, common.Hash{}, err
+ return nil, nil, nil, common.Hash{}, err
}
}
@@ -240,15 +252,22 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
statedb.Commit(block.NumberU64(), config.IsEIP158(block.Number()))
// And _now_ get the state root
root := statedb.IntermediateRoot(config.IsEIP158(block.Number()))
- return snaps, statedb, root, err
+ return triedb, snaps, statedb, root, err
}
func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
}
-func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) {
- sdb := state.NewDatabase(db)
+func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB) {
+ tconf := &trie.Config{Preimages: true}
+ if scheme == rawdb.HashScheme {
+ tconf.HashDB = hashdb.Defaults
+ } else {
+ tconf.PathDB = pathdb.Defaults
+ }
+ triedb := trie.NewDatabase(db, tconf)
+ sdb := state.NewDatabaseWithNodeDB(db, triedb)
statedb, _ := state.New(common.Hash{}, sdb, nil)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
@@ -263,10 +282,10 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo
var snaps *snapshot.Tree
if snapshotter {
- snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, root, false, true, false)
+ snaps, _ = snapshot.New(db, triedb, 1, root, false, true, false)
}
statedb, _ = state.New(root, sdb, snaps)
- return snaps, statedb
+ return triedb, snaps, statedb
}
func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
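One Go subtlety carries the reworked Run above: result is a named return value, so the deferred postCheck call observes whichever error the return statements assigned, and it fires before triedb.Close tears the state down. A minimal, self-contained sketch of that mechanism (names are illustrative):

    package main

    import "fmt"

    // run shows the named-return/defer pattern used by StateTest.Run:
    // the deferred closure reads the final value of `result`.
    func run(postCheck func(err error)) (result error) {
        defer func() {
            postCheck(result) // sees the value set by the return below
        }()
        return fmt.Errorf("post state root mismatch")
    }

    func main() {
        run(func(err error) { fmt.Println("postCheck saw:", err) })
    }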
diff --git a/trie/database.go b/trie/database.go
index 92791a92d8..79ce83588e 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
@@ -29,9 +30,10 @@ import (
// Config defines all necessary options for database.
type Config struct {
- Cache int // Memory allowance (MB) to use for caching trie nodes in memory
- Journal string // Journal of clean cache to survive node restarts
- Preimages bool // Flag whether the preimage of trie key is recorded
+ Preimages bool // Flag whether the preimage of trie key is recorded
+
+ HashDB *hashdb.Config // Configs for hash-based scheme
+ PathDB *pathdb.Config // Configs for experimental path-based scheme
// Testing hooks
OnCommit func(states *triestate.Set) // Hook invoked when commit is performed
@@ -93,38 +95,50 @@ func prepare(diskdb ethdb.Database, config *Config) *Database {
}
}
-// NewDatabase initializes the trie database with default settings, namely
-// the legacy hash-based scheme is used by default.
-func NewDatabase(diskdb ethdb.Database) *Database {
- return NewDatabaseWithConfig(diskdb, nil)
+// HashDefaults represents a config for using hash-based scheme with
+// default settings.
+var HashDefaults = &Config{
+ Preimages: false,
+ HashDB: hashdb.Defaults,
}
-// NewDatabaseWithConfig initializes the trie database with provided configs.
-// The path-based scheme is not activated yet, always initialized with legacy
-// hash-based scheme by default.
-func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
- var cleans int
-
- if config != nil && config.Cache != 0 {
- cleans = config.Cache * 1024 * 1024
+// NewDatabase initializes the trie database with default settings, namely
+// the legacy hash-based scheme is used by default.
+func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
+ if config == nil {
+ config = HashDefaults
+ }
+ var preimages *preimageStore
+ if config.Preimages {
+ preimages = newPreimageStore(diskdb)
+ }
+ db := &Database{
+ config: config,
+ diskdb: diskdb,
+ preimages: preimages,
+ }
+ if config.HashDB != nil && config.PathDB != nil {
+ log.Crit("Both 'hash' and 'path' mode are configured")
+ }
+ if config.PathDB != nil {
+ db.backend = pathdb.New(diskdb, config.PathDB)
+ } else {
+ // Use hashdb by default
+ db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{})
}
- db := prepare(diskdb, config)
- db.backend = hashdb.New(diskdb, cleans, mptResolver{})
return db
}
// Reader returns a reader for accessing all trie nodes with provided state root.
// An error is returned in case the state is not available.
-func (db *Database) Reader(blockRoot common.Hash) Reader {
+func (db *Database) Reader(blockRoot common.Hash) (Reader, error) {
switch b := db.backend.(type) {
case *hashdb.Database:
return b.Reader(blockRoot)
case *pathdb.Database:
- reader, _ := b.Reader(blockRoot)
- return reader
+ return b.Reader(blockRoot)
}
- return nil
-
+ return nil, errors.New("unsupported")
}
// Update performs a state transition by committing dirty nodes contained in the
@@ -242,3 +256,60 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
}
return hdb.Node(hash)
}
+
+// Recover rolls back the database to a specified historical point. The state is
+// supported as the rollback destination only if it's a canonical state and the
+// corresponding trie histories exist. It's only supported by the path-based
+// database and will return an error for others.
+func (db *Database) Recover(target common.Hash) error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.Recover(target, &trieLoader{db: db})
+}
+
+// Recoverable reports whether the specified state can be recovered. It's only
+// supported by the path-based database and will return an error for others.
+func (db *Database) Recoverable(root common.Hash) (bool, error) {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return false, errors.New("not supported")
+ }
+ return pdb.Recoverable(root), nil
+}
+
+// Reset wipes all available journal from the persistent database and discards
+// all caches and diff layers, using the given root to create a new disk layer.
+// It's only supported by the path-based database and will return an error for others.
+func (db *Database) Reset(root common.Hash) error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.Reset(root)
+}
+
+// Journal commits an entire diff hierarchy to disk into a single journal entry.
+// This is meant to be used during shutdown to persist the snapshot without
+// flattening everything down (bad for reorgs). It's only supported by the
+// path-based database and will return an error for others.
+func (db *Database) Journal(root common.Hash) error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.Journal(root)
+}
+
+// SetBufferSize sets the node buffer size to the provided value (in bytes).
+// It's only supported by the path-based database and will return an error for
+// others.
+func (db *Database) SetBufferSize(size int) error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.SetBufferSize(size)
+}
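Taken together, the rework leaves a single constructor and a scheme-gated feature set: NewDatabase(diskdb, config) defaults to the hash backend, Reader surfaces an error instead of a bare nil, and Recover/Recoverable/Reset/Journal/SetBufferSize refuse the hash backend. A hedged usage sketch against the API as defined in this diff (error handling abbreviated):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/trie"
        "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
    )

    func main() {
        // A nil config falls back to HashDefaults (hash scheme, no preimages).
        hashDB := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)

        // Path-only operations report an error on the hash backend.
        if _, err := hashDB.Recoverable(common.Hash{}); err != nil {
            fmt.Println("hashdb:", err) // "not supported"
        }

        // The path backend accepts them.
        pathDB := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{PathDB: pathdb.Defaults})
        ok, _ := pathDB.Recoverable(common.Hash{})
        fmt.Println("recoverable:", ok)
    }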
diff --git a/trie/database_test.go b/trie/database_test.go
index 0d4f63e467..e456bf7177 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -20,6 +20,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)
// newTestDatabase initializes the trie database with specified scheme.
@@ -27,10 +28,9 @@ import (
func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
db := prepare(diskdb, nil)
if scheme == rawdb.HashScheme {
- db.backend = hashdb.New(diskdb, 0, mptResolver{})
+ db.backend = hashdb.New(diskdb, &hashdb.Config{}, mptResolver{})
+ } else {
+ db.backend = pathdb.New(diskdb, &pathdb.Config{}) // disable clean/dirty cache
}
- // //} else {
- // // db.backend = snap.New(diskdb, db.cleans, nil)
- // //}
return db
}
diff --git a/trie/errors.go b/trie/errors.go
index afe344bed2..f614dd30ad 100644
--- a/trie/errors.go
+++ b/trie/errors.go
@@ -17,11 +17,17 @@
package trie
import (
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
)
+// ErrCommitted is returned when an already committed trie is requested for usage.
+// Potential usages include `Get`, `Update`, `Delete`, `NodeIterator`, `Prove`
+// and so on.
+var ErrCommitted = errors.New("trie is already committed")
+
// MissingNodeError is returned by the trie functions (TryGet, TryUpdate, TryDelete)
// in the case where a trie node is not present in the local database. It contains
// information necessary for retrieving the missing node.
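The sentinel pairs with the committed-trie guard added to Prove later in this diff: once a trie has been committed, reads fail fast instead of touching released nodes. A sketch of the caller-side check, assuming Commit marks the trie as committed as in the matching upstream change:

    package main

    import (
        "errors"
        "fmt"

        "github.com/ethereum/go-ethereum/core/rawdb"
        "github.com/ethereum/go-ethereum/ethdb/memorydb"
        "github.com/ethereum/go-ethereum/trie"
    )

    func main() {
        tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
        tr.Update([]byte("k"), []byte("v"))
        tr.Commit(false) // the trie is now marked committed

        err := tr.Prove([]byte("k"), 0, memorydb.New())
        if errors.Is(err, trie.ErrCommitted) {
            fmt.Println("reopen the trie from its root before proving:", err)
        }
    }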
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 3527cc0266..240aa25284 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -33,8 +33,8 @@ import (
)
func TestEmptyIterator(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- iter := trie.NodeIterator(nil)
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ iter := trie.MustNodeIterator(nil)
seen := make(map[string]struct{})
for iter.Next(true) {
@@ -46,7 +46,7 @@ func TestEmptyIterator(t *testing.T) {
}
func TestIterator(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
+ db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -69,7 +69,7 @@ func TestIterator(t *testing.T) {
db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
trie, _ = New(TrieID(root), db)
found := make(map[string]string)
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
found[string(it.Key)] = string(it.Value)
}
@@ -87,7 +87,7 @@ type kv struct {
}
func TestIteratorLargeData(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
for i := byte(0); i < 255; i++ {
@@ -99,7 +99,7 @@ func TestIteratorLargeData(t *testing.T) {
vals[string(value2.k)] = value2
}
- it := NewIterator(trie.NodeIterator(nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
vals[string(it.Key)].t = true
}
@@ -128,7 +128,7 @@ type iterationElement struct {
func TestNodeIteratorCoverage(t *testing.T) {
testNodeIteratorCoverage(t, rawdb.HashScheme)
- //testNodeIteratorCoverage(t, rawdb.PathScheme)
+ testNodeIteratorCoverage(t, rawdb.PathScheme)
}
func testNodeIteratorCoverage(t *testing.T, scheme string) {
@@ -137,7 +137,7 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) {
// Gather all the node hashes found by the iterator
var elements = make(map[common.Hash]iterationElement)
- for it := trie.NodeIterator(nil); it.Next(true); {
+ for it := trie.MustNodeIterator(nil); it.Next(true); {
if it.Hash() != (common.Hash{}) {
elements[it.Hash()] = iterationElement{
hash: it.Hash(),
@@ -148,7 +148,12 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) {
}
// Cross check the hashes and the database itself
for _, element := range elements {
- if blob, err := nodeDb.Reader(trie.Hash()).Node(common.Hash{}, element.path, element.hash); err != nil {
+ reader, err := nodeDb.Reader(trie.Hash())
+ if err != nil {
+ t.Errorf("failed to retrieve reader %v", err)
+ }
+
+ if blob, err := reader.Node(common.Hash{}, element.path, element.hash); err != nil {
t.Errorf("failed to retrieve reported node %x: %v", element.hash, err)
} else if !bytes.Equal(blob, element.blob) {
t.Errorf("node blob is different, want %v got %v", element.blob, blob)
@@ -202,25 +207,25 @@ var testdata2 = []kvs{
}
func TestIteratorSeek(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for _, val := range testdata1 {
trie.Update([]byte(val.k), []byte(val.v))
}
// Seek to the middle.
- it := NewIterator(trie.NodeIterator([]byte("fab")))
+ it := NewIterator(trie.MustNodeIterator([]byte("fab")))
if err := checkIteratorOrder(testdata1[4:], it); err != nil {
t.Fatal(err)
}
// Seek to a non-existent key.
- it = NewIterator(trie.NodeIterator([]byte("barc")))
+ it = NewIterator(trie.MustNodeIterator([]byte("barc")))
if err := checkIteratorOrder(testdata1[1:], it); err != nil {
t.Fatal(err)
}
// Seek beyond the end.
- it = NewIterator(trie.NodeIterator([]byte("z")))
+ it = NewIterator(trie.MustNodeIterator([]byte("z")))
if err := checkIteratorOrder(nil, it); err != nil {
t.Fatal(err)
}
@@ -243,7 +248,7 @@ func checkIteratorOrder(want []kvs, it *Iterator) error {
}
func TestDifferenceIterator(t *testing.T) {
- dba := NewDatabase(rawdb.NewMemoryDatabase())
+ dba := NewDatabase(rawdb.NewMemoryDatabase(), nil)
triea := NewEmpty(dba)
for _, val := range testdata1 {
triea.Update([]byte(val.k), []byte(val.v))
@@ -252,7 +257,7 @@ func TestDifferenceIterator(t *testing.T) {
dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
- dbb := NewDatabase(rawdb.NewMemoryDatabase())
+ dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trieb := NewEmpty(dbb)
for _, val := range testdata2 {
trieb.Update([]byte(val.k), []byte(val.v))
@@ -262,7 +267,7 @@ func TestDifferenceIterator(t *testing.T) {
trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
- di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil))
+ di, _ := NewDifferenceIterator(triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil))
it := NewIterator(di)
for it.Next() {
found[string(it.Key)] = string(it.Value)
@@ -285,7 +290,7 @@ func TestDifferenceIterator(t *testing.T) {
}
func TestUnionIterator(t *testing.T) {
- dba := NewDatabase(rawdb.NewMemoryDatabase())
+ dba := NewDatabase(rawdb.NewMemoryDatabase(), nil)
triea := NewEmpty(dba)
for _, val := range testdata1 {
triea.Update([]byte(val.k), []byte(val.v))
@@ -294,7 +299,7 @@ func TestUnionIterator(t *testing.T) {
dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
triea, _ = New(TrieID(rootA), dba)
- dbb := NewDatabase(rawdb.NewMemoryDatabase())
+ dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trieb := NewEmpty(dbb)
for _, val := range testdata2 {
trieb.Update([]byte(val.k), []byte(val.v))
@@ -303,7 +308,7 @@ func TestUnionIterator(t *testing.T) {
dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
trieb, _ = New(TrieID(rootB), dbb)
- di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
+ di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)})
it := NewIterator(di)
all := []struct{ k, v string }{
@@ -338,19 +343,19 @@ func TestUnionIterator(t *testing.T) {
}
func TestIteratorNoDups(t *testing.T) {
- tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for _, val := range testdata1 {
tr.Update([]byte(val.k), []byte(val.v))
}
- checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
+ checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
}
// This test checks that nodeIterator.Next can be retried after inserting missing trie nodes.
func TestIteratorContinueAfterError(t *testing.T) {
testIteratorContinueAfterError(t, false, rawdb.HashScheme)
testIteratorContinueAfterError(t, true, rawdb.HashScheme)
- // testIteratorContinueAfterError(t, false, rawdb.PathScheme)
- // testIteratorContinueAfterError(t, true, rawdb.PathScheme)
+ testIteratorContinueAfterError(t, false, rawdb.PathScheme)
+ testIteratorContinueAfterError(t, true, rawdb.PathScheme)
}
func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
@@ -367,7 +372,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
tdb.Commit(root, false)
}
tr, _ = New(TrieID(root), tdb)
- wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
+ wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
var (
paths [][]byte
@@ -426,7 +431,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
}
// Iterate until the error is hit.
seen := make(map[string]bool)
- it := tr.NodeIterator(nil)
+ it := tr.MustNodeIterator(nil)
checkIteratorNoDups(t, it, seen)
missing, ok := it.Error().(*MissingNodeError)
if !ok || missing.NodeHash != rhash {
@@ -455,8 +460,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
func TestIteratorContinueAfterSeekError(t *testing.T) {
testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme)
testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme)
- // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
- // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
+ testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
+ testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
}
func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) {
@@ -494,7 +499,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
}
// Create a new iterator that seeks to "bars". Seeking can't proceed because
// the node is missing.
- it := tr.NodeIterator([]byte("bars"))
+ it := tr.MustNodeIterator([]byte("bars"))
missing, ok := it.Error().(*MissingNodeError)
if !ok {
t.Fatal("want MissingNodeError, got", it.Error())
@@ -528,7 +533,7 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in
func TestIteratorNodeBlob(t *testing.T) {
testIteratorNodeBlob(t, rawdb.HashScheme)
- //testIteratorNodeBlob(t, rawdb.PathScheme)
+ testIteratorNodeBlob(t, rawdb.PathScheme)
}
type loggingDb struct {
@@ -585,8 +590,8 @@ func (l *loggingDb) Close() error {
func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
// Create an empty trie
logDb := &loggingDb{0, memorydb.New()}
- triedb := NewDatabase(rawdb.NewDatabase(logDb))
- trie, _ := NewSecure(TrieID(common.Hash{}), triedb)
+ triedb := NewDatabase(rawdb.NewDatabase(logDb), nil)
+ trie, _ := NewSecure(TrieID(types.EmptyRootHash), triedb)
// Fill it with some arbitrary data
for i := 0; i < 10000; i++ {
@@ -600,7 +605,10 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
}
root, nodes, _ := trie.Commit(false)
triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Commit(root, false)
// Return the generated trie
+ trie, _ = NewSecure(TrieID(root), triedb)
+
return triedb, trie, logDb
}
@@ -612,8 +620,8 @@ func TestNodeIteratorLargeTrie(t *testing.T) {
// Do a seek operation
trie.NodeIterator(common.FromHex("0x77667766776677766778855885885885"))
// master: 24 get operations
- // this pr: 5 get operations
- if have, want := logDb.getCount, uint64(5); have != want {
+ // this pr: 6 get operations
+ if have, want := logDb.getCount, uint64(6); have != want {
t.Fatalf("Too many lookups during seek, have %d want %d", have, want)
}
}
@@ -644,7 +652,7 @@ func testIteratorNodeBlob(t *testing.T, scheme string) {
var found = make(map[common.Hash][]byte)
trie, _ = New(TrieID(root), triedb)
- it := trie.NodeIterator(nil)
+ it := trie.MustNodeIterator(nil)
for it.Next(true) {
if it.Hash() == (common.Hash{}) {
continue
@@ -692,7 +700,7 @@ func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) {
}
hash = common.BytesToHash(key)
} else {
- ok, remain := rawdb.IsAccountTrieNode(key)
+ ok, remain := rawdb.ResolveAccountTrieNodeKey(key)
if !ok {
return false, nil, common.Hash{}
}
diff --git a/trie/proof.go b/trie/proof.go
index c8179eeeb4..c1b4ae305f 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -35,6 +35,9 @@ import (
// nodes of the longest existing prefix of the key (at least the root node), ending
// with the node that proves the absence of the key.
func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error {
+ if t.committed {
+ return ErrCommitted
+ }
// Collect all nodes on the path to key.
var (
prefix []byte
diff --git a/trie/proof_test.go b/trie/proof_test.go
index f772a5e838..9f31b88d17 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -49,7 +49,7 @@ func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database {
// Create a leaf iterator based Merkle prover
provers = append(provers, func(key []byte) *memorydb.Database {
proof := memorydb.New()
- if it := NewIterator(trie.NodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) {
+ if it := NewIterator(trie.MustNodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) {
for _, p := range it.Prove() {
proof.Put(crypto.Keccak256(p), p)
}
@@ -80,7 +80,7 @@ func TestProof(t *testing.T) {
}
func TestOneElementProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
updateString(trie, "k", "v")
for i, prover := range makeProvers(trie) {
proof := prover([]byte("k"))
@@ -131,7 +131,7 @@ func TestBadProof(t *testing.T) {
// Tests that missing keys can also be proven. The test explicitly uses a single
// entry trie and checks for missing keys both before and after the single entry.
func TestMissingKeyProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
updateString(trie, "k", "v")
for i, key := range []string{"a", "j", "l", "z"} {
@@ -387,7 +387,7 @@ func TestOneElementRangeProof(t *testing.T) {
}
// Test the mini trie with only a single element.
- tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
entry := &kv{randBytes(32), randBytes(20), false}
tinyTrie.Update(entry.k, entry.v)
@@ -459,7 +459,7 @@ func TestAllElementsProof(t *testing.T) {
// TestSingleSideRangeProof tests the range starts from zero.
func TestSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -494,7 +494,7 @@ func TestSingleSideRangeProof(t *testing.T) {
// TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff.
func TestReverseSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -601,7 +601,7 @@ func TestBadRangeProof(t *testing.T) {
// TestGappedRangeProof focuses on the small trie with embedded nodes.
// If the gapped node is embedded in the trie, it should be detected too.
func TestGappedRangeProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
var entries []*kv // Sorted entries
for i := byte(0); i < 10; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -675,7 +675,7 @@ func TestSameSideProofs(t *testing.T) {
}
func TestHasRightElement(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
var entries entrySlice
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -1028,7 +1028,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
}
func randomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
for i := byte(0); i < 100; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -1053,7 +1053,7 @@ func randBytes(n int) []byte {
}
func nonRandomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
max := uint64(0xffffffffffffffff)
for i := uint64(0); i < uint64(n); i++ {
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 973596a58f..8ff4630a7f 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -201,10 +201,16 @@ func (t *SecureTrie) Copy() *SecureTrie {
// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration
// starts at the key after the given start key.
-func (t *SecureTrie) NodeIterator(start []byte) NodeIterator {
+func (t *SecureTrie) NodeIterator(start []byte) (NodeIterator, error) {
return t.trie.NodeIterator(start)
}
+// MustNodeIterator is a wrapper of NodeIterator that ignores any encountered
+// error, printing an error message instead.
+func (t *SecureTrie) MustNodeIterator(start []byte) NodeIterator {
+ return t.trie.MustNodeIterator(start)
+}
+
// hashKey returns the hash of key as an ephemeral buffer.
// The caller must not hold onto the return value because it will become
// invalid on the next call to hashKey or secKey.
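With NodeIterator now returning an error and MustNodeIterator reserved as the log-and-continue form for tests, production callers migrate as below (a sketch; dumpTrie is an illustrative name, with imports of fmt and trie assumed):

    // Before this change: it := trie.NewIterator(st.NodeIterator(nil))
    func dumpTrie(st *trie.SecureTrie) error {
        nodeIt, err := st.NodeIterator(nil)
        if err != nil {
            return err // e.g. trie.ErrCommitted or a missing-state error
        }
        it := trie.NewIterator(nodeIt)
        for it.Next() {
            fmt.Printf("%x => %x\n", it.Key, it.Value)
        }
        return it.Err
    }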
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index 835608a0e3..797d23d4b0 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -28,14 +28,14 @@ import (
)
func newEmptySecure() *SecureTrie {
- trie, _ := NewSecure(TrieID(common.Hash{}), NewDatabase(rawdb.NewMemoryDatabase()))
+ trie, _ := NewSecure(TrieID(common.Hash{}), NewDatabase(rawdb.NewMemoryDatabase(), nil))
return trie
}
// makeTestSecureTrie creates a large enough secure trie for testing.
func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie, _ := NewSecure(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index dd4c75f5f8..10baeaf441 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -189,7 +189,7 @@ func TestStackTrieInsertAndHash(t *testing.T) {
func TestSizeBug(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -204,7 +204,7 @@ func TestSizeBug(t *testing.T) {
func TestEmptyBug(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -230,7 +230,7 @@ func TestEmptyBug(t *testing.T) {
func TestValLength56(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -255,7 +255,7 @@ func TestValLength56(t *testing.T) {
// which causes a lot of node-within-node. This case was found via fuzzing.
func TestUpdateSmallNodes(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
kvs := []struct {
K string
@@ -284,7 +284,7 @@ func TestUpdateSmallNodes(t *testing.T) {
func TestUpdateVariableKeys(t *testing.T) {
t.SkipNow()
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
kvs := []struct {
K string
@@ -355,7 +355,7 @@ func TestStacktrieNotModifyValues(t *testing.T) {
func TestStacktrieSerialization(t *testing.T) {
var (
st = NewStackTrie(nil)
- nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
keyB = big.NewInt(1)
keyDelta = big.NewInt(1)
vals [][]byte
diff --git a/trie/sync.go b/trie/sync.go
index d68ea85673..a6338b8a89 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -282,17 +282,16 @@ func (s *Sync) Missing(max int) ([]string, []common.Hash, []common.Hash) {
s.queue.Pop()
s.fetches[depth]++
- switch item.(type) {
+ switch item := item.(type) {
case common.Hash:
- codeHashes = append(codeHashes, item.(common.Hash))
+ codeHashes = append(codeHashes, item)
case string:
- path := item.(string)
- req, ok := s.nodeReqs[path]
+ req, ok := s.nodeReqs[item]
if !ok {
- log.Error("Missing node request", "path", path)
+ log.Error("Missing node request", "path", item)
continue // System very wrong, shouldn't happen
}
- nodePaths = append(nodePaths, path)
+ nodePaths = append(nodePaths, item)
nodeHashes = append(nodeHashes, req.hash)
}
}
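The hunk above also switches to the bound form of the type switch: `switch item := item.(type)` re-declares item with its concrete type inside each case, so the per-case assertions disappear. In isolation (describe is an illustrative name; fmt and common imports assumed):

    // Sketch: bound type switch, as adopted in Sync.Missing above.
    func describe(item interface{}) string {
        switch item := item.(type) {
        case common.Hash: // item is a common.Hash in this branch
            return "code hash " + item.Hex()
        case string: // item is a string in this branch
            return "node path " + item
        default:
            return fmt.Sprintf("unexpected type %T", item)
        }
    }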
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 386e1995d7..b3fb10db14 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -36,7 +36,7 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *SecureTrie, map[st
db := rawdb.NewMemoryDatabase()
triedb := newTestDatabase(db, scheme)
- trie, _ := NewSecure(TrieID(common.Hash{}), triedb)
+ trie, _ := NewSecure(TrieID(types.EmptyRootHash), triedb)
// Fill it with some arbitrary data
content := make(map[string][]byte)
@@ -68,7 +68,9 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *SecureTrie, map[st
panic(err)
}
- // Return the generated trie
+ // Re-create the trie based on the new state
+ trie, _ = NewSecure(TrieID(root), triedb)
+
return db, triedb, trie, content
}
@@ -98,7 +100,7 @@ func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) er
if err != nil {
return nil // Consider a non existent state consistent
}
- it := trie.NodeIterator(nil)
+ it := trie.MustNodeIterator(nil)
for it.Next(true) {
}
return it.Error()
@@ -113,18 +115,18 @@ type trieElement struct {
// Tests that an empty trie is not scheduled for syncing.
func TestEmptySync(t *testing.T) {
- dbA := NewDatabase(rawdb.NewMemoryDatabase())
- dbB := NewDatabase(rawdb.NewMemoryDatabase())
- //dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
- //dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
+ dbA := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
+ dbB := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
+ dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
+ dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
emptyA := NewEmpty(dbA)
emptyB, _ := New(TrieID(emptyRoot), dbB)
- //emptyC := NewEmpty(dbC)
- //emptyD, _ := New(TrieID(types.EmptyRootHash), dbD)
+ emptyC := NewEmpty(dbC)
+ emptyD, _ := New(TrieID(types.EmptyRootHash), dbD)
- for i, trie := range []*Trie{emptyA, emptyB /*emptyC, emptyD*/} {
- sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()), []*Database{dbA, dbB /*dbC, dbD*/}[i].Scheme())
+ for i, trie := range []*Trie{emptyA, emptyB, emptyC, emptyD} {
+ sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()), []*Database{dbA, dbB, dbC, dbD}[i].Scheme())
if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
}
@@ -138,10 +140,10 @@ func TestIterativeSync(t *testing.T) {
testIterativeSync(t, 100, false, rawdb.HashScheme)
testIterativeSync(t, 1, true, rawdb.HashScheme)
testIterativeSync(t, 100, true, rawdb.HashScheme)
- // testIterativeSync(t, 1, false, rawdb.PathScheme)
- // testIterativeSync(t, 100, false, rawdb.PathScheme)
- // testIterativeSync(t, 1, true, rawdb.PathScheme)
- // testIterativeSync(t, 100, true, rawdb.PathScheme)
+ testIterativeSync(t, 1, false, rawdb.PathScheme)
+ testIterativeSync(t, 100, false, rawdb.PathScheme)
+ testIterativeSync(t, 1, true, rawdb.PathScheme)
+ testIterativeSync(t, 100, true, rawdb.PathScheme)
}
func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
@@ -168,7 +170,11 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
if !bypath {
for i, element := range elements {
owner, inner := ResolvePath([]byte(element.path))
- data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
+ reader, err := srcDb.Reader(srcTrie.Hash())
+ if err != nil {
+ t.Fatalf("failed to create reader for trie %x: %v", srcTrie.Hash(), err)
+ }
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
}
@@ -212,7 +218,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
// partial results are returned, and the others sent only later.
func TestIterativeDelayedSync(t *testing.T) {
testIterativeDelayedSync(t, rawdb.HashScheme)
- //testIterativeDelayedSync(t, rawdb.PathScheme)
+ testIterativeDelayedSync(t, rawdb.PathScheme)
}
func testIterativeDelayedSync(t *testing.T, scheme string) {
@@ -238,7 +244,11 @@ func testIterativeDelayedSync(t *testing.T, scheme string) {
results := make([]NodeSyncResult, len(elements)/2+1)
for i, element := range elements[:len(results)] {
owner, inner := ResolvePath([]byte(element.path))
- data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
+ reader, err := srcDb.Reader(srcTrie.Hash())
+ if err != nil {
+ t.Fatalf("failed to create reader for trie %x: %v", srcTrie.Hash(), err)
+ }
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -275,8 +285,8 @@ func testIterativeDelayedSync(t *testing.T, scheme string) {
func TestIterativeRandomSyncIndividual(t *testing.T) {
testIterativeRandomSync(t, 1, rawdb.HashScheme)
testIterativeRandomSync(t, 100, rawdb.HashScheme)
- // testIterativeRandomSync(t, 1, rawdb.PathScheme)
- // testIterativeRandomSync(t, 100, rawdb.PathScheme)
+ testIterativeRandomSync(t, 1, rawdb.PathScheme)
+ testIterativeRandomSync(t, 100, rawdb.PathScheme)
}
func testIterativeRandomSync(t *testing.T, count int, scheme string) {
@@ -303,7 +313,11 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) {
results := make([]NodeSyncResult, 0, len(queue))
for path, element := range queue {
owner, inner := ResolvePath([]byte(element.path))
- data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
+ reader, err := srcDb.Reader(srcTrie.Hash())
+ if err != nil {
+ t.Fatalf("failed to create reader for trie %x: %v", srcTrie.Hash(), err)
+ }
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -339,7 +353,7 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) {
// partial results are returned (Even those randomly), others sent only later.
func TestIterativeRandomDelayedSync(t *testing.T) {
testIterativeRandomDelayedSync(t, rawdb.HashScheme)
- // testIterativeRandomDelayedSync(t, rawdb.PathScheme)
+ testIterativeRandomDelayedSync(t, rawdb.PathScheme)
}
func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
@@ -366,7 +380,11 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
results := make([]NodeSyncResult, 0, len(queue)/2+1)
for path, element := range queue {
owner, inner := ResolvePath([]byte(element.path))
- data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
+ reader, err := srcDb.Reader(srcTrie.Hash())
+ if err != nil {
+ t.Fatalf("failed to create reader for trie %x: %v", srcTrie.Hash(), err)
+ }
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -407,7 +425,7 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
// have such references.
func TestDuplicateAvoidanceSync(t *testing.T) {
testDuplicateAvoidanceSync(t, rawdb.HashScheme)
- // testDuplicateAvoidanceSync(t, rawdb.PathScheme)
+ testDuplicateAvoidanceSync(t, rawdb.PathScheme)
}
func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
@@ -435,7 +453,12 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
owner, inner := ResolvePath([]byte(element.path))
- data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
+ reader, err := srcDb.Reader(srcTrie.Hash())
+ if err != nil {
+ t.Fatalf("failed to create reader for trie %x: %v", srcTrie.Hash(), err)
+ }
+
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -475,11 +498,10 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
// the database.
func TestIncompleteSyncHash(t *testing.T) {
testIncompleteSync(t, rawdb.HashScheme)
- // testIncompleteSync(t, rawdb.PathScheme)
+ testIncompleteSync(t, rawdb.PathScheme)
}
func testIncompleteSync(t *testing.T, scheme string) {
- t.Parallel()
// Create a random trie to copy
_, srcDb, srcTrie, _ := makeTestTrie(scheme)
@@ -509,7 +531,11 @@ func testIncompleteSync(t *testing.T, scheme string) {
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
owner, inner := ResolvePath([]byte(element.path))
- data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
+ reader, err := srcDb.Reader(srcTrie.Hash())
+ if err != nil {
+ t.Fatalf("failed to create reader for trie %x: %v", srcTrie.Hash(), err)
+ }
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -562,7 +588,7 @@ func testIncompleteSync(t *testing.T, scheme string) {
// depth.
func TestSyncOrdering(t *testing.T) {
testSyncOrdering(t, rawdb.HashScheme)
- // testSyncOrdering(t, rawdb.PathScheme)
+ testSyncOrdering(t, rawdb.PathScheme)
}
func testSyncOrdering(t *testing.T, scheme string) {
@@ -593,7 +619,12 @@ func testSyncOrdering(t *testing.T, scheme string) {
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
owner, inner := ResolvePath([]byte(element.path))
- data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
+ reader, err := srcDb.Reader(srcTrie.Hash())
+ if err != nil {
+ t.Fatalf("failed to create reader for trie %x: %v", srcTrie.Hash(), err)
+ }
+
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -656,7 +687,11 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
owner, inner := ResolvePath([]byte(element.path))
- data, err := srcDb.Reader(root).Node(owner, inner, element.hash)
+ reader, err := srcDb.Reader(root)
+ if err != nil {
+ t.Fatalf("failed to create reader for trie %x: %v", root, err)
+ }
+ data, err := reader.Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
}
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index e44b8826d6..ca11e2ea92 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -61,7 +61,7 @@ func TestTrieTracer(t *testing.T) {
// Tests if the trie diffs are tracked correctly. Tracer should capture
// all non-leaf dirty nodes, no matter the node is embedded or not.
func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
+ db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(db)
// Determine all new nodes are tracked
@@ -104,7 +104,7 @@ func TestTrieTracerNoop(t *testing.T) {
}
func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for _, val := range vals {
trie.Update([]byte(val.k), []byte(val.v))
}
@@ -128,7 +128,7 @@ func TestAccessList(t *testing.T) {
func testAccessList(t *testing.T, vals []struct{ k, v string }) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase())
+ db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie = NewEmpty(db)
orig = trie.Copy()
)
@@ -211,7 +211,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
// Tests origin values won't be tracked in Iterator or Prover
func TestAccessListLeak(t *testing.T) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase())
+ db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie = NewEmpty(db)
)
// Create trie from scratch
@@ -226,14 +226,14 @@ func TestAccessListLeak(t *testing.T) {
}{
{
func(tr *Trie) {
- it := tr.NodeIterator(nil)
+ it := tr.MustNodeIterator(nil)
for it.Next(true) {
}
},
},
{
func(tr *Trie) {
- it := NewIterator(tr.NodeIterator(nil))
+ it := NewIterator(tr.MustNodeIterator(nil))
for it.Next() {
}
},
@@ -262,7 +262,7 @@ func TestAccessListLeak(t *testing.T) {
// in its parent due to the smaller size of the original tree node.
func TestTinyTree(t *testing.T) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase())
+ db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie = NewEmpty(db)
)
for _, val := range tiny {
@@ -300,7 +300,7 @@ func compareSet(setA, setB map[string]struct{}) bool {
func forNodes(tr *Trie) map[string][]byte {
var (
- it = tr.NodeIterator(nil)
+ it = tr.MustNodeIterator(nil)
nodes = make(map[string][]byte)
)
for it.Next(true) {
@@ -320,7 +320,7 @@ func iterNodes(db *Database, root common.Hash) map[string][]byte {
func forHashedNodes(tr *Trie) map[string][]byte {
var (
- it = tr.NodeIterator(nil)
+ it = tr.MustNodeIterator(nil)
nodes = make(map[string][]byte)
)
for it.Next(true) {
diff --git a/trie/trie.go b/trie/trie.go
index 51883e7bb8..05d136fb1b 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -45,6 +45,11 @@ var (
type Trie struct {
owner common.Hash
root node
+
+ // Flag whether the commit operation is already performed. If so the
+ // trie is not usable(latest states is invisible).
+ committed bool
+
// Keep track of the number leafs which have been inserted since the last
// hashing operation. This number will not directly map to the number of
// actually unhashed nodes
@@ -70,7 +75,7 @@ func (t *Trie) newFlag() nodeFlag {
// zero hash or the sha3 hash of an empty string, then trie is initially
// empty, otherwise, the root node must be present in database or returns
// a MissingNodeError if not.
-func New(id *ID, db NodeReader) (*Trie, error) {
+func New(id *ID, db *Database) (*Trie, error) {
reader, err := newTrieReader(id.StateRoot, id.Owner, db)
if err != nil {
return nil, err
@@ -96,10 +101,24 @@ func NewEmpty(db *Database) *Trie {
return tr
}
+// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered
+// error but just print out an error message.
+func (t *Trie) MustNodeIterator(start []byte) NodeIterator {
+ it, err := t.NodeIterator(start)
+ if err != nil {
+ log.Error("Unhandled trie error in Trie.NodeIterator", "err", err)
+ }
+ return it
+}
+
// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
// the key after the given start key.
-func (t *Trie) NodeIterator(start []byte) NodeIterator {
- return newNodeIterator(t, start)
+func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, ErrCommitted
+ }
+ return newNodeIterator(t, start), nil
}
// Get returns the value for key stored in the trie.
@@ -116,6 +135,10 @@ func (t *Trie) Get(key []byte) []byte {
// The value bytes must not be modified by the caller.
// If a node was not found in the database, a MissingNodeError is returned.
func (t *Trie) TryGet(key []byte) ([]byte, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, ErrCommitted
+ }
value, newroot, didResolve, err := t.tryGet(t.root, keybytesToHex(key), 0)
if err == nil && didResolve {
t.root = newroot
@@ -162,6 +185,10 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value []byte, newnode
// TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not
// possible to use keybyte-encoding as the path might contain odd nibbles.
func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return nil, 0, ErrCommitted
+ }
item, newroot, resolved, err := t.tryGetNode(t.root, compactToHex(path), 0)
if err != nil {
return nil, resolved, err
@@ -266,6 +293,10 @@ func (t *Trie) TryUpdateAccount(key []byte, acc *types.StateAccount) error {
//
// If a node was not found in the database, a MissingNodeError is returned.
func (t *Trie) TryUpdate(key, value []byte) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
t.unhashed++
k := keybytesToHex(key)
if len(value) != 0 {
@@ -372,6 +403,10 @@ func (t *Trie) Delete(key []byte) {
// TryDelete removes any existing value for key from the trie.
// If a node was not found in the database, a MissingNodeError is returned.
func (t *Trie) TryDelete(key []byte) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
t.unhashed++
k := keybytesToHex(key)
_, n, err := t.delete(t.root, nil, k)
@@ -586,6 +621,9 @@ func (t *Trie) Hash() common.Hash {
func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
defer t.tracer.reset()
+ defer func() {
+ t.committed = true
+ }()
// (a) The trie was empty and no update happens => return nil
// (b) The trie was non-empty and all nodes are dropped => return
// the node set includes all deleted nodes
@@ -646,15 +684,17 @@ func (t *Trie) Reset() {
t.owner = common.Hash{}
t.unhashed = 0
t.tracer.reset()
+ t.committed = false
}
// Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie {
return &Trie{
- root: t.root,
- owner: t.owner,
- unhashed: t.unhashed,
- reader: t.reader,
- tracer: t.tracer.copy(),
+ root: t.root,
+ owner: t.owner,
+ committed: t.committed,
+ unhashed: t.unhashed,
+ reader: t.reader,
+ tracer: t.tracer.copy(),
}
}
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index 92cc8f54df..4c17a438ec 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -17,9 +17,10 @@
package trie
import (
- "fmt"
-
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/triestate"
)
// Reader wraps the Node and NodeBlob method of a backing trie store.
@@ -37,7 +38,7 @@ type Reader interface {
type NodeReader interface {
// Reader returns a reader for accessing all trie nodes with provided
// state root. Nil is returned in case the state is not available.
- Reader(root common.Hash) Reader
+ Reader(root common.Hash) (Reader, error)
}
// trieReader is a wrapper of the underlying node reader. It's not safe
@@ -50,9 +51,16 @@ type trieReader struct {
// newTrieReader initializes the trie reader with the given node reader.
func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) {
- reader := db.Reader(stateRoot)
- if reader == nil {
- return nil, fmt.Errorf("state not found #%x", stateRoot)
+ if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
+ if stateRoot == (common.Hash{}) {
+ log.Error("zero state root")
+ }
+ return &trieReader{owner: owner}, nil
+ }
+
+ reader, err := db.Reader(stateRoot)
+ if err != nil {
+ return nil, &MissingNodeError{Owner: owner, NodeHash: stateRoot, err: err}
}
return &trieReader{owner: owner, reader: reader}, nil
}
@@ -82,3 +90,18 @@ func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) {
}
return blob, nil
}
+
+// trieLoader implements triestate.TrieLoader for constructing tries.
+type trieLoader struct {
+ db *Database
+}
+
+// OpenTrie opens the main account trie.
+func (l *trieLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
+ return New(TrieID(root), l.db)
+}
+
+// OpenStorageTrie opens the storage trie of an account.
+func (l *trieLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
+ return New(StorageTrieID(stateRoot, addrHash, root), l.db)
+}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 062f465066..04e2851e6e 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -48,12 +48,12 @@ func init() {
// Used for testing
func newEmpty() *Trie {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
return trie
}
func TestEmptyTrie(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
res := trie.Hash()
exp := emptyRoot
if res != exp {
@@ -62,7 +62,7 @@ func TestEmptyTrie(t *testing.T) {
}
func TestNull(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
key := make([]byte, 32)
value := []byte("test")
trie.Update(key, value)
@@ -72,7 +72,13 @@ func TestNull(t *testing.T) {
}
func TestMissingRoot(t *testing.T) {
- trie, err := New(TrieID(common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")), NewDatabase(rawdb.NewMemoryDatabase()))
+ testMissingRoot(t, rawdb.HashScheme)
+ testMissingRoot(t, rawdb.PathScheme)
+}
+
+func testMissingRoot(t *testing.T, scheme string) {
+ root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
+ trie, err := New(TrieID(root), newTestDatabase(rawdb.NewMemoryDatabase(), scheme))
if trie != nil {
t.Error("New returned non-nil trie for invalid root")
}
@@ -83,9 +89,9 @@ func TestMissingRoot(t *testing.T) {
func TestMissingNode(t *testing.T) {
testMissingNode(t, false, rawdb.HashScheme)
- //testMissingNode(t, false, rawdb.PathScheme)
+ testMissingNode(t, false, rawdb.PathScheme)
testMissingNode(t, true, rawdb.HashScheme)
- //testMissingNode(t, true, rawdb.PathScheme)
+ testMissingNode(t, true, rawdb.PathScheme)
}
func testMissingNode(t *testing.T, memonly bool, scheme string) {
@@ -167,7 +173,7 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
}
func TestInsert(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
@@ -193,7 +199,7 @@ func TestInsert(t *testing.T) {
}
func TestGet(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
+ db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(db)
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
@@ -220,7 +226,7 @@ func TestGet(t *testing.T) {
}
func TestDelete(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -247,7 +253,7 @@ func TestDelete(t *testing.T) {
}
func TestEmptyValues(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -271,7 +277,7 @@ func TestEmptyValues(t *testing.T) {
}
func TestReplication(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
+ db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -338,7 +344,7 @@ func TestReplication(t *testing.T) {
}
func TestLargeValue(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
trie.Update([]byte("key1"), []byte{99, 99, 99, 99})
trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32))
trie.Hash()
@@ -475,9 +481,9 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
func runRandTest(rt randTest) bool {
var scheme = rawdb.HashScheme
- //if rand.Intn(2) == 0 {
- // scheme = rawdb.PathScheme
- //}
+ if rand.Intn(2) == 0 {
+ scheme = rawdb.PathScheme
+ }
var (
origin = types.EmptyRootHash
triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme)
@@ -550,7 +556,7 @@ func runRandTest(rt randTest) bool {
case opItercheckhash:
checktr := NewEmpty(triedb)
- it := NewIterator(tr.NodeIterator(nil))
+ it := NewIterator(tr.MustNodeIterator(nil))
for it.Next() {
checktr.Update(it.Key, it.Value)
}
@@ -583,7 +589,7 @@ func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
const benchElemCount = 20000
func benchGet(b *testing.B, commit bool) {
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(triedb)
if commit {
_, tmpdb := tempDB()
@@ -665,7 +671,7 @@ func BenchmarkCommitAfterHash(b *testing.B) {
func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
// Make the random benchmark deterministic
addresses, accounts := makeAccounts(b.N)
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -692,8 +698,8 @@ func TestTinyTrie(t *testing.T) {
if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
t.Errorf("3: got %x, exp %x", root, exp)
}
- checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
- it := NewIterator(trie.NodeIterator(nil))
+ checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
checktr.Update(it.Key, it.Value)
}
@@ -705,7 +711,7 @@ func TestTinyTrie(t *testing.T) {
func TestCommitAfterHash(t *testing.T) {
// Create a realistic account trie to hash
addresses, accounts := makeAccounts(1000)
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase()))
+ trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -771,11 +777,17 @@ func (s *spongeDb) Stat(property string) (string, error) { panic("implement
func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
func (s *spongeDb) Close() error { return nil }
func (s *spongeDb) Put(key []byte, value []byte) error {
- valbrief := value
+ var (
+ keybrief = key
+ valbrief = value
+ )
+ if len(keybrief) > 8 {
+ keybrief = keybrief[:8]
+ }
if len(valbrief) > 8 {
valbrief = valbrief[:8]
}
- s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, key[:8], len(value), valbrief))
+ s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief))
s.sponge.Write(key)
s.sponge.Write(value)
return nil
@@ -813,7 +825,7 @@ func TestCommitSequence(t *testing.T) {
addresses, accounts := makeAccounts(tc.count)
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := NewDatabase(rawdb.NewDatabase(s))
+ db := NewDatabase(rawdb.NewDatabase(s), nil)
trie := NewEmpty(db)
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
@@ -844,7 +856,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
prng := rand.New(rand.NewSource(int64(i)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := NewDatabase(rawdb.NewDatabase(s))
+ db := NewDatabase(rawdb.NewDatabase(s), nil)
trie := NewEmpty(db)
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
@@ -876,7 +888,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
prng := rand.New(rand.NewSource(int64(count)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
- db := NewDatabase(rawdb.NewDatabase(s))
+ db := NewDatabase(rawdb.NewDatabase(s), nil)
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
@@ -936,7 +948,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do.
func TestCommitSequenceSmallRoot(t *testing.T) {
s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
- db := NewDatabase(rawdb.NewDatabase(s))
+ db := NewDatabase(rawdb.NewDatabase(s), nil)
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
@@ -1116,7 +1128,7 @@ func BenchmarkDerefRootFixedSize(b *testing.B) {
func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
trie := NewEmpty(triedb)
for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
@@ -1138,7 +1150,7 @@ func tempDB() (string, *Database) {
if err != nil {
panic(fmt.Sprintf("can't create temporary database: %v", err))
}
- return dir, NewDatabase(rawdb.NewDatabase(diskdb))
+ return dir, NewDatabase(rawdb.NewDatabase(diskdb), nil)
}
func getString(trie *Trie, k string) []byte {
diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go
index 8d8701a60b..a86095916a 100644
--- a/trie/triedb/hashdb/database.go
+++ b/trie/triedb/hashdb/database.go
@@ -18,6 +18,7 @@ package hashdb
import (
"errors"
+ "fmt"
"reflect"
"sync"
"time"
@@ -64,6 +65,20 @@ type ChildResolver interface {
ForEach(node []byte, onChild func(common.Hash))
}
+// Config contains the settings for database.
+type Config struct {
+ CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
+}
+
+// Defaults is the default setting for database if it's not specified.
+// Notably, clean cache is disabled explicitly
+var Defaults = &Config{
+ // Explicitly set clean cache size to 0 to avoid creating fastcache,
+ // otherwise database must be closed when it's no longer needed to
+ // prevent memory leak.
+ CleanCacheSize: 0,
+}
+
// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
@@ -119,21 +134,17 @@ func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash commo
resolver.ForEach(n.node, onChild)
}
-// Config defines all necessary options for database.
-type Config struct {
- Cache int // Memory allowance (MB) to use for caching trie nodes in memory
- Journal string // Journal of clean cache to survive node restarts
- Preimages bool // Flag whether the preimage of trie key is recorded
-}
-
// New initializes the hash-based node database.
-func New(diskdb ethdb.Database, size int, resolver ChildResolver) *Database {
+func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Database {
+ if config == nil {
+ config = Defaults
+ }
// Initialize the clean cache if the specified cache allowance
// is non-zero. Note, the size is in bytes.
var cleans *fastcache.Cache
- if size > 0 {
- cleans = fastcache.New(size)
+ if config.CleanCacheSize > 0 {
+ cleans = fastcache.New(config.CleanCacheSize)
}
db := &Database{
diskdb: diskdb,
@@ -606,7 +617,14 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n
}
// Close closes the trie database and releases all held resources.
-func (db *Database) Close() error { return nil }
+func (db *Database) Close() error {
+
+ if db.cleans != nil {
+ db.cleans.Reset()
+ db.cleans = nil
+ }
+ return nil
+}
// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
@@ -627,8 +645,12 @@ func (db *Database) Scheme() string {
}
// Reader retrieves a node reader belonging to the given state root.
-func (db *Database) Reader(root common.Hash) *reader {
- return &reader{db: db}
+func (db *Database) Reader(root common.Hash) (*reader, error) {
+ if _, err := db.Node(root); err != nil {
+ return nil, fmt.Errorf("state %#x is not available, %v", root, err)
+ }
+
+ return &reader{db: db}, nil
}
// reader is a state reader of Database which implements the Reader interface.
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
index 588e477eb0..f02f2164ed 100644
--- a/trie/triedb/pathdb/database.go
+++ b/trie/triedb/pathdb/database.go
@@ -33,8 +33,27 @@ import (
"github.com/ethereum/go-ethereum/trie/triestate"
)
-// maxDiffLayers is the maximum diff layers allowed in the layer tree.
-const maxDiffLayers = 128
+const (
+ // maxDiffLayers is the maximum diff layers allowed in the layer tree.
+
+ maxDiffLayers = 128
+
+ // defaultCleanSize is the default memory allowance of clean cache.
+ defaultCleanSize = 16 * 1024 * 1024
+
+ // maxBufferSize is the maximum memory allowance of node buffer.
+ // Too large nodebuffer will cause the system to pause for a long
+ // time when write happens. Also, the largest batch that pebble can
+ // support is 4GB, node will panic if batch size exceeds this limit.
+ maxBufferSize = 256 * 1024 * 1024
+
+ // DefaultBufferSize is the default memory allowance of node buffer
+ // that aggregates the writes from above until it's flushed into the
+ // disk. It's meant to be used once the initial sync is finished.
+ // Do not increase the buffer size arbitrarily, otherwise the system
+ // pause time will increase when the database writes happen.
+ DefaultBufferSize = 64 * 1024 * 1024
+)
// layer is the interface implemented by all state layers which includes some
// public methods and some additional methods for internal usage.
@@ -68,28 +87,29 @@ type layer interface {
// Config contains the settings for database.
type Config struct {
- StateLimit uint64 // Number of recent blocks to maintain state history for
- CleanSize int // Maximum memory allowance (in bytes) for caching clean nodes
- DirtySize int // Maximum memory allowance (in bytes) for caching dirty nodes
- ReadOnly bool // Flag whether the database is opened in read only mode.
+ StateHistory uint64 // Number of recent blocks to maintain state history for
+ CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
+ DirtyCacheSize int // Maximum memory allowance (in bytes) for caching dirty nodes
+ ReadOnly bool // Flag whether the database is opened in read only mode.
}
-var (
- // defaultCleanSize is the default memory allowance of clean cache.
- defaultCleanSize = 16 * 1024 * 1024
-
- // defaultBufferSize is the default memory allowance of node buffer
- // that aggregates the writes from above until it's flushed into the
- // disk. Do not increase the buffer size arbitrarily, otherwise the
- // system pause time will increase when the database writes happen.
- defaultBufferSize = 128 * 1024 * 1024
-)
-
// Defaults contains default settings for Ethereum mainnet.
var Defaults = &Config{
- StateLimit: params.FullImmutabilityThreshold,
- CleanSize: defaultCleanSize,
- DirtySize: defaultBufferSize,
+ StateHistory: params.FullImmutabilityThreshold,
+ CleanCacheSize: defaultCleanSize,
+ DirtyCacheSize: DefaultBufferSize,
+}
+
+// ReadOnly is the config in order to open database in read only mode.
+var ReadOnly = &Config{ReadOnly: true}
+
+// sanitize checks the provided user configurations and changes anything that's
+// unreasonable or unworkable.
+func (c *Config) sanitize() {
+ if c.DirtyCacheSize > maxBufferSize {
+ log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(c.DirtyCacheSize), "updated", common.StorageSize(maxBufferSize))
+ c.DirtyCacheSize = maxBufferSize
+ }
}
// Database is a multiple-layered structure for maintaining in-memory trie nodes.
@@ -123,9 +143,10 @@ func New(diskdb ethdb.Database, config *Config) *Database {
if config == nil {
config = Defaults
}
+ config.sanitize()
db := &Database{
readOnly: config.ReadOnly,
- bufferSize: config.DirtySize,
+ bufferSize: config.DirtyCacheSize,
config: config,
diskdb: diskdb,
}
@@ -140,7 +161,7 @@ func New(diskdb ethdb.Database, config *Config) *Database {
// mechanism also ensures that at most one **non-readOnly** database
// is opened at the same time to prevent accidental mutation.
if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly {
- db.freezer, err = rawdb.NewStateHistoryFreezer(ancient, false)
+ db.freezer, err = rawdb.NewStateFreezer(ancient, false)
if err != nil {
log.Crit("Failed to open state history freezer", "err", err)
}
@@ -171,7 +192,7 @@ func (db *Database) Reader(root common.Hash) (layer, error) {
// Update adds a new layer into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all). Apart
// from that this function will flatten the extra diff layers at bottom into disk
-// to only keep 128 diff layers in memory by default.
+// to only keep 128 diff layerReaders in memory by default.
//
// The passed in maps(nodes, states) will be retained to avoid copying everything.
// Therefore, these maps must not be changed afterwards.
@@ -379,7 +400,12 @@ func (db *Database) SetBufferSize(size int) error {
db.lock.Lock()
defer db.lock.Unlock()
+ if size > maxBufferSize {
+ log.Info("Capped node buffer size", "provided", common.StorageSize(size), "adjusted", common.StorageSize(maxBufferSize))
+ size = maxBufferSize
+ }
db.bufferSize = size
+
return db.tree.bottom().setBufferSize(db.bufferSize)
}
@@ -398,7 +424,15 @@ func (db *Database) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
+ // Set the database to read-only mode to prevent all
+ // following mutations.
+
db.readOnly = true
+
+ // Release the memory held by clean cache.
+ db.tree.bottom().resetCache()
+
+ // Close the attached state history freezer.
if db.freezer == nil {
return nil
}
diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go
index 89a5042ada..37d92ac268 100644
--- a/trie/triedb/pathdb/database_test.go
+++ b/trie/triedb/pathdb/database_test.go
@@ -51,12 +51,13 @@ func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[comm
}
for key, val := range dirties {
if len(val) == 0 {
- h.Delete(key.Bytes())
+ h.TryDelete(key.Bytes())
} else {
- h.Update(key.Bytes(), val)
+ h.TryUpdate(key.Bytes(), val)
}
}
- return h.Commit(false)
+ root, nodes, _ := h.Commit(false)
+ return root, nodes
}
const (
@@ -99,7 +100,7 @@ type tester struct {
func newTester(t *testing.T) *tester {
var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
- db = New(disk, &Config{CleanSize: 256 * 1024, DirtySize: 256 * 1024})
+ db = New(disk, &Config{CleanCacheSize: 256 * 1024, DirtyCacheSize: 256 * 1024})
obj = &tester{
db: db,
preimages: make(map[common.Hash]common.Address),
diff --git a/trie/triedb/pathdb/difflayer_test.go b/trie/triedb/pathdb/difflayer_test.go
index 513f9685de..6b33ae758a 100644
--- a/trie/triedb/pathdb/difflayer_test.go
+++ b/trie/triedb/pathdb/difflayer_test.go
@@ -29,7 +29,7 @@ import (
func emptyLayer() *diskLayer {
return &diskLayer{
db: New(rawdb.NewMemoryDatabase(), nil),
- buffer: newNodeBuffer(defaultBufferSize, nil, 0),
+ buffer: newNodeBuffer(DefaultBufferSize, nil, 0),
}
}
diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go
index 34e2d62ba0..369f7a6cd5 100644
--- a/trie/triedb/pathdb/disklayer.go
+++ b/trie/triedb/pathdb/disklayer.go
@@ -48,8 +48,8 @@ func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.C
// Initialize a clean cache if the memory allowance is not zero
// or reuse the provided cache if it is not nil (inherited from
// the original disk layer).
- if cleans == nil && db.config.CleanSize != 0 {
- cleans = fastcache.New(db.config.CleanSize)
+ if cleans == nil && db.config.CleanCacheSize != 0 {
+ cleans = fastcache.New(db.config.CleanCacheSize)
}
return &diskLayer{
root: root,
@@ -177,7 +177,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
// corresponding states(journal), the stored state history will
// be truncated in the next restart.
if dl.db.freezer != nil {
- err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateLimit)
+ err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateHistory)
if err != nil {
return nil, err
}
@@ -278,6 +278,20 @@ func (dl *diskLayer) size() common.StorageSize {
return common.StorageSize(dl.buffer.size)
}
+// resetCache releases the memory held by clean cache to prevent memory leak.
+func (dl *diskLayer) resetCache() {
+ dl.lock.RLock()
+ defer dl.lock.RUnlock()
+
+ // Stale disk layer loses the ownership of clean cache.
+ if dl.stale {
+ return
+ }
+ if dl.cleans != nil {
+ dl.cleans.Reset()
+ }
+}
+
// hasher is used to compute the sha256 hash of the provided data.
type hasher struct{ sha crypto.KeccakState }
diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go
index d76db53d56..2216524b46 100644
--- a/trie/triedb/pathdb/history_test.go
+++ b/trie/triedb/pathdb/history_test.go
@@ -248,7 +248,7 @@ func TestTruncateTailHistories(t *testing.T) {
// openFreezer initializes the freezer instance for storing state histories.
func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) {
- return rawdb.NewStateHistoryFreezer(datadir, readOnly)
+ return rawdb.NewStateFreezer(datadir, readOnly)
}
func compareSet[k comparable](a, b map[k][]byte) bool {
diff --git a/trie/triedb/pathdb/testutils.go b/trie/triedb/pathdb/testutils.go
index 3feb6217d2..674f52fd16 100644
--- a/trie/triedb/pathdb/testutils.go
+++ b/trie/triedb/pathdb/testutils.go
@@ -57,7 +57,7 @@ func newTestHasher(owner common.Hash, root common.Hash, cleans map[common.Hash][
}
// Get returns the value for key stored in the trie.
-func (h *testHasher) Get(key []byte) ([]byte, error) {
+func (h *testHasher) TryGet(key []byte) ([]byte, error) {
hash := common.BytesToHash(key)
val, ok := h.dirties[hash]
if ok {
@@ -67,20 +67,20 @@ func (h *testHasher) Get(key []byte) ([]byte, error) {
}
// Update associates key with value in the trie.
-func (h *testHasher) Update(key, value []byte) error {
+func (h *testHasher) TryUpdate(key, value []byte) error {
h.dirties[common.BytesToHash(key)] = common.CopyBytes(value)
return nil
}
// Delete removes any existing value for key from the trie.
-func (h *testHasher) Delete(key []byte) error {
+func (h *testHasher) TryDelete(key []byte) error {
h.dirties[common.BytesToHash(key)] = nil
return nil
}
// Commit computes the new hash of the states and returns the set with all
// state changes.
-func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
var (
nodes = make(map[common.Hash][]byte)
set = trienode.NewNodeSet(h.owner)
@@ -108,7 +108,7 @@ func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
if root == types.EmptyRootHash && h.root != types.EmptyRootHash {
set.AddNode(nil, trienode.NewDeleted())
}
- return root, set
+ return root, set, nil
}
// hash performs the hash computation upon the provided states.
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
index 0e00b67d78..2b2f3720d9 100644
--- a/trie/triestate/state.go
+++ b/trie/triestate/state.go
@@ -34,17 +34,17 @@ import (
// tree or Verkle tree.
type Trie interface {
// Get returns the value for key stored in the trie.
- Get(key []byte) ([]byte, error)
+ TryGet(key []byte) ([]byte, error)
// Update associates key with value in the trie.
- Update(key, value []byte) error
+ TryUpdate(key, value []byte) error
// Delete removes any existing value for key from the trie.
- Delete(key []byte) error
+ TryDelete(key []byte) error
// Commit the trie and returns a set of dirty nodes generated along with
// the new root hash.
- Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet)
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
}
// TrieLoader wraps functions to load tries.
@@ -131,7 +131,10 @@ func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Addre
return nil, fmt.Errorf("failed to revert state, err: %w", err)
}
}
- root, result := tr.Commit(false)
+ root, result, err := tr.Commit(false)
+ if err != nil {
+ return nil, err
+ }
if root != prevRoot {
return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root)
}
@@ -157,7 +160,7 @@ func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
}
// The account may or may not existent in post-state, try to
// load it and decode if it's found.
- blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+ blob, err := ctx.accountTrie.TryGet(addrHash.Bytes())
if err != nil {
return err
}
@@ -175,15 +178,18 @@ func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
for key, val := range ctx.storages[addr] {
var err error
if len(val) == 0 {
- err = st.Delete(key.Bytes())
+ err = st.TryDelete(key.Bytes())
} else {
- err = st.Update(key.Bytes(), val)
+ err = st.TryUpdate(key.Bytes(), val)
}
if err != nil {
return err
}
}
- root, result := st.Commit(false)
+ root, result, err := st.Commit(false)
+ if err != nil {
+ return err
+ }
if root != prev.Root {
return errors.New("failed to reset storage trie")
}
@@ -199,7 +205,7 @@ func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error {
if err != nil {
return err
}
- return ctx.accountTrie.Update(addrHash.Bytes(), full)
+ return ctx.accountTrie.TryUpdate(addrHash.Bytes(), full)
}
// deleteAccount the account was not present in prev-state, and is expected
@@ -211,7 +217,7 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
defer h.release()
addrHash := h.hash(addr.Bytes())
- blob, err := ctx.accountTrie.Get(addrHash.Bytes())
+ blob, err := ctx.accountTrie.TryGet(addrHash.Bytes())
if err != nil {
return err
}
@@ -230,11 +236,14 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
if len(val) != 0 {
return errors.New("expect storage deletion")
}
- if err := st.Delete(key.Bytes()); err != nil {
+ if err := st.TryDelete(key.Bytes()); err != nil {
return err
}
}
- root, result := st.Commit(false)
+ root, result, err := st.Commit(false)
+ if err != nil {
+ return err
+ }
if root != types.EmptyRootHash {
return errors.New("failed to clear storage trie")
}
@@ -246,7 +255,7 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error {
}
}
// Delete the post-state account from the main trie.
- return ctx.accountTrie.Delete(addrHash.Bytes())
+ return ctx.accountTrie.TryDelete(addrHash.Bytes())
}
// hasher is used to compute the sha256 hash of the provided data.
From a5d7f7b1810733acf0930112d465773fb016f146 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Fri, 25 Oct 2024 14:15:10 +0700
Subject: [PATCH 28/41] Fix missing passing scheme when init genesis and avoid
referencing same object when passing parents in cosortium v1 (#608)
* cmd,eth: fix wrong compare logic when data dir is empty and moving checking error correctly
* docker: passing state.scheme when initing the genesis data
* rawdb: add missing freezer in collections
* v1/consortium: create a copy to keep parents content
In snapshot function, the list parents is popped out gradually for getting its contents, so when calling apply, the parents list is empty. Simply create a copy at the beginning to fix it.
This has been fixed in consortium v2. For a full sync scenario, however, the first blocks are still processed with consortium v1, which causes our node to panic.
---------
Co-authored-by: Francesco4203
---
cmd/utils/flags.go | 2 +-
consensus/consortium/v1/consortium.go | 10 +++++++---
core/rawdb/ancient_scheme.go | 2 +-
docker/chainnode/entrypoint.sh | 22 +++++++++++++++-------
eth/backend.go | 5 +++--
5 files changed, 27 insertions(+), 14 deletions(-)
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 2e070815b9..8f480b9c17 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -2441,7 +2441,7 @@ func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
// If state scheme is specified, ensure it's compatible with
// persistent state.
scheme := ctx.String(StateSchemeFlag.Name)
- if stored != "" || scheme == stored {
+ if stored == "" || scheme == stored {
log.Info("State scheme set by user", "scheme", scheme)
return scheme, nil
}
diff --git a/consensus/consortium/v1/consortium.go b/consensus/consortium/v1/consortium.go
index f208275857..dee4b9c326 100644
--- a/consensus/consortium/v1/consortium.go
+++ b/consensus/consortium/v1/consortium.go
@@ -308,9 +308,13 @@ func (c *Consortium) verifyCascadingFields(chain consensus.ChainHeaderReader, he
func (c *Consortium) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
// Search for a snapshot in memory or on disk for checkpoints
var (
- headers []*types.Header
- snap *Snapshot
+ headers []*types.Header
+ snap *Snapshot
+ cpyParents = make([]*types.Header, len(parents))
)
+ // We must copy parents before going to the loop because parents are modified.
+ // If not, the FindAncientHeader function can not find its block ancestor
+ copy(cpyParents, parents)
for snap == nil {
// If an in-memory snapshot was found, use that
if s, ok := c.recents.Get(hash); ok {
@@ -370,7 +374,7 @@ func (c *Consortium) snapshot(chain consensus.ChainHeaderReader, number uint64,
for i := 0; i < len(headers)/2; i++ {
headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i]
}
- snap, err := snap.apply(chain, c, headers, parents)
+ snap, err := snap.apply(chain, c, headers, cpyParents)
if err != nil {
return nil, err
}
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
index 2773d3611a..0e6d4bea5a 100644
--- a/core/rawdb/ancient_scheme.go
+++ b/core/rawdb/ancient_scheme.go
@@ -78,7 +78,7 @@ var (
)
// freezers the collections of all builtin freezers.
-var freezers = []string{chainFreezerName}
+var freezers = []string{chainFreezerName, stateFreezerName}
// NewStateFreezer initializes the freezer for state history.
func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) {
diff --git a/docker/chainnode/entrypoint.sh b/docker/chainnode/entrypoint.sh
index b1825f88fd..095cfdc37f 100755
--- a/docker/chainnode/entrypoint.sh
+++ b/docker/chainnode/entrypoint.sh
@@ -29,6 +29,7 @@ params=""
syncmode="snap"
mine="true"
blsParams=""
+state_scheme="hash"
set -e
@@ -48,6 +49,11 @@ if [[ ! -z $WS_PORT ]]; then
ws_port="$WS_PORT"
fi
+if [[ ! -z $STATE_SCHEME ]]; then
+ state_scheme="$STATE_SCHEME"
+fi
+
+
# networkid
if [[ ! -z $NETWORK_ID ]]; then
case $NETWORK_ID in
@@ -78,15 +84,14 @@ fi
# data dir
if [[ ! -d $datadir/ronin ]]; then
- echo "No blockchain data, creating genesis block."
- ronin init $dbEngine --datadir $datadir $genesisPath 2> /dev/null
+ echo "No blockchain data, creating genesis block with $genesisPath, state_scheme $state_scheme ..."
+ ronin init $dbEngine --datadir $datadir --state.scheme $state_scheme $genesisPath $genesisPath
elif [[ "$FORCE_INIT" = "true" && "$INIT_FORCE_OVERRIDE_CHAIN_CONFIG" = "true" ]]; then
- echo "Forcing update chain config with force overriding chain config."
- ronin init $dbEngine --overrideChainConfig --datadir $datadir $genesisPath 2> /dev/null
+ echo "Forcing update chain config with force overriding chain config with $genesisPath, state_scheme $state_scheme ..."
+ ronin init $dbEngine --overrideChainConfig --datadir $datadir --state.scheme $state_scheme $genesisPath
elif [ "$FORCE_INIT" = "true" ]; then
- echo "Forcing update chain config."
- ronin init $dbEngine --datadir $datadir $genesisPath 2> /dev/null
-fi
+ echo "Forcing update chain config with $genesisPath, state_scheme $state_scheme ..."
+ ronin init $dbEngine --datadir $datadir --state.scheme $state_scheme $genesisPath
# password file
if [[ ! -f $PASSWORD_FILE ]]; then
@@ -333,6 +338,9 @@ if [[ "$BLS_SHOW_PRIVATE_KEY" = "true" ]]; then
--finality.blswalletpath $BLS_PRIVATE_KEY_DIR \
--secret
fi
+echo "---------------------------------"
+echo "Starting the Ronin Node"
+echo "---------------------------------"
exec ronin $params \
--syncmode $syncmode \
diff --git a/eth/backend.go b/eth/backend.go
index 12cdafeb3d..6345ad8097 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -207,11 +207,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
)
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, config.OverrideArrowGlacier, eth.engine, vmConfig, eth.shouldPreserve, &config.TransactionHistory)
- chainConfig := eth.blockchain.Config()
- genesisHash := eth.blockchain.Genesis().Hash()
if err != nil {
return nil, err
}
+ chainConfig := eth.blockchain.Config()
+ genesisHash := eth.blockchain.Genesis().Hash()
+
if config.EnableMonitorDoubleSign {
go eth.blockchain.StartDoubleSignMonitor()
}
From d912955940ee9f3722ffe9bef1af061592b744aa Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Fri, 25 Oct 2024 17:49:41 +0700
Subject: [PATCH 29/41] core, eth/downloader: pbss fix release v1.13.1 (#614)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* eth/downloader: prevent pivot moves after state commit (#28126)
* core, eth/downloader: fix genesis state missing due to state sync (#28124)
* core: fix chain repair corner case in path-based scheme
* eth/downloader: disable trie database whenever state sync is launched
---------
Co-authored-by: Péter Szilágyi
Co-authored-by: rjl493456442
---
core/blockchain.go | 98 ++++++++++++++++++++----------------
eth/downloader/downloader.go | 44 ++++++++++++----
2 files changed, 88 insertions(+), 54 deletions(-)
diff --git a/core/blockchain.go b/core/blockchain.go
index 632a679a08..d1ec0cedca 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -372,28 +372,38 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
// Make sure the state associated with the block is available
head := bc.CurrentBlock()
if !bc.HasState(head.Root()) {
- // Head state is missing, before the state recovery, find out the
- // disk layer point of snapshot(if it's enabled). Make sure the
- // rewound point is lower than disk layer.
- var diskRoot common.Hash
- if bc.cacheConfig.SnapshotLimit > 0 {
- diskRoot = rawdb.ReadSnapshotRoot(bc.db)
- }
- if diskRoot != (common.Hash{}) {
- log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash(), "snaproot", diskRoot)
-
- snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
- if err != nil {
- return nil, err
- }
- // Chain rewound, persist old snapshot number to indicate recovery procedure
- if snapDisk != 0 {
- rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
- }
+ if head.NumberU64() == 0 {
+ // The genesis state is missing, which is only possible in the path-based
+ // scheme. This situation occurs when the state syncer overwrites it.
+ //
+ // The solution is to reset the state to the genesis state. Although it may not
+ // match the sync target, the state healer will later address and correct any
+ // inconsistencies.
+ bc.resetState()
} else {
- log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
- if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
- return nil, err
+ // Head state is missing, before the state recovery, find out the
+ // disk layer point of snapshot(if it's enabled). Make sure the
+ // rewound point is lower than disk layer.
+ var diskRoot common.Hash
+ if bc.cacheConfig.SnapshotLimit > 0 {
+ diskRoot = rawdb.ReadSnapshotRoot(bc.db)
+ }
+ if diskRoot != (common.Hash{}) {
+ log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot)
+
+ snapDisk, err := bc.setHeadBeyondRoot(head.NumberU64(), diskRoot, true)
+ if err != nil {
+ return nil, err
+ }
+ // Chain rewound, persist old snapshot number to indicate recovery procedure
+ if snapDisk != 0 {
+ rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk)
+ }
+ } else {
+ log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash())
+ if _, err := bc.setHeadBeyondRoot(head.NumberU64(), common.Hash{}, true); err != nil {
+ return nil, err
+ }
}
}
}
@@ -665,6 +675,28 @@ func (bc *BlockChain) SetHead(head uint64) error {
return err
}
+// resetState resets the persistent state to genesis state if it's not present.
+func (bc *BlockChain) resetState() {
+ // Short circuit if the genesis state is already present.
+ root := bc.genesisBlock.Root()
+ if bc.HasState(root) {
+ return
+ }
+ // Reset the state database to empty for committing genesis state.
+ // Note, it should only happen in path-based scheme and Reset function
+ // is also only call-able in this mode.
+ if bc.triedb.Scheme() == rawdb.PathScheme {
+ if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
+ log.Crit("Failed to clean state", "err", err) // Shouldn't happen
+ }
+ }
+ // Write genesis state into database.
+ if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
+ log.Crit("Failed to commit genesis state", "err", err)
+ }
+ log.Info("Reset state to genesis", "root", root)
+}
+
// setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
@@ -687,26 +719,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
pivot := rawdb.ReadLastPivotNumber(bc.db)
frozen, _ := bc.db.Ancients()
- // resetState resets the persistent state to genesis if it's not available.
- resetState := func() {
- // Short circuit if the genesis state is already present.
- if bc.HasState(bc.genesisBlock.Root()) {
- return
- }
- // Reset the state database to empty for committing genesis state.
- // Note, it should only happen in path-based scheme and Reset function
- // is also only call-able in this mode.
- if bc.triedb.Scheme() == rawdb.PathScheme {
- if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
- log.Crit("Failed to clean state", "err", err) // Shouldn't happen
- }
- }
- // Write genesis state into database.
- if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
- log.Crit("Failed to commit genesis state", "err", err)
- }
- }
-
updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
// Rewind the blockchain, ensuring we don't end up with a stateless head
// block. Note, depth equality is permitted to allow using SetHead as a
@@ -716,7 +728,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if newHeadBlock == nil {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
- resetState()
+ bc.resetState()
} else {
// Block exists, keep rewinding until we find one with state,
// keeping rewinding until we exceed the optional threshold
@@ -747,7 +759,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
}
if beyondRoot || newHeadBlock.NumberU64() == 0 {
if newHeadBlock.NumberU64() == 0 {
- resetState()
+ bc.resetState()
} else if !bc.HasState(newHeadBlock.Root()) {
// Rewind to a block with recoverable state. If the state is
// missing, run the state recovery here.
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index a0cc2fd341..345c6d98f4 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -383,6 +383,13 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
// but until snap becomes prevalent, we should support both. TODO(karalabe).
if mode == SnapSync {
if !d.snapSync {
+ // Snap sync will directly modify the persistent state, making the entire
+ // trie database unusable until the state is fully synced. To prevent any
+ // subsequent state reads, explicitly disable the trie database and state
+ // syncer is responsible to address and correct any state missing.
+ if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
+ d.blockchain.TrieDB().Reset(types.EmptyRootHash)
+ }
// Snap sync uses the snapshot namespace to store potentially flakey data until
// sync completely heals and finishes. Pause snapshot maintenance in the mean
// time to prevent access.
@@ -1777,17 +1784,30 @@ func (d *Downloader) processFastSyncContent() error {
// To cater for moving pivot points, track the pivot block and subsequently
// accumulated download results separately.
+ //
+ // These will be nil up to the point where we reach the pivot, and will only
+ // be set temporarily if the synced blocks are piling up, but the pivot is
+ // still busy downloading. In that case, we need to occasionally check for
+ // pivot moves, so need to unblock the loop. These fields will accumulate
+ // the results in the meantime.
+ //
+ // Note, there's no issue with memory piling up since after 64 blocks the
+ // pivot will forcefully move so these accumulators will be dropped.
var (
oldPivot *fetchResult // Locked in pivot block, might change eventually
oldTail []*fetchResult // Downloaded content after the pivot
)
for {
- // Wait for the next batch of downloaded data to be available, and if the pivot
- // block became stale, move the goalpost
- results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
+ // Wait for the next batch of downloaded data to be available. If we have
+ // not yet reached the pivot point, wait blockingly as there's no need to
+ // spin-loop check for pivot moves. If we reached the pivot but have not
+ // yet processed it, check for results async, so we might notice pivot
+ // moves while state syncing. If the pivot was passed fully, block again
+ // as there's no more reason to check for pivot moves at all.
+ results := d.queue.Results(oldPivot == nil)
if len(results) == 0 {
// If pivot sync is done, stop
- if oldPivot == nil {
+ if atomic.LoadInt32(&d.committed) == 1 {
return sync.Cancel()
}
// If sync failed, stop
@@ -1807,21 +1827,23 @@ func (d *Downloader) processFastSyncContent() error {
pivot := d.pivotHeader
d.pivotLock.RUnlock()
- if oldPivot == nil {
- if pivot.Root != sync.root {
- sync.Cancel()
- sync = d.syncState(pivot.Root)
+ if oldPivot == nil { // no results piling up, we can move the pivot
+ if atomic.LoadInt32(&d.committed) == 0 { // not yet passed the pivot, we can move the pivot
+ if pivot.Root != sync.root { // pivot position changed, we can move the pivot
+ sync.Cancel()
+ sync = d.syncState(pivot.Root)
- go closeOnErr(sync)
+ go closeOnErr(sync)
+ }
}
- } else {
+ } else { // results already piled up, consume before handling pivot move
results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
}
// Split around the pivot block and process the two sides via fast/full sync
if atomic.LoadInt32(&d.committed) == 0 {
latest := results[len(results)-1].Header
// If the height is above the pivot block by 2 sets, it means the pivot
- // become stale in the network and it was garbage collected, move to a
+ // become stale in the network, and it was garbage collected, move to a
// new pivot.
//
// Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those
From 92a516663eb50a6848aa1d33911679d89fcf1b64 Mon Sep 17 00:00:00 2001
From: minh-bq <97180373+minh-bq@users.noreply.github.com>
Date: Tue, 29 Oct 2024 14:19:40 +0700
Subject: [PATCH 30/41] rlp, trie: faster trie node encoding (#24126) (#606)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
commit https://github.com/ethereum/go-ethereum/commit/65ed1a6871569ce616f18d69f6ec8cfbec85c1f2.
This change speeds up trie hashing and all other activities that require
RLP encoding of trie nodes by approximately 20%. The speedup is achieved by
avoiding reflection overhead during node encoding.
The interface type trie.node now contains a method 'encode' that works with
rlp.EncoderBuffer. Management of EncoderBuffers is left to calling code.
trie.hasher, which is pooled to avoid allocations, now maintains an
EncoderBuffer. This means memory resources related to trie node encoding
are tied to the hasher pool.
This also refactors some functions in rlp package.
goos: linux
goarch: amd64
cpu: 11th Gen Intel(R) Core(TM) i7-1165G7 @ 2.80GHz
│ old.txt │ new.txt │
│ sec/op │ sec/op vs base │
DeriveSha200/std_trie-8 725.1µ ± 31% 613.8µ ± 37% ~ (p=0.481 n=10)
DeriveSha200/stack_trie-8 572.3µ ± 10% 493.1µ ± 13% -13.85% (p=0.005 n=10)
geomean 644.2µ 550.1µ -14.61%
│ old.txt │ new.txt │
│ B/op │ B/op vs base │
DeriveSha200/std_trie-8 287.4Ki ± 0% 283.0Ki ± 0% -1.53% (p=0.000 n=10)
DeriveSha200/stack_trie-8 56.34Ki ± 0% 42.43Ki ± 0% -24.69% (p=0.000 n=10)
geomean 127.2Ki 109.6Ki -13.88%
│ old.txt │ new.txt │
│ allocs/op │ allocs/op vs base │
DeriveSha200/std_trie-8 2.931k ± 0% 2.917k ± 0% -0.46% (p=0.000 n=10)
DeriveSha200/stack_trie-8 1.462k ± 0% 1.246k ± 0% -14.77% (p=0.000 n=10)
geomean 2.070k 1.907k -7.90%
│ old.txt │ new.txt │
│ sec/op │ sec/op vs base │
Prove-8 664.0µ ± 21% 450.2µ ± 27% -32.20% (p=0.000 n=10)
VerifyProof-8 8.643µ ± 18% 9.009µ ± 33% ~ (p=0.684 n=10)
VerifyRangeProof10-8 99.18µ ± 25% 67.60µ ± 67% ~ (p=0.089 n=10)
VerifyRangeProof100-8 496.3µ ± 20% 487.0µ ± 33% ~ (p=0.739 n=10)
VerifyRangeProof1000-8 5.149m ± 32% 4.095m ± 49% ~ (p=0.971 n=10)
VerifyRangeProof5000-8 19.79m ± 60% 19.16m ± 28% ~ (p=0.631 n=10)
VerifyRangeNoProof10-8 499.0µ ± 15% 422.8µ ± 29% -15.25% (p=0.035 n=10)
VerifyRangeNoProof500-8 1.747m ± 30% 1.417m ± 24% -18.91% (p=0.023 n=10)
VerifyRangeNoProof1000-8 3.025m ± 29% 2.239m ± 33% -25.98% (p=0.009 n=10)
geomean 750.9µ 622.6µ -17.09%
│ old.txt │ new.txt │
│ sec/op │ sec/op vs base │
HashFixedSize/10-8 60.30µ ± 19% 44.84µ ± 17% -25.64% (p=0.000 n=10)
HashFixedSize/100-8 205.9µ ± 32% 145.2µ ± 19% -29.48% (p=0.000 n=10)
HashFixedSize/1K-8 1326.5µ ± 23% 939.2µ ± 25% -29.20% (p=0.002 n=10)
HashFixedSize/10K-8 14.77m ± 25% 12.74m ± 19% ~ (p=0.075 n=10)
HashFixedSize/100K-8 135.2m ± 19% 104.1m ± 18% -23.03% (p=0.003 n=10)
geomean 2.011m 1.520m -24.43%
│ old.txt │ new.txt │
│ B/op │ B/op vs base │
HashFixedSize/10-8 11.729Ki ± 0% 9.752Ki ± 0% -16.85% (p=0.000 n=10)
HashFixedSize/100-8 58.56Ki ± 0% 49.23Ki ± 0% -15.93% (p=0.000 n=10)
HashFixedSize/1K-8 578.1Ki ± 0% 481.5Ki ± 0% -16.72% (p=0.000 n=10)
HashFixedSize/10K-8 6.019Mi ± 0% 4.985Mi ± 0% -17.18% (p=0.000 n=10)
HashFixedSize/100K-8 59.53Mi ± 0% 49.29Mi ± 0% -17.20% (p=0.000 n=10)
geomean 683.5Ki 568.8Ki -16.78%
│ old.txt │ new.txt │
│ allocs/op │ allocs/op vs base │
HashFixedSize/10-8 149.0 ± 0% 142.0 ± 0% -4.70% (p=0.000 n=10)
HashFixedSize/100-8 772.0 ± 0% 739.0 ± 0% -4.27% (p=0.000 n=10)
HashFixedSize/1K-8 7.443k ± 0% 7.099k ± 0% -4.62% (p=0.000 n=10)
HashFixedSize/10K-8 77.09k ± 0% 73.32k ± 0% -4.89% (p=0.000 n=10)
HashFixedSize/100K-8 767.8k ± 0% 730.5k ± 0% -4.86% (p=0.000 n=10)
geomean 8.729k 8.321k -4.67%
Co-authored-by: Qian Bin
Co-authored-by: Felix Lange
---
rlp/encbuffer.go | 51 +++++++++++++++++++++---------
rlp/encode.go | 10 ++----
rlp/encode_test.go | 15 +++++++++
trie/committer.go | 2 --
trie/hasher.go | 56 +++++++++++++++++----------------
trie/iterator.go | 4 +--
trie/node.go | 16 +++-------
trie/node_enc.go | 64 ++++++++++++++++++++++++++++++++++++++
trie/proof.go | 3 +-
trie/stacktrie.go | 77 ++++++++++++++++++++++------------------------
trie/trie_test.go | 8 +++--
11 files changed, 195 insertions(+), 111 deletions(-)
create mode 100644 trie/node_enc.go
diff --git a/rlp/encbuffer.go b/rlp/encbuffer.go
index 64dd4fd884..b4582f12e6 100644
--- a/rlp/encbuffer.go
+++ b/rlp/encbuffer.go
@@ -39,27 +39,31 @@ func (buf *encBuffer) size() int {
return len(buf.str) + buf.lhsize
}
-// toBytes creates the encoder output.
-func (w *encBuffer) toBytes() []byte {
+// makeBytes creates the encoder output.
+func (w *encBuffer) makeBytes() []byte {
out := make([]byte, w.size())
+ w.copyTo(out)
+ return out
+}
+
+func (w *encBuffer) copyTo(dst []byte) {
strpos := 0
pos := 0
for _, head := range w.lheads {
// write string data before header
- n := copy(out[pos:], w.str[strpos:head.offset])
+ n := copy(dst[pos:], w.str[strpos:head.offset])
pos += n
strpos += n
// write the header
- enc := head.encode(out[pos:])
+ enc := head.encode(dst[pos:])
pos += len(enc)
}
// copy string data after the last list header
- copy(out[pos:], w.str[strpos:])
- return out
+ copy(dst[pos:], w.str[strpos:])
}
-// toWriter writes the encoder output to w.
-func (buf *encBuffer) toWriter(w io.Writer) (err error) {
+// writeTo writes the encoder output to w.
+func (buf *encBuffer) writeTo(w io.Writer) (err error) {
strpos := 0
for _, head := range buf.lheads {
// write string data before header
@@ -268,6 +272,19 @@ func (r *encReader) next() []byte {
}
}
+func encBufferFromWriter(w io.Writer) *encBuffer {
+ switch w := w.(type) {
+ case EncoderBuffer:
+ return w.buf
+ case *EncoderBuffer:
+ return w.buf
+ case *encBuffer:
+ return w
+ default:
+ return nil
+ }
+}
+
// EncoderBuffer is a buffer for incremental encoding.
//
// The zero value is NOT ready for use. To get a usable buffer,
@@ -295,14 +312,10 @@ func (w *EncoderBuffer) Reset(dst io.Writer) {
// If the destination writer has an *encBuffer, use it.
// Note that w.ownBuffer is left false here.
if dst != nil {
- if outer, ok := dst.(*encBuffer); ok {
+ if outer := encBufferFromWriter(dst); outer != nil {
*w = EncoderBuffer{outer, nil, false}
return
}
- if outer, ok := dst.(EncoderBuffer); ok {
- *w = EncoderBuffer{outer.buf, nil, false}
- return
- }
}
// Get a fresh buffer.
@@ -319,7 +332,7 @@ func (w *EncoderBuffer) Reset(dst io.Writer) {
func (w *EncoderBuffer) Flush() error {
var err error
if w.dst != nil {
- err = w.buf.toWriter(w.dst)
+ err = w.buf.writeTo(w.dst)
}
// Release the internal buffer.
if w.ownBuffer {
@@ -331,7 +344,15 @@ func (w *EncoderBuffer) Flush() error {
// ToBytes returns the encoded bytes.
func (w *EncoderBuffer) ToBytes() []byte {
- return w.buf.toBytes()
+ return w.buf.makeBytes()
+}
+
+// AppendToBytes appends the encoded bytes to dst.
+func (w *EncoderBuffer) AppendToBytes(dst []byte) []byte {
+ size := w.buf.size()
+ out := append(dst, make([]byte, size)...)
+ w.buf.copyTo(out[len(dst):])
+ return out
}
// Write appends b directly to the encoder output.
diff --git a/rlp/encode.go b/rlp/encode.go
index de11410b4f..cb2418b8b3 100644
--- a/rlp/encode.go
+++ b/rlp/encode.go
@@ -57,20 +57,16 @@ type Encoder interface {
// Please see package-level documentation of encoding rules.
func Encode(w io.Writer, val interface{}) error {
// Optimization: reuse *encBuffer when called by EncodeRLP.
- if buf, ok := w.(*encBuffer); ok {
+ if buf := encBufferFromWriter(w); buf != nil {
return buf.encode(val)
}
- if ebuf, ok := w.(EncoderBuffer); ok {
- return ebuf.buf.encode(val)
- }
buf := getEncBuffer()
defer encBufferPool.Put(buf)
-
if err := buf.encode(val); err != nil {
return err
}
- return buf.toWriter(w)
+ return buf.writeTo(w)
}
// EncodeToBytes returns the RLP encoding of val.
@@ -82,7 +78,7 @@ func EncodeToBytes(val interface{}) ([]byte, error) {
if err := buf.encode(val); err != nil {
return nil, err
}
- return buf.toBytes(), nil
+ return buf.makeBytes(), nil
}
// EncodeToReader returns a reader from which the RLP encoding of val
diff --git a/rlp/encode_test.go b/rlp/encode_test.go
index 58d90c1f12..ce916caf6d 100644
--- a/rlp/encode_test.go
+++ b/rlp/encode_test.go
@@ -431,6 +431,21 @@ func TestEncodeToBytes(t *testing.T) {
runEncTests(t, EncodeToBytes)
}
+func TestEncodeAppendToBytes(t *testing.T) {
+ buffer := make([]byte, 20)
+ runEncTests(t, func(val interface{}) ([]byte, error) {
+ w := NewEncoderBuffer(nil)
+ defer w.Flush()
+
+ err := Encode(w, val)
+ if err != nil {
+ return nil, err
+ }
+ output := w.AppendToBytes(buffer[:0])
+ return output, nil
+ })
+}
+
func TestEncodeToReader(t *testing.T) {
runEncTests(t, func(val interface{}) ([]byte, error) {
_, r, err := EncodeToReader(val)
diff --git a/trie/committer.go b/trie/committer.go
index 72abd9a1fd..9d689e8e35 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -35,7 +35,6 @@ const leafChanSize = 200
// capture all dirty nodes during the commit process and keep them cached in
// insertion order.
type committer struct {
- tmp sliceBuffer
sha crypto.KeccakState
owner common.Hash // TODO: same as nodes.owner, consider removing
@@ -48,7 +47,6 @@ type committer struct {
var committerPool = sync.Pool{
New: func() interface{} {
return &committer{
- tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
}
},
diff --git a/trie/hasher.go b/trie/hasher.go
index 3a62a2f119..7f0748c13d 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -24,22 +24,12 @@ import (
"golang.org/x/crypto/sha3"
)
-type sliceBuffer []byte
-
-func (b *sliceBuffer) Write(data []byte) (n int, err error) {
- *b = append(*b, data...)
- return len(data), nil
-}
-
-func (b *sliceBuffer) Reset() {
- *b = (*b)[:0]
-}
-
// hasher is a type used for the trie Hash operation. A hasher has some
// internal preallocated temp space
type hasher struct {
sha crypto.KeccakState
- tmp sliceBuffer
+ tmp []byte
+ encbuf rlp.EncoderBuffer
parallel bool // Whether to use paralallel threads when hashing
}
@@ -47,8 +37,9 @@ type hasher struct {
var hasherPool = sync.Pool{
New: func() interface{} {
return &hasher{
- tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
- sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+ tmp: make([]byte, 0, 550), // cap is as large as a full fullNode.
+ sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+ encbuf: rlp.NewEncoderBuffer(nil),
}
},
}
@@ -153,30 +144,41 @@ func (h *hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached
// into compact form for RLP encoding.
// If the rlp data is smaller than 32 bytes, `nil` is returned.
func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
- h.tmp.Reset()
- if err := rlp.Encode(&h.tmp, n); err != nil {
- panic("encode error: " + err.Error())
- }
+ n.encode(h.encbuf)
+ enc := h.encodedBytes()
- if len(h.tmp) < 32 && !force {
+ if len(enc) < 32 && !force {
return n // Nodes smaller than 32 bytes are stored inside their parent
}
- return h.hashData(h.tmp)
+ return h.hashData(enc)
}
// shortnodeToHash is used to creates a hashNode from a set of hashNodes, (which
// may contain nil values)
func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
- h.tmp.Reset()
- // Generate the RLP encoding of the node
- if err := n.EncodeRLP(&h.tmp); err != nil {
- panic("encode error: " + err.Error())
- }
+ n.encode(h.encbuf)
+ enc := h.encodedBytes()
- if len(h.tmp) < 32 && !force {
+ if len(enc) < 32 && !force {
return n // Nodes smaller than 32 bytes are stored inside their parent
}
- return h.hashData(h.tmp)
+ return h.hashData(enc)
+}
+
+// encodedBytes returns the result of the last encoding operation on h.encbuf.
+// This also resets the encoder buffer.
+//
+// All node encoding must be done like this:
+//
+// node.encode(h.encbuf)
+// enc := h.encodedBytes()
+//
+// This convention exists because node.encode can only be inlined/escape-analyzed when
+// called on a concrete receiver type.
+func (h *hasher) encodedBytes() []byte {
+ h.tmp = h.encbuf.AppendToBytes(h.tmp[:0])
+ h.encbuf.Reset(nil)
+ return h.tmp
}
// hashData hashes the provided data
diff --git a/trie/iterator.go b/trie/iterator.go
index 54afb69cee..92f64a4c07 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -22,7 +22,6 @@ import (
"errors"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/rlp"
)
// NodeResolver is used for looking up trie nodes before reaching into the real
@@ -223,8 +222,7 @@ func (it *nodeIterator) LeafProof() [][]byte {
// Gather nodes that end up as hash nodes (or the root)
node, hashed := hasher.proofHash(item.node)
if _, ok := hashed.(hashNode); ok || i == 0 {
- enc, _ := rlp.EncodeToBytes(node)
- proofs = append(proofs, enc)
+ proofs = append(proofs, nodeToBytes(node))
}
}
return proofs
diff --git a/trie/node.go b/trie/node.go
index 07a6595b03..285e2d7702 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -28,8 +28,9 @@ import (
var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
type node interface {
- fstring(string) string
cache() (hashNode, bool)
+ encode(w rlp.EncoderBuffer)
+ fstring(string) string
}
type (
@@ -52,16 +53,9 @@ var nilValueNode = valueNode(nil)
// EncodeRLP encodes a full node into the consensus RLP format.
func (n *fullNode) EncodeRLP(w io.Writer) error {
- var nodes [17]node
-
- for i, child := range &n.Children {
- if child != nil {
- nodes[i] = child
- } else {
- nodes[i] = nilValueNode
- }
- }
- return rlp.Encode(w, nodes)
+ eb := rlp.NewEncoderBuffer(w)
+ n.encode(eb)
+ return eb.Flush()
}
func (n *fullNode) copy() *fullNode { copy := *n; return © }
diff --git a/trie/node_enc.go b/trie/node_enc.go
new file mode 100644
index 0000000000..1b2eca682f
--- /dev/null
+++ b/trie/node_enc.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+func nodeToBytes(n node) []byte {
+ w := rlp.NewEncoderBuffer(nil)
+ n.encode(w)
+ result := w.ToBytes()
+ w.Flush()
+ return result
+}
+
+func (n *fullNode) encode(w rlp.EncoderBuffer) {
+ offset := w.List()
+ for _, c := range n.Children {
+ if c != nil {
+ c.encode(w)
+ } else {
+ w.Write(rlp.EmptyString)
+ }
+ }
+ w.ListEnd(offset)
+}
+
+func (n *shortNode) encode(w rlp.EncoderBuffer) {
+ offset := w.List()
+ w.WriteBytes(n.Key)
+ if n.Val != nil {
+ n.Val.encode(w)
+ } else {
+ w.Write(rlp.EmptyString)
+ }
+ w.ListEnd(offset)
+}
+
+func (n hashNode) encode(w rlp.EncoderBuffer) {
+ w.WriteBytes(n)
+}
+
+func (n valueNode) encode(w rlp.EncoderBuffer) {
+ w.WriteBytes(n)
+}
+
+func (n rawNode) encode(w rlp.EncoderBuffer) {
+ w.Write(n)
+}
diff --git a/trie/proof.go b/trie/proof.go
index c1b4ae305f..52673e19b3 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
)
// Prove constructs a merkle proof for key. The result contains all encoded nodes
@@ -94,7 +93,7 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
if hash, ok := hn.(hashNode); ok || i == 0 {
// If the node's database encoding is a hash (or is the
// root node), it becomes a proof element.
- enc, _ := rlp.EncodeToBytes(n)
+ enc := nodeToBytes(n)
if !ok {
hash = hasher.hashData(enc)
}
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index 79f6e39bfd..9fcc0831ee 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/rlp"
)
var ErrCommitDisabled = errors.New("no database for committing")
@@ -399,65 +398,62 @@ func (st *StackTrie) hash(path []byte) {
// The 'hasher' is taken from a pool, but we don't actually
// claim an instance until all children are done with their hashing,
// and we actually need one
- var h *hasher
+ var (
+ h *hasher
+ encodedNode []byte
+ )
switch st.nodeType {
case branchNode:
- var nodes [17]node
+ var node fullNode
for i, child := range st.children {
if child == nil {
- nodes[i] = nilValueNode
+ node.Children[i] = nilValueNode
continue
}
child.hash(append(path, byte(i)))
if len(child.val) < 32 {
- nodes[i] = rawNode(child.val)
+ node.Children[i] = rawNode(child.val)
} else {
- nodes[i] = hashNode(child.val)
+ node.Children[i] = hashNode(child.val)
}
st.children[i] = nil // Reclaim mem from subtree
returnToPool(child)
}
- nodes[16] = nilValueNode
h = newHasher(false)
defer returnHasherToPool(h)
- h.tmp.Reset()
- if err := rlp.Encode(&h.tmp, nodes); err != nil {
- panic(err)
- }
+
+ node.encode(h.encbuf)
+ encodedNode = h.encodedBytes()
case extNode:
st.children[0].hash(append(path, st.key...))
- h = newHasher(false)
- defer returnHasherToPool(h)
- h.tmp.Reset()
- var valuenode node
+ sz := hexToCompactInPlace(st.key)
+ n := shortNode{Key: st.key[:sz]}
+
if len(st.children[0].val) < 32 {
- valuenode = rawNode(st.children[0].val)
+ n.Val = rawNode(st.children[0].val)
} else {
- valuenode = hashNode(st.children[0].val)
- }
- n := struct {
- Key []byte
- Val node
- }{
- Key: hexToCompact(st.key),
- Val: valuenode,
- }
- if err := rlp.Encode(&h.tmp, n); err != nil {
- panic(err)
+ n.Val = hashNode(st.children[0].val)
}
+
+ h = newHasher(false)
+ defer returnHasherToPool(h)
+
+ n.encode(h.encbuf)
+ encodedNode = h.encodedBytes()
+
returnToPool(st.children[0])
st.children[0] = nil // Reclaim mem from subtree
case leafNode:
h = newHasher(false)
defer returnHasherToPool(h)
- h.tmp.Reset()
+
st.key = append(st.key, byte(16))
sz := hexToCompactInPlace(st.key)
- n := [][]byte{st.key[:sz], st.val}
- if err := rlp.Encode(&h.tmp, n); err != nil {
- panic(err)
- }
+ n := shortNode{Key: st.key[:sz], Val: valueNode(st.val)}
+
+ n.encode(h.encbuf)
+ encodedNode = h.encodedBytes()
case emptyNode:
st.val = emptyRoot.Bytes()
st.key = st.key[:0]
@@ -468,20 +464,19 @@ func (st *StackTrie) hash(path []byte) {
}
st.key = st.key[:0]
st.nodeType = hashedNode
- if len(h.tmp) < 32 {
+ if len(encodedNode) < 32 {
// If rlp-encoded value was < 32 bytes, then val point directly to the rlp-encoded value
- st.val = common.CopyBytes(h.tmp)
+ st.val = common.CopyBytes(encodedNode)
return
}
- // Write the hash to the 'val'. We allocate a new val here to not mutate
- // input values
- st.val = make([]byte, 32)
- h.sha.Reset()
- h.sha.Write(h.tmp)
- h.sha.Read(st.val)
+
+ h = newHasher(false)
+ defer returnHasherToPool(h)
+
+ st.val = h.hashData(encodedNode)
if st.writeFn != nil {
- st.writeFn(st.owner, path, common.BytesToHash(st.val), h.tmp)
+ st.writeFn(st.owner, path, common.BytesToHash(st.val), encodedNode)
}
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 04e2851e6e..b53f6f83eb 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -494,8 +494,9 @@ func runRandTest(rt randTest) bool {
tr.tracer = newTracer()
for i, step := range rt {
- fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
- step.op, step.key, step.value, i)
+ // fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
+ // step.op, step.key, step.value, i)
+
switch step.op {
case opUpdate:
@@ -974,7 +975,8 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root)
}
- fmt.Printf("root: %x\n", stRoot)
+
+ t.Logf("root: %x\n", stRoot)
if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp)
}
From 1af81296139a2dc26f159797098f7e0b0dad9a85 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Tue, 29 Oct 2024 17:18:21 +0700
Subject: [PATCH 31/41] core, accounts, eth, trie: pbss fix release v1.13.2
(#615)
* core, accounts, eth, trie: handle genesis state missing (#28171)
* core, accounts, eth, trie: handle genesis state missing
* core, eth, trie: polish
* core: manage txpool subscription in mainpool
* eth/backend: fix test
* cmd, eth: fix test
* core/rawdb, trie/triedb/pathdb: address comments
* eth, trie: address comments
* eth: inline the function
* eth: use synced flag
* core/txpool: revert changes in txpool
* core, eth, trie: rename functions
* trie: remove internal nodes between shortNode and child in path mode (#28163)
* trie: remove internal nodes between shortNode and child in path mode
* trie: address comments
* core/rawdb, trie: address comments
* core/rawdb: delete unused func
* trie: change comments
* trie: add missing tests
* trie: fix lint
---------
Co-authored-by: rjl493456442
---
accounts/abi/bind/backends/simulated.go | 15 +-
cmd/devp2p/internal/ethtest/suite_test.go | 1 +
core/blockchain.go | 51 +++----
core/rawdb/accessors_sync.go | 44 ++++++
core/rawdb/accessors_trie.go | 20 +++
core/rawdb/database.go | 2 +-
core/rawdb/schema.go | 3 +
eth/api_backend.go | 10 +-
eth/backend.go | 3 +-
eth/downloader/downloader.go | 4 +-
eth/handler.go | 39 ++++--
eth/handler_eth.go | 2 +-
eth/handler_eth_test.go | 4 +-
eth/sync.go | 27 ++--
miner/miner_test.go | 7 +-
trie/database.go | 20 ++-
trie/sync.go | 83 +++++++++--
trie/sync_test.go | 163 ++++++++++++++++++----
trie/triedb/pathdb/database.go | 103 +++++++++-----
trie/triedb/pathdb/database_test.go | 37 ++---
trie/triedb/pathdb/errors.go | 10 +-
trie/triedb/pathdb/journal.go | 2 +-
22 files changed, 480 insertions(+), 170 deletions(-)
create mode 100644 core/rawdb/accessors_sync.go
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 146b209f7b..60e39a34e8 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -181,7 +181,6 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address,
if err != nil {
return nil, err
}
-
return stateDB.GetCode(contract), nil
}
@@ -194,7 +193,6 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
if err != nil {
return nil, err
}
-
return stateDB.GetBalance(contract), nil
}
@@ -207,7 +205,6 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
if err != nil {
return 0, err
}
-
return stateDB.GetNonce(contract), nil
}
@@ -220,7 +217,6 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
if err != nil {
return nil, err
}
-
val := stateDB.GetState(contract, key)
return val[:], nil
}
@@ -667,7 +663,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
}
block.AddTxWithChain(b.blockchain, tx)
}, true)
- stateDB, _ := b.blockchain.State()
+ stateDB, err := b.blockchain.State()
+ if err != nil {
+ return err
+ }
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
@@ -782,11 +781,13 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
block.OffsetTime(int64(adjustment.Seconds()))
}, true)
- stateDB, _ := b.blockchain.State()
+ stateDB, err := b.blockchain.State()
+ if err != nil {
+ return err
+ }
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
-
return nil
}
diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go
index 55e386c4aa..4754e81e1b 100644
--- a/cmd/devp2p/internal/ethtest/suite_test.go
+++ b/cmd/devp2p/internal/ethtest/suite_test.go
@@ -99,6 +99,7 @@ func setupGeth(stack *node.Node) error {
if err != nil {
return err
}
+ backend.SetSynced()
_, err = backend.BlockChain().InsertChain(chain.blocks[1:], nil)
return err
diff --git a/core/blockchain.go b/core/blockchain.go
index d1ec0cedca..5881713f65 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -374,12 +374,11 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
if !bc.HasState(head.Root()) {
if head.NumberU64() == 0 {
// The genesis state is missing, which is only possible in the path-based
- // scheme. This situation occurs when the state syncer overwrites it.
- //
- // The solution is to reset the state to the genesis state. Although it may not
- // match the sync target, the state healer will later address and correct any
- // inconsistencies.
- bc.resetState()
+ // scheme. This situation occurs when the initial state sync is not finished
+ // yet, or the chain head is rewound below the pivot point. In both scenario,
+ // there is no possible recovery approach except for rerunning a snap sync.
+ // Do nothing here until the state syncer picks it up.
+ log.Info("Genesis state is missing, wait state sync")
} else {
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
@@ -675,28 +674,6 @@ func (bc *BlockChain) SetHead(head uint64) error {
return err
}
-// resetState resets the persistent state to genesis state if it's not present.
-func (bc *BlockChain) resetState() {
- // Short circuit if the genesis state is already present.
- root := bc.genesisBlock.Root()
- if bc.HasState(root) {
- return
- }
- // Reset the state database to empty for committing genesis state.
- // Note, it should only happen in path-based scheme and Reset function
- // is also only call-able in this mode.
- if bc.triedb.Scheme() == rawdb.PathScheme {
- if err := bc.triedb.Reset(types.EmptyRootHash); err != nil {
- log.Crit("Failed to clean state", "err", err) // Shouldn't happen
- }
- }
- // Write genesis state into database.
- if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil {
- log.Crit("Failed to commit genesis state", "err", err)
- }
- log.Info("Reset state to genesis", "root", root)
-}
-
// setHeadBeyondRoot rewinds the local chain to a new head with the extra condition
// that the rewind must pass the specified state root. This method is meant to be
// used when rewinding with snapshots enabled to ensure that we go back further than
@@ -728,7 +705,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if newHeadBlock == nil {
log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
- bc.resetState()
} else {
// Block exists, keep rewinding until we find one with state,
// keeping rewinding until we exceed the optional threshold
@@ -758,16 +734,14 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
}
}
if beyondRoot || newHeadBlock.NumberU64() == 0 {
- if newHeadBlock.NumberU64() == 0 {
- bc.resetState()
- } else if !bc.HasState(newHeadBlock.Root()) {
+ if !bc.HasState(newHeadBlock.Root()) && bc.stateRecoverable(newHeadBlock.Root()) {
// Rewind to a block with recoverable state. If the state is
// missing, run the state recovery here.
if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil {
log.Crit("Failed to rollback state", "err", err) // Shouldn't happen
}
+ log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
}
- log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
break
}
log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root())
@@ -782,6 +756,15 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
// to low, so it's safe the update in-memory markers directly.
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
+
+ // The head state is missing, which is only possible in the path-based
+ // scheme. This situation occurs when the chain head is rewound below
+ // the pivot point. In this scenario, there is no possible recovery
+ // approach except for rerunning a snap sync. Do nothing here until the
+ // state syncer picks it up.
+ if !bc.HasState(newHeadBlock.Root()) {
+ log.Info("Chain is stateless, wait state sync", "number", newHeadBlock.Number(), "hash", newHeadBlock.Hash())
+ }
}
// Rewind the fast block in a simpleton way to the target head
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
@@ -865,7 +848,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
// Reset the trie database with the fresh fast synced state.
root := block.Root()
if bc.triedb.Scheme() == rawdb.PathScheme {
- if err := bc.triedb.Reset(root); err != nil {
+ if err := bc.triedb.Enable(root); err != nil {
return err
}
}
diff --git a/core/rawdb/accessors_sync.go b/core/rawdb/accessors_sync.go
new file mode 100644
index 0000000000..54e5967d81
--- /dev/null
+++ b/core/rawdb/accessors_sync.go
@@ -0,0 +1,44 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rawdb
+
+import (
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+const (
+ StateSyncUnknown = uint8(0) // flags the state snap sync is unknown
+ StateSyncRunning = uint8(1) // flags the state snap sync is not completed yet
+ StateSyncFinished = uint8(2) // flags the state snap sync is completed
+)
+
+// ReadSnapSyncStatusFlag retrieves the state snap sync status flag.
+func ReadSnapSyncStatusFlag(db ethdb.KeyValueReader) uint8 {
+ blob, err := db.Get(snapSyncStatusFlagKey)
+ if err != nil || len(blob) != 1 {
+ return StateSyncUnknown
+ }
+ return blob[0]
+}
+
+// WriteSnapSyncStatusFlag stores the state snap sync status flag into database.
+func WriteSnapSyncStatusFlag(db ethdb.KeyValueWriter, flag uint8) {
+ if err := db.Put(snapSyncStatusFlagKey, []byte{flag}); err != nil {
+ log.Crit("Failed to store sync status flag", "err", err)
+ }
+}
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index 4706156602..14b3a96ba0 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -88,6 +88,16 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash)
return h.hash(data) == hash
}
+// ExistsAccountTrieNode checks the presence of the account trie node with the
+// specified node path, regardless of the node hash.
+func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool {
+ has, err := db.Has(accountTrieNodeKey(path))
+ if err != nil {
+ return false
+ }
+ return has
+}
+
// WriteAccountTrieNode writes the provided account trie node into database.
func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
if err := db.Put(accountTrieNodeKey(path), node); err != nil {
@@ -126,6 +136,16 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [
return h.hash(data) == hash
}
+// ExistsStorageTrieNode checks the presence of the storage trie node with the
+// specified account hash and node path, regardless of the node hash.
+func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool {
+ has, err := db.Has(storageTrieNodeKey(accountHash, path))
+ if err != nil {
+ return false
+ }
+ return has
+}
+
// WriteStorageTrieNode writes the provided storage trie node into database.
func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) {
if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil {
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 522fa33ca7..9ad0287d59 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -535,7 +535,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, highestFinalityVoteKey, storeInternalTxsEnabledKey,
- snapshotSyncStatusKey, persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey,
+ snapshotSyncStatusKey, persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
} {
if bytes.Equal(key, meta) {
metadata.Add(size)
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 84b2bc8e5f..3c99d1db66 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -90,6 +90,9 @@ var (
snapshotConsortiumPrefix = []byte("consortium-") // key = ConsortiumSnapshotPrefix + block hash
+ // snapSyncStatusFlagKey flags that status of snap sync.
+ snapSyncStatusFlagKey = []byte("SnapSyncStatus")
+
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td
diff --git a/eth/api_backend.go b/eth/api_backend.go
index b7e4b1c2ae..60a1869e76 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -168,7 +168,10 @@ func (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.B
return nil, nil, errors.New("header not found")
}
stateDb, err := b.eth.BlockChain().StateAt(header.Root)
- return stateDb, header, err
+ if err != nil {
+ return nil, nil, err
+ }
+ return stateDb, header, nil
}
func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) {
@@ -187,7 +190,10 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN
return nil, nil, errors.New("hash is not currently canonical")
}
stateDb, err := b.eth.BlockChain().StateAt(header.Root)
- return stateDb, header, err
+ if err != nil {
+ return nil, nil, err
+ }
+ return stateDb, header, nil
}
return nil, nil, errors.New("invalid arguments; neither block nor hash specified")
}
diff --git a/eth/backend.go b/eth/backend.go
index 6345ad8097..cc6e36b331 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -637,7 +637,8 @@ func (s *Ethereum) Engine() consensus.Engine { return s.engine }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening
func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader }
-func (s *Ethereum) Synced() bool { return atomic.LoadUint32(&s.handler.acceptTxs) == 1 }
+func (s *Ethereum) Synced() bool { return atomic.LoadUint32(&s.handler.synced) == 1 }
+func (s *Ethereum) SetSynced() { s.handler.enableSyncedFeatures() }
func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning }
func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer }
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 345c6d98f4..e7a9502e03 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -388,7 +388,9 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
// subsequent state reads, explicitly disable the trie database and state
// syncer is responsible to address and correct any state missing.
if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
- d.blockchain.TrieDB().Reset(types.EmptyRootHash)
+ if err := d.blockchain.TrieDB().Disable(); err != nil {
+ return err
+ }
}
// Snap sync uses the snapshot namespace to store potentially flakey data until
// sync completely heals and finishes. Pause snapshot maintenance in the mean
diff --git a/eth/handler.go b/eth/handler.go
index c6353e650e..0c0e5f276a 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -109,9 +109,9 @@ type handler struct {
networkID uint64
forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node
- fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
- snapSync uint32 // Flag whether fast sync should operate on top of the snap protocol
- acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
+ fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
+ snapSync uint32 // Flag whether fast sync should operate on top of the snap protocol
+ synced uint32 // Flag whether we're considered synchronised (enables transaction processing)
checkpointNumber uint64 // Block number for the sync progress validator to cross reference
checkpointHash common.Hash // Block hash for the sync progress validator to cross reference
@@ -183,17 +183,23 @@ func newHandler(config *handlerConfig) (*handler, error) {
fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock()
if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 {
h.fastSync = uint32(1)
- log.Warn("Switch sync mode from full sync to fast sync")
+ log.Warn("Switch sync mode from full sync to fast sync", "reason", "snap sync incomplete")
+ } else if !h.chain.HasState(fullBlock.Root()) {
+ h.fastSync = uint32(1)
+ log.Warn("Switch sync mode from full sync to snap sync", "reason", "head state missing")
}
} else {
- if h.chain.CurrentBlock().NumberU64() > 0 {
+ head := h.chain.CurrentBlock()
+ if head.NumberU64() > 0 {
// Print warning log if database is not empty to run fast sync.
log.Warn("Switch sync mode from fast sync to full sync")
} else {
// If fast sync was requested and our database is empty, grant it
h.fastSync = uint32(1)
+ log.Info("Enabled fast sync", "head", head.Number, "hash", head.Hash())
if config.Sync == downloader.SnapSync {
h.snapSync = uint32(1)
+ log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash())
}
}
}
@@ -237,15 +243,11 @@ func newHandler(config *handlerConfig) (*handler, error) {
// accept each others' blocks until a restart. Unfortunately we haven't figured
// out a way yet where nodes can decide unilaterally whether the network is new
// or not. This should be fixed if we figure out a solution.
- if atomic.LoadUint32(&h.fastSync) == 1 {
- log.Warn("Fast syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
+ if atomic.LoadUint32(&h.synced) == 0 {
+ log.Warn("Syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
return 0, nil
}
- n, err := h.chain.InsertChain(blocks, sidecars)
- if err == nil {
- h.enableSyncedFeatures() // Mark initial sync done on any fetcher import
- }
- return n, err
+ return h.chain.InsertChain(blocks, sidecars)
}
h.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.chain.Engine().VerifyBlobHeader,
h.BroadcastBlock, heighter, nil, inserter, h.removePeer)
@@ -706,7 +708,18 @@ func (h *handler) voteBroadcastLoop() {
// enableSyncedFeatures enables the post-sync functionalities when the initial
// sync is finished.
func (h *handler) enableSyncedFeatures() {
- atomic.StoreUint32(&h.acceptTxs, 1)
+ atomic.StoreUint32(&h.synced, 1)
+
+ // If we were running fast/snap sync and it finished, disable doing another
+ // round on next sync cycle
+ if atomic.LoadUint32(&h.fastSync) == 1 {
+ log.Info("Fast sync complete, auto disabling")
+ atomic.StoreUint32(&h.fastSync, 0)
+ }
+ if atomic.LoadUint32(&h.snapSync) == 1 {
+ log.Info("Snap sync complete, auto disabling")
+ atomic.StoreUint32(&h.snapSync, 0)
+ }
if h.chain.TrieDB().Scheme() == rawdb.PathScheme {
h.chain.TrieDB().SetBufferSize(pathdb.DefaultBufferSize)
}
diff --git a/eth/handler_eth.go b/eth/handler_eth.go
index cfad1fdfa1..6be4a5cd7b 100644
--- a/eth/handler_eth.go
+++ b/eth/handler_eth.go
@@ -56,7 +56,7 @@ func (h *ethHandler) PeerInfo(id enode.ID) interface{} {
// AcceptTxs retrieves whether transaction processing is enabled on the node
// or if inbound transactions should simply be dropped.
func (h *ethHandler) AcceptTxs() bool {
- return atomic.LoadUint32(&h.acceptTxs) == 1
+ return atomic.LoadUint32(&h.synced) == 1
}
// Handle is invoked from a peer's message handler when it receives a new remote
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index 73f16a4771..0661825ae1 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -259,7 +259,7 @@ func testRecvTransactions(t *testing.T, protocol uint) {
handler := newTestHandler()
defer handler.close()
- handler.handler.acceptTxs = 1 // mark synced to accept transactions
+ handler.handler.synced = 1 // mark synced to accept transactions
txs := make(chan core.NewTxsEvent)
sub := handler.txpool.SubscribeTransactions(txs, false)
@@ -408,7 +408,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
sinks[i] = newTestHandler()
defer sinks[i].close()
- sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions
+ sinks[i].handler.synced = 1 // mark synced to accept transactions
}
// Interconnect all the sink handlers with the source handler
for i, sink := range sinks {
diff --git a/eth/sync.go b/eth/sync.go
index ecbbcc3e05..386132bf40 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -190,15 +190,24 @@ func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
}
// We are probably in full sync, but we might have rewound to before the
// fast sync pivot, check if we should reenable
+ head := cs.handler.chain.CurrentBlock()
if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
- if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
+ if head.NumberU64() < *pivot {
block := cs.handler.chain.CurrentFastBlock()
td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
return downloader.FastSync, td
}
}
+ // We are in a full sync, but the associated head state is missing. To complete
+ // the head state, forcefully rerun the snap sync. Note it doesn't mean the
+ // persistent state is corrupted, just mismatch with the head block.
+ if !cs.handler.chain.HasState(head.Root()) {
+ block := cs.handler.chain.CurrentFastBlock()
+ td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
+ log.Info("Reenabled snap sync as chain is stateless")
+ return downloader.SnapSync, td
+ }
// Nope, we're really full syncing
- head := cs.handler.chain.CurrentBlock()
td := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())
return downloader.FullSync, td
}
@@ -234,22 +243,14 @@ func (h *handler) doSync(op *chainSyncOp) error {
if err != nil {
return err
}
- if atomic.LoadUint32(&h.fastSync) == 1 {
- log.Info("Fast sync complete, auto disabling")
- atomic.StoreUint32(&h.fastSync, 0)
- }
- if atomic.LoadUint32(&h.snapSync) == 1 {
- log.Info("Snap sync complete, auto disabling")
- atomic.StoreUint32(&h.snapSync, 0)
- }
- // If we've successfully finished a sync cycle and passed any required checkpoint,
- // enable accepting transactions from the network.
+ h.enableSyncedFeatures()
+
head := h.chain.CurrentBlock()
if head.NumberU64() >= h.checkpointNumber {
// Checkpoint passed, sanity check the timestamp to have a fallback mechanism
// for non-checkpointed (number = 0) private networks.
if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {
- atomic.StoreUint32(&h.acceptTxs, 1)
+ atomic.StoreUint32(&h.synced, 1)
}
}
if head.NumberU64() > 0 {
diff --git a/miner/miner_test.go b/miner/miner_test.go
index fa652b27e4..dd2fec27ca 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -57,6 +57,7 @@ func (m *mockBackend) TxPool() *txpool.TxPool {
}
type testBlockChain struct {
+ root common.Hash
statedb *state.StateDB
gasLimit uint64
chainHeadFeed *event.Feed
@@ -76,6 +77,10 @@ func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
return bc.statedb, nil
}
+func (bc *testBlockChain) HasState(root common.Hash) bool {
+ return bc.root == root
+}
+
func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
return bc.chainHeadFeed.Subscribe(ch)
}
@@ -252,7 +257,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) {
t.Fatalf("can't create new chain %v", err)
}
statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil)
- blockchain := &testBlockChain{statedb, 10000000, new(event.Feed)}
+ blockchain := &testBlockChain{bc.Genesis().Root(), statedb, 10000000, new(event.Feed)}
legacyPool := legacypool.New(testTxPoolConfig, bc.Config(), blockchain)
txPool, err := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{legacyPool})
diff --git a/trie/database.go b/trie/database.go
index 79ce83588e..42eedc2001 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -280,15 +280,27 @@ func (db *Database) Recoverable(root common.Hash) (bool, error) {
return pdb.Recoverable(root), nil
}
-// Reset wipes all available journal from the persistent database and discard
-// all caches and diff layers. Using the given root to create a new disk layer.
+// Disable deactivates the database and invalidates all available state layers
+// as stale to prevent access to the persistent state, which is in the syncing
+// stage.
+//
// It's only supported by path-based database and will return an error for others.
-func (db *Database) Reset(root common.Hash) error {
+func (db *Database) Disable() error {
+ pdb, ok := db.backend.(*pathdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ return pdb.Disable()
+}
+
+// Enable activates database and resets the state tree with the provided persistent
+// state root once the state sync is finished.
+func (db *Database) Enable(root common.Hash) error {
pdb, ok := db.backend.(*pathdb.Database)
if !ok {
return errors.New("not supported")
}
- return pdb.Reset(root)
+ return pdb.Enable(root)
}
// Journal commits an entire diff hierarchy to disk into a single journal entry.
diff --git a/trie/sync.go b/trie/sync.go
index a6338b8a89..2b257fe2f7 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
)
// ErrNotRequested is returned by the trie sync when it's requested to process a
@@ -40,6 +41,16 @@ var ErrAlreadyProcessed = errors.New("already processed")
// memory if the node was configured with a significant number of peers.
const maxFetchesPerDepth = 16384
+var (
+ // deletionGauge is the metric to track how many trie node deletions
+ // are performed in total during the sync process.
+ deletionGauge = metrics.NewRegisteredGauge("trie/sync/delete", nil)
+
+ // lookupGauge is the metric to track how many trie node lookups are
+ // performed to determine if node needs to be deleted.
+ lookupGauge = metrics.NewRegisteredGauge("trie/sync/lookup", nil)
+)
+
// SyncPath is a path tuple identifying a particular trie node either in a single
// trie (account) or a layered trie (account -> storage).
//
@@ -75,9 +86,10 @@ func NewSyncPath(path []byte) SyncPath {
// nodeRequest represents a scheduled or already in-flight trie node retrieval request.
type nodeRequest struct {
- hash common.Hash // Hash of the trie node to retrieve
- path []byte // Merkle path leading to this node for prioritization
- data []byte // Data content of the node, cached until all subtrees complete
+ hash common.Hash // Hash of the trie node to retrieve
+ path []byte // Merkle path leading to this node for prioritization
+ data []byte // Data content of the node, cached until all subtrees complete
+ deletes [][]byte // List of internal path segments for trie nodes to delete
parent *nodeRequest // Parent state node referencing this entry
deps int // Number of dependencies before allowed to commit this node
@@ -107,17 +119,19 @@ type CodeSyncResult struct {
// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
- nodes map[string][]byte // In-memory membatch of recently completed nodes
- hashes map[string]common.Hash // Hashes of recently completed nodes
- codes map[common.Hash][]byte // In-memory membatch of recently completed codes
+ nodes map[string][]byte // In-memory membatch of recently completed nodes
+ hashes map[string]common.Hash // Hashes of recently completed nodes
+ deletes map[string]struct{} // List of paths for trie node to delete
+ codes map[common.Hash][]byte // In-memory membatch of recently completed codes
}
// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
return &syncMemBatch{
- nodes: make(map[string][]byte),
- hashes: make(map[string]common.Hash),
- codes: make(map[common.Hash][]byte),
+ nodes: make(map[string][]byte),
+ hashes: make(map[string]common.Hash),
+ deletes: make(map[string]struct{}),
+ codes: make(map[common.Hash][]byte),
}
}
@@ -358,7 +372,7 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error {
// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any occurred error.
func (s *Sync) Commit(dbw ethdb.Batch) error {
- // Dump the membatch into a database dbw
+ // Flush the pending node writes into database batch.
for path, value := range s.membatch.nodes {
owner, inner := ResolvePath([]byte(path))
rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme)
@@ -367,14 +381,21 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
s.bloom.Add(hash[:])
}
}
+ // Flush the pending node deletes into the database batch.
+ // Please note that each written and deleted node has a
+ // unique path, ensuring no duplication occurs.
+ for path := range s.membatch.deletes {
+ owner, inner := ResolvePath([]byte(path))
+ rawdb.DeleteTrieNode(dbw, owner, inner, common.Hash{} /* unused */, s.scheme)
+ }
+ // Flush the pending code writes into database batch.
for hash, value := range s.membatch.codes {
rawdb.WriteCode(dbw, hash, value)
if s.bloom != nil {
s.bloom.Add(hash[:])
}
}
- // Drop the membatch data and return
- s.membatch = newSyncMemBatch()
+ s.membatch = newSyncMemBatch() // reset the batch
return nil
}
@@ -438,6 +459,39 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
node: node.Val,
path: append(append([]byte(nil), req.path...), key...),
}}
+ // Mark all internal nodes between shortNode and its **in disk**
+ // child as invalid. This is essential in the case of path mode
+ // scheme; otherwise, state healing might overwrite existing child
+ // nodes silently while leaving a dangling parent node within the
+ // range of this internal path on disk. This would break the
+ // guarantee for state healing.
+ //
+ // While it's possible for this shortNode to overwrite a previously
+ // existing full node, the other branches of the fullNode can be
+ // retained as they remain untouched and complete.
+ //
+ // This step is only necessary for path mode, as there is no deletion
+ // in hash mode at all.
+ if _, ok := node.Val.(hashNode); ok && s.scheme == rawdb.PathScheme {
+ owner, inner := ResolvePath(req.path)
+ for i := 1; i < len(key); i++ {
+ // While checking for a non-existent item in Pebble can be less efficient
+ // without a bloom filter, the relatively low frequency of lookups makes
+ // the performance impact negligible.
+ var exists bool
+ if owner == (common.Hash{}) {
+ exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...))
+ } else {
+ exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...))
+ }
+ if exists {
+ req.deletes = append(req.deletes, key[:i])
+ deletionGauge.Inc(1)
+ log.Debug("Detected dangling node", "owner", owner, "path", append(inner, key[:i]...))
+ }
+ }
+ lookupGauge.Inc(int64(len(key) - 1))
+ }
case *fullNode:
for i := 0; i < 17; i++ {
if node.Children[i] != nil {
@@ -507,6 +561,11 @@ func (s *Sync) commitNodeRequest(req *nodeRequest) error {
s.membatch.nodes[string(req.path)] = req.data
s.membatch.hashes[string(req.path)] = req.hash
+ // Delete the internal nodes which are marked as invalid
+ for _, segment := range req.deletes {
+ path := append(req.path, segment...)
+ s.membatch.deletes[string(path)] = struct{}{}
+ }
delete(s.nodeReqs, string(req.path))
s.fetches[len(req.path)]--
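The deletion detection above reduces to a scan over the strictly internal prefixes of the shortNode key. A minimal standalone sketch of that loop, where exists and markDelete are hypothetical stand-ins for the rawdb existence checks and the req.deletes bookkeeping:

// detectDanglingPaths walks every strictly internal prefix of a shortNode's
// compressed key. If a node already lives on disk at such a prefix, it is
// stale under the new trie shape and must be scheduled for deletion, or
// path-scheme healing would leave a dangling parent behind.
func detectDanglingPaths(key []byte, exists func(prefix []byte) bool, markDelete func(segment []byte)) {
	for i := 1; i < len(key); i++ {
		if exists(key[:i]) {
			markDelete(key[:i])
		}
	}
}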
diff --git a/trie/sync_test.go b/trie/sync_test.go
index b3fb10db14..a07dbccd87 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -76,31 +76,53 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *SecureTrie, map[st
// checkTrieContents cross references a reconstructed trie with an expected data
// content map.
-func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) {
+func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte, rawTrie bool) {
// Check root availability and trie contents
ndb := newTestDatabase(db, scheme)
- trie, err := NewSecure(TrieID(common.BytesToHash(root)), ndb)
- if err != nil {
- t.Fatalf("failed to create trie at %x: %v", root, err)
- }
- if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil {
+ if err := checkTrieConsistency(db, scheme, common.BytesToHash(root), rawTrie); err != nil {
t.Fatalf("inconsistent trie at %x: %v", root, err)
}
+ type reader interface {
+ Get(key []byte) []byte
+ }
+ var r reader
+ if rawTrie {
+ trie, err := New(TrieID(common.BytesToHash(root)), ndb)
+ if err != nil {
+ t.Fatalf("failed to create trie at %x: %v", root, err)
+ }
+ r = trie
+ } else {
+ trie, err := NewSecure(TrieID(common.BytesToHash(root)), ndb)
+ if err != nil {
+ t.Fatalf("failed to create trie at %x: %v", root, err)
+ }
+ r = trie
+ }
for key, val := range content {
- if have := trie.Get([]byte(key)); !bytes.Equal(have, val) {
+ if have := r.Get([]byte(key)); !bytes.Equal(have, val) {
t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val)
}
}
}
// checkTrieConsistency checks that all nodes in a trie are indeed present.
-func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error {
+func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error {
ndb := newTestDatabase(db, scheme)
- trie, err := NewSecure(TrieID(root), ndb)
- if err != nil {
- return nil // Consider a non existent state consistent
+ var it NodeIterator
+ if rawTrie {
+ trie, err := New(TrieID(root), ndb)
+ if err != nil {
+ return nil // Consider a non existent state consistent
+ }
+ it = trie.MustNodeIterator(nil)
+ } else {
+ trie, err := NewSecure(TrieID(root), ndb)
+ if err != nil {
+ return nil // Consider a non existent state consistent
+ }
+ it = trie.MustNodeIterator(nil)
}
- it := trie.MustNodeIterator(nil)
for it.Next(true) {
}
return it.Error()
@@ -211,7 +233,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -276,7 +298,7 @@ func testIterativeDelayedSync(t *testing.T, scheme string) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that given a root hash, a trie can sync iteratively on a single thread,
@@ -346,7 +368,7 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
@@ -418,7 +440,7 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that a trie sync will not request nodes multiple times, even if they
@@ -491,7 +513,7 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
}
// Tests that at any point in time during a sync, only complete sub-tries are in
@@ -577,7 +599,7 @@ func testIncompleteSync(t *testing.T, scheme string) {
nodeHash := addedHashes[i]
value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
- if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil {
+ if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root, false); err == nil {
t.Fatalf("trie inconsistency not caught, missing: %x", path)
}
rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)
@@ -653,7 +675,7 @@ func testSyncOrdering(t *testing.T, scheme string) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
// Check that the trie nodes have been requested path-ordered
for i := 0; i < len(reqs)-1; i++ {
@@ -674,7 +696,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database
// The code requests are ignored here since there is no code
// at the testing trie.
- paths, nodes, _ := sched.Missing(1)
+ paths, nodes, _ := sched.Missing(0)
var elements []trieElement
for i := 0; i < len(paths); i++ {
elements = append(elements, trieElement{
@@ -708,7 +730,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database
}
batch.Write()
- paths, nodes, _ = sched.Missing(1)
+ paths, nodes, _ = sched.Missing(0)
elements = elements[:0]
for i := 0; i < len(paths); i++ {
elements = append(elements, trieElement{
@@ -734,7 +756,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
syncWith(t, srcTrie.Hash(), diskdb, srcDb)
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false)
// Push more modifications into the src trie, to see if dest trie can still
// sync with it (overwrite stale states)
@@ -758,7 +780,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
srcTrie, _ = NewSecure(TrieID(root), srcDb)
syncWith(t, srcTrie.Hash(), diskdb, srcDb)
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff, false)
// Revert added modifications from the src trie, to see if dest trie can still
// sync with it (overwrite reverted states)
@@ -782,5 +804,98 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
srcTrie, _ = NewSecure(TrieID(root), srcDb)
syncWith(t, srcTrie.Hash(), diskdb, srcDb)
- checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted, false)
+}
+
+// Tests if state syncer can correctly catch up the pivot move.
+func TestPivotMove(t *testing.T) {
+ testPivotMove(t, rawdb.HashScheme, true)
+ testPivotMove(t, rawdb.HashScheme, false)
+ testPivotMove(t, rawdb.PathScheme, true)
+ testPivotMove(t, rawdb.PathScheme, false)
+}
+
+func testPivotMove(t *testing.T, scheme string, tiny bool) {
+ var (
+ srcDisk = rawdb.NewMemoryDatabase()
+ srcTrieDB = newTestDatabase(srcDisk, scheme)
+ srcTrie, _ = New(TrieID(types.EmptyRootHash), srcTrieDB)
+
+ deleteFn = func(key []byte, tr *Trie, states map[string][]byte) {
+ tr.Delete(key)
+ delete(states, string(key))
+ }
+ writeFn = func(key []byte, val []byte, tr *Trie, states map[string][]byte) {
+ if val == nil {
+ if tiny {
+ val = randBytes(4)
+ } else {
+ val = randBytes(32)
+ }
+ }
+ tr.Update(key, val)
+ states[string(key)] = common.CopyBytes(val)
+ }
+ copyStates = func(states map[string][]byte) map[string][]byte {
+ cpy := make(map[string][]byte)
+ for k, v := range states {
+ cpy[k] = v
+ }
+ return cpy
+ }
+ )
+ stateA := make(map[string][]byte)
+ writeFn([]byte{0x01, 0x23}, nil, srcTrie, stateA)
+ writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateA)
+ writeFn([]byte{0x12, 0x33}, nil, srcTrie, stateA)
+ writeFn([]byte{0x12, 0x34}, nil, srcTrie, stateA)
+ writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateA)
+ writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA)
+
+ rootA, nodesA, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootA, false); err != nil {
+ panic(err)
+ }
+ // Create a destination trie and sync with the scheduler
+ destDisk := rawdb.NewMemoryDatabase()
+ syncWith(t, rootA, destDisk, srcTrieDB)
+ checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateA, true)
+
+ // Delete element to collapse trie
+ stateB := copyStates(stateA)
+ srcTrie, _ = New(TrieID(rootA), srcTrieDB)
+ deleteFn([]byte{0x02, 0x34}, srcTrie, stateB)
+ deleteFn([]byte{0x13, 0x44}, srcTrie, stateB)
+ writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB)
+
+ rootB, nodesB, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootB, false); err != nil {
+ panic(err)
+ }
+ syncWith(t, rootB, destDisk, srcTrieDB)
+ checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateB, true)
+
+ // Add elements to expand trie
+ stateC := copyStates(stateB)
+ srcTrie, _ = New(TrieID(rootB), srcTrieDB)
+
+ writeFn([]byte{0x01, 0x24}, stateA[string([]byte{0x01, 0x24})], srcTrie, stateC)
+ writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateC)
+ writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC)
+
+ rootC, nodesC, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootC, false); err != nil {
+ panic(err)
+ }
+ syncWith(t, rootC, destDisk, srcTrieDB)
+ checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateC, true)
}
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
index f02f2164ed..873418c9f0 100644
--- a/trie/triedb/pathdb/database.go
+++ b/trie/triedb/pathdb/database.go
@@ -127,7 +127,8 @@ type Database struct {
// readOnly is the flag whether the mutation is allowed to be applied.
// It will be set automatically when the database is journaled during
// the shutdown to reject all following unexpected mutations.
- readOnly bool // Indicator if database is opened in read only mode
+ readOnly bool // Flag if database is opened in read only mode
+ waitSync bool // Flag if database is deactivated due to initial state sync
bufferSize int // Memory allowance (in bytes) for caching dirty nodes
config *Config // Configuration for database
diskdb ethdb.Database // Persistent storage for matured trie nodes
@@ -176,6 +177,12 @@ func New(diskdb ethdb.Database, config *Config) *Database {
log.Warn("Truncated extra state histories from freezer", "count", pruned)
}
}
+ // Disable database in case node is still in the initial state sync stage.
+ if rawdb.ReadSnapSyncStatusFlag(diskdb) == rawdb.StateSyncRunning && !db.readOnly {
+ if err := db.Disable(); err != nil {
+ log.Crit("Failed to disable database", "err", err) // impossible to happen
+ }
+ }
log.Warn("Path-based state scheme is an experimental feature")
return db
}
@@ -201,9 +208,9 @@ func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint6
db.lock.Lock()
defer db.lock.Unlock()
- // Short circuit if the database is in read only mode.
- if db.readOnly {
- return errSnapshotReadOnly
+ // Short circuit if the mutation is not allowed.
+ if err := db.modifyAllowed(); err != nil {
+ return err
}
if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil {
return err
@@ -228,45 +235,59 @@ func (db *Database) Commit(root common.Hash, report bool) error {
db.lock.Lock()
defer db.lock.Unlock()
- // Short circuit if the database is in read only mode.
- if db.readOnly {
- return errSnapshotReadOnly
+ // Short circuit if the mutation is not allowed.
+ if err := db.modifyAllowed(); err != nil {
+ return err
}
return db.tree.cap(root, 0)
}
-// Reset rebuilds the database with the specified state as the base.
-//
-// - if target state is empty, clear the stored state and all layers on top
-// - if target state is non-empty, ensure the stored state matches with it
-// and clear all other layers on top.
-func (db *Database) Reset(root common.Hash) error {
+// Disable deactivates the database and invalidates all available state layers
+// as stale to prevent access to the persistent state, which is in the syncing
+// stage.
+func (db *Database) Disable() error {
db.lock.Lock()
defer db.lock.Unlock()
// Short circuit if the database is in read only mode.
if db.readOnly {
- return errSnapshotReadOnly
+ return errDatabaseReadOnly
}
- batch := db.diskdb.NewBatch()
- root = types.TrieRootHash(root)
- if root == types.EmptyRootHash {
- // Empty state is requested as the target, nuke out
- // the root node and leave all others as dangling.
- rawdb.DeleteAccountTrieNode(batch, nil)
- } else {
- // Ensure the requested state is existent before any
- // action is applied.
- _, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil)
- if hash != root {
- return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root)
- }
+ // Prevent duplicated disable operation.
+ if db.waitSync {
+ log.Error("Reject duplicated disable operation")
+ return nil
}
- // Mark the disk layer as stale before applying any mutation.
+ db.waitSync = true
+
+ // Mark the disk layer as stale to prevent access to persistent state.
db.tree.bottom().markStale()
+ // Write the initial sync flag to persist it across restarts.
+ rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncRunning)
+ log.Info("Disabled trie database due to state sync")
+ return nil
+}
+
+// Enable activates database and resets the state tree with the provided persistent
+// state root once the state sync is finished.
+func (db *Database) Enable(root common.Hash) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ // Short circuit if the database is in read only mode.
+ if db.readOnly {
+ return errDatabaseReadOnly
+ }
+ // Ensure the provided state root matches the stored one.
+ root = types.TrieRootHash(root)
+ _, stored := rawdb.ReadAccountTrieNode(db.diskdb, nil)
+ if stored != root {
+ return fmt.Errorf("state root mismatch: stored %x, synced %x", stored, root)
+ }
// Drop the stale state journal in persistent database and
// reset the persistent state id back to zero.
+ batch := db.diskdb.NewBatch()
rawdb.DeleteTrieJournal(batch)
rawdb.WritePersistentStateID(batch, 0)
if err := batch.Write(); err != nil {
@@ -285,8 +306,11 @@ func (db *Database) Reset(root common.Hash) error {
// Re-construct a new disk layer backed by persistent state
// with **empty clean cache and node buffer**.
- dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0))
- db.tree.reset(dl)
+ db.tree.reset(newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)))
+
+ // Re-enable the database as the final step.
+ db.waitSync = false
+ rawdb.WriteSnapSyncStatusFlag(db.diskdb, rawdb.StateSyncFinished)
log.Info("Rebuilt trie database", "root", root)
return nil
}
@@ -299,8 +323,11 @@ func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error
defer db.lock.Unlock()
// Short circuit if rollback operation is not supported.
- if db.readOnly || db.freezer == nil {
- return errors.New("state rollback is not supported")
+ if err := db.modifyAllowed(); err != nil {
+ return err
+ }
+ if db.freezer == nil {
+ return errors.New("state rollback is non-supported")
}
// Short circuit if the target state is not recoverable.
@@ -438,3 +465,15 @@ func (db *Database) Close() error {
}
return db.freezer.Close()
}
+
+// modifyAllowed returns the indicator if mutation is allowed. This function
+// assumes the db.lock is already held.
+func (db *Database) modifyAllowed() error {
+ if db.readOnly {
+ return errDatabaseReadOnly
+ }
+ if db.waitSync {
+ return errDatabaseWaitSync
+ }
+ return nil
+}
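A hedged sketch of the intended call order around an initial state sync, written as if inside the pathdb package so the *Database handle above is in scope; runStateSync is a hypothetical placeholder for the actual snap-sync driver:

// syncThenEnable shows the Disable/Enable lifecycle introduced above.
func syncThenEnable(db *Database, runStateSync func() (common.Hash, error)) error {
	// Disable marks every layer stale and persists StateSyncRunning, so a
	// crash mid-sync re-enters the disabled state on the next startup.
	if err := db.Disable(); err != nil {
		return err
	}
	root, err := runStateSync() // hypothetical: flat-state download plus healing
	if err != nil {
		return err
	}
	// Enable verifies the synced root against the persisted trie, drops the
	// stale journal and state id, and rebuilds a fresh disk layer.
	return db.Enable(root)
}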
diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go
index 37d92ac268..1eaaef6911 100644
--- a/trie/triedb/pathdb/database_test.go
+++ b/trie/triedb/pathdb/database_test.go
@@ -440,38 +440,39 @@ func TestDatabaseRecoverable(t *testing.T) {
}
}
-func TestReset(t *testing.T) {
- var (
- tester = newTester(t)
- index = tester.bottomIndex()
- )
+func TestDisable(t *testing.T) {
+ tester := newTester(t)
defer tester.release()
- // Reset database to unknown target, should reject it
- if err := tester.db.Reset(testutil.RandomHash()); err == nil {
- t.Fatal("Failed to reject invalid reset")
+ _, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
+ if err := tester.db.Disable(); err != nil {
+ t.Fatal("Failed to deactivate database")
}
- // Reset database to state persisted in the disk
- if err := tester.db.Reset(types.EmptyRootHash); err != nil {
- t.Fatalf("Failed to reset database %v", err)
+ if err := tester.db.Enable(types.EmptyRootHash); err == nil {
+ t.Fatalf("Invalid activation should be rejected")
}
+ if err := tester.db.Enable(stored); err != nil {
+ t.Fatal("Failed to activate database")
+ }
+
// Ensure journal is deleted from disk
if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 {
t.Fatal("Failed to clean journal")
}
// Ensure all trie histories are removed
- for i := 0; i <= index; i++ {
- _, err := readHistory(tester.db.freezer, uint64(i+1))
- if err == nil {
- t.Fatalf("Failed to clean state history, index %d", i+1)
- }
+ n, err := tester.db.freezer.Ancients()
+ if err != nil {
+ t.Fatal("Failed to clean state history")
+ }
+ if n != 0 {
+ t.Fatal("Failed to clean state history")
}
// Verify layer tree structure, single disk layer is expected
if tester.db.tree.len() != 1 {
t.Fatalf("Extra layer kept %d", tester.db.tree.len())
}
- if tester.db.tree.bottom().rootHash() != types.EmptyRootHash {
- t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash())
+ if tester.db.tree.bottom().rootHash() != stored {
+ t.Fatalf("Root hash is not matched exp %x got %x", stored, tester.db.tree.bottom().rootHash())
}
}
diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go
index f503a9c49d..450cbaa4bc 100644
--- a/trie/triedb/pathdb/errors.go
+++ b/trie/triedb/pathdb/errors.go
@@ -24,9 +24,13 @@ import (
)
var (
- // errSnapshotReadOnly is returned if the database is opened in read only mode
- // and mutation is requested.
- errSnapshotReadOnly = errors.New("read only")
+ // errDatabaseReadOnly is returned if the database is opened in read only mode
+ // to prevent any mutation.
+ errDatabaseReadOnly = errors.New("read only")
+
+ // errDatabaseWaitSync is returned if the initial state sync is not completed
+ // yet and database is disabled to prevent accessing state.
+ errDatabaseWaitSync = errors.New("waiting for sync")
// errSnapshotStale is returned from data accessors if the underlying layer
// layer had been invalidated due to the chain progressing forward far enough
diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go
index f6e1854dee..ec7d1d6806 100644
--- a/trie/triedb/pathdb/journal.go
+++ b/trie/triedb/pathdb/journal.go
@@ -368,7 +368,7 @@ func (db *Database) Journal(root common.Hash) error {
// Short circuit if the database is in read only mode.
if db.readOnly {
- return errSnapshotReadOnly
+ return errDatabaseReadOnly
}
// Firstly write out the metadata of journal
journal := new(bytes.Buffer)
From 17113e8f04574f5d05764da9d764157954f20e78 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Wed, 30 Oct 2024 00:54:27 +0700
Subject: [PATCH 32/41] trie/triedb/pathdb, core/rawdb: pbss fix release
v1.13.5 (corner-cases in path scheme state management) (#619)
* trie/triedb/pathdb, core/rawdb: enhance error message in freezer (#28198)
This PR adds more error message for debugging purpose.
* trie/triedb/pathdb: improve dirty node flushing trigger (#28426)
* trie/triedb/pathdb: improve dirty node flushing trigger
* trie/triedb/pathdb: add tests
* trie/triedb/pathdb: address comment
* core/rawdb: fsync the index file after each freezer write (#28483)
* core/rawdb: fsync the index and data file after each freezer write
* core/rawdb: fsync the data file in freezer after write
---------
Co-authored-by: rjl493456442
---
core/rawdb/ancient_utils.go | 3 ++
core/rawdb/freezer_batch.go | 12 +++++--
core/rawdb/freezer_resettable.go | 3 +-
core/rawdb/freezer_table.go | 28 +++++++++++++---
core/rawdb/freezer_utils.go | 6 +---
trie/triedb/pathdb/database_test.go | 51 +++++++++++++++++++++++-----
trie/triedb/pathdb/disklayer.go | 52 +++++++++++++++++++++++------
trie/triedb/pathdb/history.go | 48 +++++++++++++++-----------
trie/triedb/pathdb/history_test.go | 44 ++++++++++++++++++++++++
9 files changed, 194 insertions(+), 53 deletions(-)
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
index 2a88ae5c18..fd76e5348a 100644
--- a/core/rawdb/ancient_utils.go
+++ b/core/rawdb/ancient_utils.go
@@ -18,6 +18,7 @@ package rawdb
import (
"fmt"
+ "path/filepath"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
@@ -124,6 +125,8 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
switch freezerName {
case chainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
+ case stateFreezerName:
+ path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
}
diff --git a/core/rawdb/freezer_batch.go b/core/rawdb/freezer_batch.go
index e143dba2d4..aa4a439e1d 100644
--- a/core/rawdb/freezer_batch.go
+++ b/core/rawdb/freezer_batch.go
@@ -183,19 +183,27 @@ func (batch *freezerTableBatch) maybeCommit() error {
// commit writes the batched items to the backing freezerTable.
func (batch *freezerTableBatch) commit() error {
- // Write data.
+ // Write data. The head file is fsync'd after write to ensure the
+ // data is truly transferred to disk.
_, err := batch.t.head.Write(batch.dataBuffer)
if err != nil {
return err
}
+ if err := batch.t.head.Sync(); err != nil {
+ return err
+ }
dataSize := int64(len(batch.dataBuffer))
batch.dataBuffer = batch.dataBuffer[:0]
- // Write index.
+ // Write indices. The index file is fsync'd after write to ensure the
+ // data indexes are truly transferred to disk.
_, err = batch.t.index.Write(batch.indexBuffer)
if err != nil {
return err
}
+ if err := batch.t.index.Sync(); err != nil {
+ return err
+ }
indexSize := int64(len(batch.indexBuffer))
batch.indexBuffer = batch.indexBuffer[:0]
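The durability fix here is simply write-then-fsync on each file. A standard-library-only sketch of the pattern:

import "os"

// appendDurably mirrors the batch commit above: Write alone only hands the
// bytes to the OS page cache, while Sync forces them to stable storage, so
// a crash cannot leave the index referencing data that never hit the disk.
func appendDurably(f *os.File, blob []byte) error {
	if _, err := f.Write(blob); err != nil {
		return err
	}
	return f.Sync()
}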
diff --git a/core/rawdb/freezer_resettable.go b/core/rawdb/freezer_resettable.go
index 45d32b0f88..184e908187 100644
--- a/core/rawdb/freezer_resettable.go
+++ b/core/rawdb/freezer_resettable.go
@@ -225,8 +225,7 @@ func cleanup(pathToDelete string) error {
for _, name := range names {
if name == filepath.Base(pathToDelete)+tmpSuffix {
- // Figure out then delete the tmp directory which is renamed in Reset Method.
- log.Info("Cleaning up the freezer Reset directory", "pathToDelete", pathToDelete, "total files inside", len(names))
+ log.Info("Removed leftover freezer directory", "name", name)
return os.RemoveAll(filepath.Join(parentDir, name))
}
}
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index fd26882d5e..1856873211 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -201,7 +201,9 @@ func (t *freezerTable) repair() error {
}
// Ensure the index is a multiple of indexEntrySize bytes
if overflow := stat.Size() % indexEntrySize; overflow != 0 {
- truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
+ if err := truncateFreezerFile(t.index, stat.Size()-overflow); err != nil {
+ return err
+ } // New file can't trigger this path
}
// Retrieve the file sizes and prepare for truncation
if stat, err = t.index.Stat(); err != nil {
@@ -238,6 +240,12 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
lastIndex.unmarshalBinary(buffer)
}
+ // Print an error log if the index is corrupted due to an incorrect
+ // last index item. While it is theoretically possible to have a zero offset
+ // by storing all zero-size items, it is highly unlikely to occur in practice.
+ if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
+ log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize)
+ }
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
if err != nil {
return err
@@ -372,6 +380,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
return err
}
+ if err := t.index.Sync(); err != nil {
+ return err
+ }
var expected indexEntry
if length == 0 {
expected = indexEntry{filenum: t.tailId, offset: 0}
@@ -394,6 +405,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
// Release any files _after the current head -- both the previous head
// and any files which may have been opened for reading
t.releaseFilesAfter(expected.filenum, true)
+
// Set back the historic head
t.head = newHead
t.headId = expected.filenum
@@ -401,6 +413,9 @@ func (t *freezerTable) truncateHead(items uint64) error {
if err := truncateFreezerFile(t.head, int64(expected.offset)); err != nil {
return err
}
+ if err := t.head.Sync(); err != nil {
+ return err
+ }
// All data files truncated, set internal counters and return
t.headBytes = int64(expected.offset)
t.items.Store(items)
@@ -491,6 +506,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
if err := t.meta.Sync(); err != nil {
return err
}
+ // Close the index file before shortening it.
+ if err := t.index.Close(); err != nil {
+ return err
+ }
// Truncate the deleted index entries from the index file. It overwrites the entries in current index file.
err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
tailIndex := indexEntry{
@@ -504,13 +523,14 @@ func (t *freezerTable) truncateTail(items uint64) error {
return err
}
// Reopen the modified index file to load the changes
- if err := t.index.Close(); err != nil {
- return err
- }
t.index, err = openFreezerFileForAppend(t.index.Name())
if err != nil {
return err
}
+ // Sync the file to ensure changes are flushed to disk
+ if err := t.index.Sync(); err != nil {
+ return err
+ }
// Release/Delete any files before the current tail
t.tailId = newTailId
t.itemOffset.Store(newDeleted)
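The reordered tail truncation follows a close/rewrite/reopen/sync sequence on the index file. A generic sketch, where rewriteInPlace is a hypothetical stand-in for copyFrom (which writes a temp file and renames it over the original):

// reopenAfterRewrite assumes the caller has already closed the old handle,
// exactly as truncateTail now does before invoking copyFrom on the index.
func reopenAfterRewrite(path string, rewriteInPlace func(string) error) (*os.File, error) {
	if err := rewriteInPlace(path); err != nil {
		return nil, err
	}
	f, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND, 0644)
	if err != nil {
		return nil, err
	}
	// Fsync so the rewritten index is durable before the tail pointers move.
	if err := f.Sync(); err != nil {
		f.Close()
		return nil, err
	}
	return f, nil
}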
diff --git a/core/rawdb/freezer_utils.go b/core/rawdb/freezer_utils.go
index 6a18a5a016..d0b103fa84 100644
--- a/core/rawdb/freezer_utils.go
+++ b/core/rawdb/freezer_utils.go
@@ -79,11 +79,7 @@ func copyFrom(srcPath, destPath string, offset uint64, beforeCopyFunc func(f *os
return err
}
f = nil
-
- if err := os.Rename(fname, destPath); err != nil {
- return err
- }
- return nil
+ return os.Rename(fname, destPath)
}
// openFreezerFileForAppend opens a freezer table file and seeks to the end, creating it if it does not exist.
diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go
index 1eaaef6911..d4ef368c71 100644
--- a/trie/triedb/pathdb/database_test.go
+++ b/trie/triedb/pathdb/database_test.go
@@ -97,11 +97,15 @@ type tester struct {
snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
}
-func newTester(t *testing.T) *tester {
+func newTester(t *testing.T, historyLimit uint64) *tester {
var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
- db = New(disk, &Config{CleanCacheSize: 256 * 1024, DirtyCacheSize: 256 * 1024})
- obj = &tester{
+ db = New(disk, &Config{
+ StateHistory: historyLimit,
+ CleanCacheSize: 256 * 1024,
+ DirtyCacheSize: 256 * 1024,
+ })
+ obj = &tester{
db: db,
preimages: make(map[common.Hash]common.Address),
accounts: make(map[common.Hash][]byte),
@@ -377,7 +381,7 @@ func (t *tester) bottomIndex() int {
func TestDatabaseRollback(t *testing.T) {
// Verify state histories
- tester := newTester(t)
+ tester := newTester(t, 0)
defer tester.release()
if err := tester.verifyHistory(); err != nil {
@@ -403,7 +407,7 @@ func TestDatabaseRollback(t *testing.T) {
func TestDatabaseRecoverable(t *testing.T) {
var (
- tester = newTester(t)
+ tester = newTester(t, 0)
index = tester.bottomIndex()
)
defer tester.release()
@@ -441,7 +445,7 @@ func TestDatabaseRecoverable(t *testing.T) {
}
func TestDisable(t *testing.T) {
- tester := newTester(t)
+ tester := newTester(t, 0)
defer tester.release()
_, stored := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)
@@ -477,7 +481,7 @@ func TestDisable(t *testing.T) {
}
func TestCommit(t *testing.T) {
- tester := newTester(t)
+ tester := newTester(t, 0)
defer tester.release()
if err := tester.db.Commit(tester.lastHash(), false); err != nil {
@@ -501,7 +505,7 @@ func TestCommit(t *testing.T) {
}
func TestJournal(t *testing.T) {
- tester := newTester(t)
+ tester := newTester(t, 0)
defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil {
@@ -525,7 +529,7 @@ func TestJournal(t *testing.T) {
}
func TestCorruptedJournal(t *testing.T) {
- tester := newTester(t)
+ tester := newTester(t, 0)
defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil {
@@ -554,6 +558,35 @@ func TestCorruptedJournal(t *testing.T) {
}
}
+// TestTailTruncateHistory function is designed to test a specific edge case where,
+// when history objects are removed from the end, it should trigger a state flush
+// if the ID of the new tail object is even higher than the persisted state ID.
+//
+// For example, let's say the ID of the persistent state is 10, and the current
+// history objects range from ID(5) to ID(15). As we accumulate six more objects,
+// the history will expand to cover ID(11) to ID(21). ID(11) then becomes the
+// oldest history object, and its ID is even higher than the stored state.
+//
+// In this scenario, it is mandatory to update the persistent state before
+// truncating the tail histories. This ensures that the ID of the persistent state
+// always falls within the range of [oldest-history-id, latest-history-id].
+func TestTailTruncateHistory(t *testing.T) {
+ tester := newTester(t, 10)
+ defer tester.release()
+
+ tester.db.Close()
+ tester.db = New(tester.db.diskdb, &Config{StateHistory: 10})
+
+ head, err := tester.db.freezer.Ancients()
+ if err != nil {
+ t.Fatalf("Failed to obtain freezer head")
+ }
+ stored := rawdb.ReadPersistentStateID(tester.db.diskdb)
+ if head != stored {
+ t.Fatalf("Failed to truncate excess history object above, stored: %d, head: %d", stored, head)
+ }
+}
+
// copyAccounts returns a deep-copied account set of the provided one.
func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte {
copied := make(map[common.Hash][]byte, len(set))
diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go
index 369f7a6cd5..ba9381c932 100644
--- a/trie/triedb/pathdb/disklayer.go
+++ b/trie/triedb/pathdb/disklayer.go
@@ -172,15 +172,30 @@ func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes map
func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
dl.lock.Lock()
defer dl.lock.Unlock()
- // Construct and store the state history first. If crash happens
- // after storing the state history but without flushing the
- // corresponding states(journal), the stored state history will
- // be truncated in the next restart.
+
+ // Construct and store the state history first. If crash happens after storing
+ // the state history but without flushing the corresponding states(journal),
+ // the stored state history will be truncated from head in the next restart.
+ var (
+ overflow bool
+ oldest uint64
+ )
if dl.db.freezer != nil {
- err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateHistory)
+ err := writeHistory(dl.db.freezer, bottom)
if err != nil {
return nil, err
}
+ // Determine if the persisted history object has exceeded the configured
+ // limitation, set the overflow as true if so.
+ tail, err := dl.db.freezer.Tail()
+ if err != nil {
+ return nil, err
+ }
+ limit := dl.db.config.StateHistory
+ if limit != 0 && bottom.stateID()-tail > limit {
+ overflow = true
+ oldest = bottom.stateID() - limit + 1 // track the id of history **after truncation**
+ }
}
// Mark the diskLayer as stale before applying any mutations on top.
@@ -194,15 +209,30 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
}
rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())
- // Construct a new disk layer by merging the nodes from the provided
- // diff layer, and flush the content in disk layer if there are too
- // many nodes cached. The clean cache is inherited from the original
- // disk layer for reusing.
+ // Construct a new disk layer by merging the nodes from the provided diff
+ // layer, and flush the content in disk layer if there are too many nodes
+ // cached. The clean cache is inherited from the original disk layer.
ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes))
- err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force)
- if err != nil {
+
+ // In a unique scenario where the ID of the oldest history object (after tail
+ // truncation) surpasses the persisted state ID, we take the necessary action
+ // of forcibly committing the cached dirty nodes to ensure that the persisted
+ // state ID remains higher.
+ if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest {
+ force = true
+ }
+ if err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force); err != nil {
return nil, err
}
+ // To remove outdated history objects from the end, we set the 'tail' parameter
+ // to 'oldest-1' due to the offset between the freezer index and the history ID.
+ if overflow {
+ pruned, err := truncateFromTail(ndl.db.diskdb, ndl.db.freezer, oldest-1)
+ if err != nil {
+ return nil, err
+ }
+ log.Debug("Pruned state history", "items", pruned, "tailid", oldest)
+ }
return ndl, nil
}
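The overflow bookkeeping in commit is pure arithmetic over state IDs: with a configured limit, a freezer tail and a new bottom state id, the window overflows once id - tail > limit, and the oldest surviving history becomes id - limit + 1 (truncateFromTail is then called with oldest - 1 to bridge the freezer index offset). A worked sketch:

// historyWindow mirrors the computation in diskLayer.commit above. For
// example, limit=10, tail=0, id=12 overflows and yields oldest=3, so the
// subsequent truncateFromTail call receives oldest-1 = 2.
func historyWindow(id, tail, limit uint64) (overflow bool, oldest uint64) {
	if limit != 0 && id-tail > limit {
		return true, id - limit + 1
	}
	return false, 0
}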
diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go
index 7d672d3254..a13068fa50 100644
--- a/trie/triedb/pathdb/history.go
+++ b/trie/triedb/pathdb/history.go
@@ -530,39 +530,29 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error)
return &dec, nil
}
-// writeHistory writes the state history with provided state set. After
-// storing the corresponding state history, it will also prune the stale
-// histories from the disk with the given threshold.
-func writeHistory(db ethdb.KeyValueStore, freezer *rawdb.ResettableFreezer, dl *diffLayer, limit uint64) error {
+// writeHistory persists the state history with the provided state set.
+func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error {
// Short circuit if state set is not available.
if dl.states == nil {
return errors.New("state change set is not available")
}
var (
- err error
- n int
- start = time.Now()
- h = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states)
+ start = time.Now()
+ history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states)
)
// Return byte streams of the account and storage info in the current state.
- accountData, storageData, accountIndex, storageIndex := h.encode()
+ accountData, storageData, accountIndex, storageIndex := history.encode()
dataSize := common.StorageSize(len(accountData) + len(storageData))
indexSize := common.StorageSize(len(accountIndex) + len(storageIndex))
// Write history data into the five freezer tables respectively.
- rawdb.WriteStateHistory(freezer, dl.stateID(), h.meta.encode(), accountIndex, storageIndex, accountData, storageData)
+ rawdb.WriteStateHistory(freezer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData)
- // Prune stale state histories based on the config.
- if limit != 0 && dl.stateID() > limit {
- n, err = truncateFromTail(db, freezer, dl.stateID()-limit)
- if err != nil {
- return err
- }
- }
historyDataBytesMeter.Mark(int64(dataSize))
historyIndexBytesMeter.Mark(int64(indexSize))
historyBuildTimeMeter.UpdateSince(start)
- log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "pruned", n, "elapsed", common.PrettyDuration(time.Since(start)))
+ log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "elapsed", common.PrettyDuration(time.Since(start)))
+
return nil
}
@@ -600,7 +590,16 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead
if err != nil {
return 0, err
}
- if ohead <= nhead {
+ otail, err := freezer.Tail()
+ if err != nil {
+ return 0, err
+ }
+ // Ensure that the truncation target falls within the specified range.
+ if ohead < nhead || nhead < otail {
+ return 0, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", otail, ohead, nhead)
+ }
+ // Short circuit if nothing to truncate.
+ if ohead == nhead {
return 0, nil
}
// Load the meta objects in range [nhead+1, ohead]
@@ -629,11 +628,20 @@ func truncateFromHead(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, nhead
// truncateFromTail removes the extra state histories from the tail with the given
// parameters. It returns the number of items removed from the tail.
func truncateFromTail(db ethdb.Batcher, freezer *rawdb.ResettableFreezer, ntail uint64) (int, error) {
+ ohead, err := freezer.Ancients()
+ if err != nil {
+ return 0, err
+ }
otail, err := freezer.Tail()
if err != nil {
return 0, err
}
- if otail >= ntail {
+ // Ensure that the truncation target falls within the specified range.
+ if otail > ntail || ntail > ohead {
+ return 0, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", otail, ohead, ntail)
+ }
+ // Short circuit if nothing to truncate.
+ if otail == ntail {
return 0, nil
}
// Load the meta objects in range [otail+1, ntail]
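Both truncation helpers now validate the target against the freezer's current [tail, head] window before touching anything. The shared guard, as a sketch:

import "fmt"

// validTruncationTarget mirrors the range checks added to truncateFromHead
// and truncateFromTail: a target outside [tail, head] is rejected, while a
// target equal to the boundary being moved is handled as a no-op upstream.
func validTruncationTarget(tail, head, target uint64) error {
	if target < tail || target > head {
		return fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, target)
	}
	return nil
}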
diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go
index 2216524b46..6828787efc 100644
--- a/trie/triedb/pathdb/history_test.go
+++ b/trie/triedb/pathdb/history_test.go
@@ -246,6 +246,50 @@ func TestTruncateTailHistories(t *testing.T) {
}
}
+func TestTruncateOutOfRange(t *testing.T) {
+ var (
+ hs = makeHistories(10)
+ db = rawdb.NewMemoryDatabase()
+ freezer, _ = openFreezer(t.TempDir(), false)
+ )
+ defer freezer.Close()
+
+ for i := 0; i < len(hs); i++ {
+ accountData, storageData, accountIndex, storageIndex := hs[i].encode()
+ rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
+ rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
+ }
+ truncateFromTail(db, freezer, uint64(len(hs)/2))
+
+ // Ensure out-of-range truncations are rejected correctly.
+ head, _ := freezer.Ancients()
+ tail, _ := freezer.Tail()
+
+ cases := []struct {
+ mode int
+ target uint64
+ expErr error
+ }{
+ {0, head, nil}, // nothing to delete
+ {0, head + 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, head+1)},
+ {0, tail - 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, tail-1)},
+ {1, tail, nil}, // nothing to delete
+ {1, head + 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, head+1)},
+ {1, tail - 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, tail-1)},
+ }
+ for _, c := range cases {
+ var gotErr error
+ if c.mode == 0 {
+ _, gotErr = truncateFromHead(db, freezer, c.target)
+ } else {
+ _, gotErr = truncateFromTail(db, freezer, c.target)
+ }
+ if !reflect.DeepEqual(gotErr, c.expErr) {
+ t.Errorf("Unexpected error, want: %v, got: %v", c.expErr, gotErr)
+ }
+ }
+}
+
// openFreezer initializes the freezer instance for storing state histories.
func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) {
return rawdb.NewStateFreezer(datadir, readOnly)
From 6c0bfbf43650ea29de231065d1074152cf815c44 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Wed, 30 Oct 2024 13:22:34 +0700
Subject: [PATCH 33/41] cmd/ronin/chaincmd: open ancient freezer when init
genesis (#620)
---
cmd/ronin/chaincmd.go | 3 ++-
docker/chainnode/entrypoint.sh | 1 +
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/cmd/ronin/chaincmd.go b/cmd/ronin/chaincmd.go
index 07475ef219..128b9b3c9f 100644
--- a/cmd/ronin/chaincmd.go
+++ b/cmd/ronin/chaincmd.go
@@ -53,6 +53,7 @@ var (
utils.ForceOverrideChainConfigFlag,
utils.CachePreimagesFlag,
utils.StateSchemeFlag,
+ utils.AncientFlag,
},
Category: "BLOCKCHAIN COMMANDS",
Description: `
@@ -226,7 +227,7 @@ func initGenesis(ctx *cli.Context) error {
defer stack.Close()
for _, name := range []string{"chaindata", "lightchaindata"} {
- chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
+ chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
diff --git a/docker/chainnode/entrypoint.sh b/docker/chainnode/entrypoint.sh
index 095cfdc37f..90ccf2ceaa 100755
--- a/docker/chainnode/entrypoint.sh
+++ b/docker/chainnode/entrypoint.sh
@@ -92,6 +92,7 @@ elif [[ "$FORCE_INIT" = "true" && "$INIT_FORCE_OVERRIDE_CHAIN_CONFIG" = "true" ]
elif [ "$FORCE_INIT" = "true" ]; then
echo "Forcing update chain config with $genesisPath, state_scheme $state_scheme ..."
ronin init $dbEngine --datadir $datadir --state.scheme $state_scheme $genesisPath
+fi
# password file
if [[ ! -f $PASSWORD_FILE ]]; then
From d1e7874b63c6a436673a2ceab112d541690d6666 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Thu, 31 Oct 2024 10:23:11 +0700
Subject: [PATCH 34/41] [docker] remove duplicate param in entrypoint.sh
---
docker/chainnode/entrypoint.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker/chainnode/entrypoint.sh b/docker/chainnode/entrypoint.sh
index 90ccf2ceaa..85f9469451 100755
--- a/docker/chainnode/entrypoint.sh
+++ b/docker/chainnode/entrypoint.sh
@@ -85,7 +85,7 @@ fi
# data dir
if [[ ! -d $datadir/ronin ]]; then
echo "No blockchain data, creating genesis block with $genesisPath, state_scheme $state_scheme ..."
- ronin init $dbEngine --datadir $datadir --state.scheme $state_scheme $genesisPath $genesisPath
+ ronin init $dbEngine --datadir $datadir --state.scheme $state_scheme $genesisPath
elif [[ "$FORCE_INIT" = "true" && "$INIT_FORCE_OVERRIDE_CHAIN_CONFIG" = "true" ]]; then
echo "Forcing update chain config with force overriding chain config with $genesisPath, state_scheme $state_scheme ..."
ronin init $dbEngine --overrideChainConfig --datadir $datadir --state.scheme $state_scheme $genesisPath
From 4700b40a0efebb115a61fbea21e5b3e444d1ee17 Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Fri, 1 Nov 2024 17:19:26 +0700
Subject: [PATCH 35/41] consensus: get Validators from genesis instead of
triedb in v1 consortium. (#624)
During snap sync with path scheme enabled, we disable access to the trie
database and mark its layers stale to protect the persistent state. The
validator data is only needed to verify the first few blocks, so we can
return a hardcoded list derived from the genesis data instead, following
the snap-sync flow of the upstream go-ethereum team.
---
consensus/consortium/v1/consortium.go | 30 ++++++++++++++++++++++++---
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/consensus/consortium/v1/consortium.go b/consensus/consortium/v1/consortium.go
index dee4b9c326..5114586a0c 100644
--- a/consensus/consortium/v1/consortium.go
+++ b/consensus/consortium/v1/consortium.go
@@ -338,11 +338,10 @@ func (c *Consortium) snapshot(chain consensus.ChainHeaderReader, number uint64,
if cpHeader != nil {
hash := cpHeader.Hash()
- validators, err := c.getValidatorsFromContract(chain, number)
+ validators, err := c.getValidatorsFromGenesis()
if err != nil {
return nil, err
}
-
snap = newSnapshot(c.config, c.signatures, number, hash, validators)
if err := snap.store(c.db); err != nil {
return nil, err
@@ -756,6 +755,31 @@ func (c *Consortium) doCalcDifficulty(signer common.Address, number uint64, vali
return new(big.Int).Set(diffNoTurn)
}
+// getValidatorsFromGenesis gets the list of validators from the genesis block to support backward compatibility in v1. It is only used with snap sync.
+func (c *Consortium) getValidatorsFromGenesis() ([]common.Address, error) {
+ var validatorSet []string
+ switch {
+ case c.chainConfig.ChainID.Cmp(big.NewInt(2020)) == 0:
+ validatorSet = []string{
+ "0x000000000000000000000000f224beff587362a88d859e899d0d80c080e1e812",
+ "0x00000000000000000000000011360eacdedd59bc433afad4fc8f0417d1fbebab",
+ "0x00000000000000000000000070bb1fb41c8c42f6ddd53a708e2b82209495e455",
+ }
+ case c.chainConfig.ChainID.Cmp(big.NewInt(2021)) == 0:
+ validatorSet = []string{
+ "0x0000000000000000000000004a4bc674a97737376cfe990ae2fe0d2b6e738393",
+ "0x000000000000000000000000b6bc5bc0410773a3f86b1537ce7495c52e38f88b",
+ }
+ default:
+ return nil, errors.New("no validator set for this chain only support Mainnet & Testnet")
+ }
+ var addresses []common.Address
+ for _, str := range validatorSet {
+ addresses = append(addresses, common.HexToAddress(str))
+ }
+ return addresses, nil
+}
+
// Read the validator list from contract
func (c *Consortium) getValidatorsFromContract(chain consensus.ChainHeaderReader, number uint64) ([]common.Address, error) {
if chain.Config().IsFenix(big.NewInt(int64(number))) {
@@ -779,7 +803,7 @@ func (c *Consortium) getValidatorsFromLastCheckpoint(chain consensus.ChainHeader
if lastCheckpoint == 0 {
// TODO(andy): Review if we should put validators in genesis block's extra data
- return c.getValidatorsFromContract(chain, number)
+ return c.getValidatorsFromGenesis()
}
var header *types.Header
From 6e71b46f99abecbf43687ceca4b5a4fbf003e93b Mon Sep 17 00:00:00 2001
From: Harry Ngo <17699212+huyngopt1994@users.noreply.github.com>
Date: Thu, 7 Nov 2024 11:26:33 +0700
Subject: [PATCH 36/41] cmd,rawdb: avoid overriding the Tail method in the
 chain freezer, which made db inspect fail (#627)
---
core/rawdb/ancient_utils.go | 5 +++++
core/rawdb/chain_freezer.go | 5 -----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
index fd76e5348a..2dd37d5b27 100644
--- a/core/rawdb/ancient_utils.go
+++ b/core/rawdb/ancient_utils.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
)
type tableSize struct {
@@ -89,6 +90,10 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
infos = append(infos, info)
case stateFreezerName:
+ if ReadStateScheme(db) != PathScheme {
+ log.Info("Skip inspecting state freezer", "reason", "state freezer is supported for PathScheme only")
+ continue
+ }
datadir, err := db.AncientDatadir()
if err != nil {
return nil, err
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
index 632f7a960e..d6e3ac5646 100644
--- a/core/rawdb/chain_freezer.go
+++ b/core/rawdb/chain_freezer.go
@@ -73,11 +73,6 @@ func (f *chainFreezer) Close() error {
return err
}
-// Tail returns an error as we don't have a backing chain freezer.
-func (f *chainFreezer) Tail() (uint64, error) {
- return 0, errNotSupported
-}
-
// freeze is a background thread that periodically checks the blockchain for any
// import progress and moves ancient data from the fast database into the freezer.
//
From 63c0e3777eabd32eda6b4cce0b2f0a6c4d7b294c Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Fri, 8 Nov 2024 13:10:45 +0700
Subject: [PATCH 37/41] trie: pbss fix release v1.13.5 continued (#621)
* trie: refactor stacktrie (#28233)
This change refactors stacktrie to separate the stacktrie itself from the
internal representation of nodes: a stacktrie is not a recursive structure
of stacktries, rather, a framework for representing and operating upon a set of nodes.
---------
Co-authored-by: Gary Rong
* trie: remove owner and binary marshaling from stacktrie (#28291)
This change
- Removes the owner-notion from a stacktrie; the owner is only ever needed for committing to the database, but the commit-function, the `writeFn`, is provided by the caller, so the caller can just set the owner into the `writeFn` instead of having it passed through the stacktrie.
- Removes the `encoding.BinaryMarshaler`/`encoding.BinaryUnmarshaler` interface from stacktrie. We're not using it, and it is doubtful whether anyone downstream is either.
* core, trie, eth: refactor stacktrie constructor
This change enhances the stacktrie constructor by introducing an option struct. It also simplifies the `Hash` and `Commit` operations, getting rid of the special handling around the root node.
* core, eth, trie: filter out boundary nodes and remove dangling nodes in stacktrie (#28327)
* core, eth, trie: filter out boundary nodes in stacktrie
* eth/protocol/snap: add comments
* Update trie/stacktrie.go
Co-authored-by: Martin Holst Swende
* eth, trie: remove onBoundary callback
* eth/protocols/snap: keep complete boundary nodes
* eth/protocols/snap: skip healing if the storage trie is already complete
* eth, trie: add more metrics
* eth, trie: address comment
---------
Co-authored-by: Martin Holst Swende
---------
Co-authored-by: Martin Holst Swende
Co-authored-by: Gary Rong
---
common/types.go | 3 +
core/state/snapshot/conversion.go | 16 +-
eth/protocols/snap/metrics.go | 28 ++
eth/protocols/snap/sync.go | 156 +++++++--
tests/fuzzers/stacktrie/trie_fuzzer.go | 19 +-
trie/iterator_test.go | 4 +
trie/proof.go | 2 +-
trie/stacktrie.go | 455 +++++++++++--------------
trie/stacktrie_test.go | 113 ++++--
trie/sync.go | 26 ++
trie/trie_test.go | 28 +-
11 files changed, 495 insertions(+), 355 deletions(-)
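Taken together, the refactor replaces the writer-callback constructors with an options builder. A hedged usage sketch assembled from the hunks below; batch, owner, scheme, notFirst, notLast and gauge are assumed to be in scope:

options := trie.NewStackTrieOptions()
// The writer no longer receives an owner; the caller captures it instead.
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
	rawdb.WriteTrieNode(batch, owner, path, hash, blob, scheme)
})
if scheme == rawdb.PathScheme {
	// Path scheme only: clean dangling nodes and skip incomplete boundaries.
	options = options.WithCleaner(func(path []byte) {
		// delete any pre-existing node lingering under this path
	})
	options = options.WithSkipBoundary(notFirst, notLast, gauge)
}
st := trie.NewStackTrie(options)
root := st.Commit() // Hash and Commit are unified; Commit returns the root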
diff --git a/common/types.go b/common/types.go
index 0f53406980..7779e800a7 100644
--- a/common/types.go
+++ b/common/types.go
@@ -49,6 +49,9 @@ const (
var (
hashT = reflect.TypeOf(Hash{})
addressT = reflect.TypeOf(Address{})
+
+ // MaxHash represents the maximum possible hash value.
+ MaxHash = HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index b567579525..03f6466f49 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -363,22 +363,16 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
- var nodeWriter trie.NodeWriteFunc
+ options := trie.NewStackTrieOptions()
// Implement the node writer when a database is provided; otherwise leave it nil.
if db != nil {
- nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
- }
+ })
}
- t := trie.NewStackTrieWithOwner(nodeWriter, owner)
+ t := trie.NewStackTrie(options)
for leaf := range in {
t.TryUpdate(leaf.key[:], leaf.value)
}
- var root common.Hash
- if db == nil {
- root = t.Hash()
- } else {
- root, _ = t.Commit()
- }
- out <- root
+ out <- t.Commit()
}
diff --git a/eth/protocols/snap/metrics.go b/eth/protocols/snap/metrics.go
index a8ea143b54..ffc9a6a5f3 100644
--- a/eth/protocols/snap/metrics.go
+++ b/eth/protocols/snap/metrics.go
@@ -8,4 +8,32 @@ var (
IngressRegistrationErrorMeter = metrics.NewRegisteredMeter(ingressRegistrationErrorName, nil)
EgressRegistrationErrorMeter = metrics.NewRegisteredMeter(egressRegistrationErrorName, nil)
+
+ // deletionGauge is the metric to track how many trie node deletions
+ // are performed in total during the sync process.
+ deletionGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/delete", nil)
+
+ // lookupGauge is the metric to track how many trie node lookups are
+ // performed to determine if node needs to be deleted.
+ lookupGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/lookup", nil)
+
+ // boundaryAccountNodesGauge is the metric to track how many boundary trie
+ // nodes in account trie are met.
+ boundaryAccountNodesGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/boundary/account", nil)
+
+ // boundaryStorageNodesGauge is the metric to track how many boundary trie
+ // nodes in storage tries are met.
+ boundaryStorageNodesGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/boundary/storage", nil)
+
+ // smallStorageGauge is the metric to track how many storages are small enough
+ // to be retrieved in one or two requests.
+ smallStorageGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/small", nil)
+
+ // largeStorageGauge is the metric to track how many storages are large enough
+ // to be retrieved concurrently.
+ largeStorageGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/large", nil)
+
+ // skipStorageHealingGauge is the metric to track how many storages are retrieved
+ // in multiple requests but healing is not necessary.
+ skipStorageHealingGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/noheal", nil)
)
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 538d6f0b5a..e1d9406f21 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -699,6 +699,19 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
}
}
+// cleanPath is used to remove the dangling nodes in the stackTrie.
+func (s *Syncer) cleanPath(batch ethdb.Batch, owner common.Hash, path []byte) {
+ if owner == (common.Hash{}) && rawdb.ExistsAccountTrieNode(s.db, path) {
+ rawdb.DeleteAccountTrieNode(batch, path)
+ deletionGauge.Inc(1)
+ }
+ if owner != (common.Hash{}) && rawdb.ExistsStorageTrieNode(s.db, owner, path) {
+ rawdb.DeleteStorageTrieNode(batch, owner, path)
+ deletionGauge.Inc(1)
+ }
+ lookupGauge.Inc(1)
+}
+
// loadSyncStatus retrieves a previously aborted sync status from the database,
// or generates a fresh one if none is available.
func (s *Syncer) loadSyncStatus() {
@@ -721,9 +734,22 @@ func (s *Syncer) loadSyncStatus() {
s.accountBytes += common.StorageSize(len(key) + len(value))
},
}
- task.genTrie = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(task.genBatch, owner, path, hash, val, s.scheme)
+ options := trie.NewStackTrieOptions()
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, blob, s.scheme)
})
+ if s.scheme == rawdb.PathScheme {
+ // Configure the dangling node cleaner and also filter out boundary nodes
+ // only in the context of the path scheme. Deletion is forbidden in the
+ // hash scheme, as it can disrupt state completeness.
+ options = options.WithCleaner(func(path []byte) {
+ s.cleanPath(task.genBatch, common.Hash{}, path)
+ })
+ // Skip the left boundary if it's not the first range.
+ // Skip the right boundary if it's not the last range.
+ options = options.WithSkipBoundary(task.Next != (common.Hash{}), task.Last != common.MaxHash, boundaryAccountNodesGauge)
+ }
+ task.genTrie = trie.NewStackTrie(options)
for accountHash, subtasks := range task.SubTasks {
for _, subtask := range subtasks {
@@ -735,9 +761,24 @@ func (s *Syncer) loadSyncStatus() {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
- subtask.genTrie = trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, val, s.scheme)
- }, accountHash)
+ owner := accountHash // local assignment for stacktrie writer closure
+ options := trie.NewStackTrieOptions()
+
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(subtask.genBatch, owner, path, hash, blob, s.scheme)
+ })
+ if s.scheme == rawdb.PathScheme {
+ // Configure the dangling node cleaner and also filter out boundary nodes
+ // only in the context of the path scheme. Deletion is forbidden in the
+ // hash scheme, as it can disrupt state completeness.
+ options = options.WithCleaner(func(path []byte) {
+ s.cleanPath(subtask.genBatch, owner, path)
+ })
+ // Skip the left boundary if it's not the first range.
+ // Skip the right boundary if it's not the last range.
+ options = options.WithSkipBoundary(subtask.Next != common.Hash{}, subtask.Last != common.MaxHash, boundaryStorageNodesGauge)
+ }
+ subtask.genTrie = trie.NewStackTrie(options)
}
}
}
@@ -786,14 +827,27 @@ func (s *Syncer) loadSyncStatus() {
s.accountBytes += common.StorageSize(len(key) + len(value))
},
}
+ options := trie.NewStackTrieOptions()
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, blob, s.scheme)
+ })
+ if s.scheme == rawdb.PathScheme {
+ // Configure the dangling node cleaner and also filter out boundary nodes
+ // only in the context of the path scheme. Deletion is forbidden in the
+ // hash scheme, as it can disrupt state completeness.
+ options = options.WithCleaner(func(path []byte) {
+ s.cleanPath(batch, common.Hash{}, path)
+ })
+ // Skip the left boundary if it's not the first range.
+ // Skip the right boundary if it's not the last range.
+ options = options.WithSkipBoundary(next != common.Hash{}, last != common.MaxHash, boundaryAccountNodesGauge)
+ }
s.tasks = append(s.tasks, &accountTask{
Next: next,
Last: last,
SubTasks: make(map[common.Hash][]*storageTask),
genBatch: batch,
- genTrie: trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }),
+ genTrie: trie.NewStackTrie(options),
})
log.Debug("Created account sync task", "from", next, "last", last)
next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
@@ -1930,6 +1984,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {
res.mainTask.needState[j] = false
res.mainTask.pend--
+ smallStorageGauge.Inc(1)
}
// If the last contract was chunked, mark it as needing healing
// to avoid writing it out to disk prematurely.
@@ -1965,7 +2020,11 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
log.Debug("Chunked large contract", "initiators", len(keys), "tail", lastKey, "chunks", chunks)
}
r := newHashRange(lastKey, chunks)
-
+ if chunks == 1 {
+ smallStorageGauge.Inc(1)
+ } else {
+ largeStorageGauge.Inc(1)
+ }
// Our first task is the one that was just filled by this response.
batch := ethdb.HookedBatch{
Batch: s.db.NewBatch(),
@@ -1973,14 +2032,25 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
+ owner := account // local assignment for stacktrie writer closure
+ options := trie.NewStackTrieOptions()
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(batch, owner, path, hash, blob, s.scheme)
+ })
+ if s.scheme == rawdb.PathScheme {
+ options = options.WithCleaner(func(path []byte) {
+ s.cleanPath(batch, owner, path)
+ })
+ // Keep the left boundary as it's the first range.
+ // Skip the right boundary if it's not the last range.
+ options = options.WithSkipBoundary(false, r.End() != common.MaxHash, boundaryStorageNodesGauge)
+ }
tasks = append(tasks, &storageTask{
Next: common.Hash{},
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account),
+ genTrie: trie.NewStackTrie(options),
})
for r.Next() {
batch := ethdb.HookedBatch{
@@ -1989,14 +2059,27 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
s.storageBytes += common.StorageSize(len(key) + len(value))
},
}
+ options := trie.NewStackTrieOptions()
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(batch, owner, path, hash, blob, s.scheme)
+ })
+ if s.scheme == rawdb.PathScheme {
+ // Configure the dangling node cleaner and also filter out boundary nodes
+ // only in the context of the path scheme. Deletion is forbidden in the
+ // hash scheme, as it can disrupt state completeness.
+ options = options.WithCleaner(func(path []byte) {
+ s.cleanPath(batch, owner, path)
+ })
+ // Skip the left boundary as it's not the first range.
+ // Skip the right boundary if it's not the last range.
+ options = options.WithSkipBoundary(true, r.End() != common.MaxHash, boundaryStorageNodesGauge)
+ }
tasks = append(tasks, &storageTask{
Next: r.Start(),
Last: r.End(),
root: acc.Root,
genBatch: batch,
- genTrie: trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account),
+ genTrie: trie.NewStackTrie(options),
})
}
for _, task := range tasks {
@@ -2041,9 +2124,23 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
slots += len(res.hashes[i])
if i < len(res.hashes)-1 || res.subTask == nil {
- tr := trie.NewStackTrieWithOwner(func(owner common.Hash, path []byte, hash common.Hash, val []byte) {
- rawdb.WriteTrieNode(batch, owner, path, hash, val, s.scheme)
- }, account)
+ // No local reassignment of account is needed: this closure does not outlive the loop.
+ options := trie.NewStackTrieOptions()
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(batch, account, path, hash, blob, s.scheme)
+ })
+ if s.scheme == rawdb.PathScheme {
+ // Configure the dangling node cleaner only in the context of the
+ // path scheme. Deletion is forbidden in the hash scheme, as it can
+ // disrupt state completeness.
+ //
+ // Notably, boundary nodes can be also kept because the whole storage
+ // trie is complete.
+ options = options.WithCleaner(func(path []byte) {
+ s.cleanPath(batch, account, path)
+ })
+ }
+ tr := trie.NewStackTrie(options)
for j := 0; j < len(res.hashes[i]); j++ {
tr.Update(res.hashes[i][j][:], res.slots[i][j])
}
@@ -2065,18 +2162,25 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
// Large contracts could have generated new trie nodes, flush them to disk
if res.subTask != nil {
if res.subTask.done {
- if root, err := res.subTask.genTrie.Commit(); err != nil {
- log.Error("Failed to commit stack slots", "err", err)
- } else if root == res.subTask.root {
- // If the chunk's root is an overflown but full delivery, clear the heal request
+ root := res.subTask.genTrie.Commit()
+ if err := res.subTask.genBatch.Write(); err != nil {
+ log.Error("Failed to persist stack slots", "err", err)
+ }
+ res.subTask.genBatch.Reset()
+
+ // If the chunk's root is an overflown but full delivery,
+ // clear the heal request.
+ accountHash := res.accounts[len(res.accounts)-1]
+ if root == res.subTask.root && rawdb.HasStorageTrieNode(s.db, accountHash, nil, root) {
for i, account := range res.mainTask.res.hashes {
- if account == res.accounts[len(res.accounts)-1] {
+ if account == accountHash {
res.mainTask.needHeal[i] = false
+ skipStorageHealingGauge.Inc(1)
}
}
}
}
- if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || res.subTask.done {
+ if res.subTask.genBatch.ValueSize() > ethdb.IdealBatchSize {
if err := res.subTask.genBatch.Write(); err != nil {
log.Error("Failed to persist stack slots", "err", err)
}
@@ -2283,9 +2387,7 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
// flush after finalizing task.done. It's fine even if we crash and lose this
// write as it will only cause more data to be downloaded during heal.
if task.done {
- if _, err := task.genTrie.Commit(); err != nil {
- log.Error("Failed to commit stack account", "err", err)
- }
+ task.genTrie.Commit()
}
if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
if err := task.genBatch.Write(); err != nil {
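Every stack-trie construction site above applies the same boundary rule: a chunk keeps its left-boundary nodes only if it is the first range of the trie, and its right-boundary nodes only if it is the last. A minimal, self-contained sketch of just that decision using the reworked trie API (the helper name optionsForChunk and the map sink are illustrative; the real callers write through rawdb.WriteTrieNode and pass a boundary gauge):

    // optionsForChunk condenses the recurring WithSkipBoundary decision above.
    // Boundary nodes are withheld because a range proof only vouches for the
    // interior of the served range; the edges are completed during healing.
    func optionsForChunk(firstRange, lastRange bool, sink map[string]common.Hash) *trie.StackTrieOptions {
        options := trie.NewStackTrieOptions()
        options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
            sink[string(path)] = hash // stand-in for rawdb.WriteTrieNode
        })
        // Skip the left boundary if it's not the first range; skip the
        // right boundary if it's not the last range (no gauge in this sketch).
        return options.WithSkipBoundary(!firstRange, !lastRange, nil)
    }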
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 9ade6f2b2f..0e291d7b9d 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -153,9 +153,10 @@ func (f *fuzzer) fuzz() int {
trieA = trie.NewEmpty(dbA)
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB), nil)
- trieB = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme())
+ options = trie.NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme())
})
+ trieB = trie.NewStackTrie(options)
vals kvs
useful bool
maxElements = 10000
@@ -203,9 +204,7 @@ func (f *fuzzer) fuzz() int {
trieB.Update(kv.k, kv.v)
}
rootB := trieB.Hash()
- if _, err := trieB.Commit(); err != nil {
- panic(err)
- }
+ trieB.Commit()
if rootA != rootB {
panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootB))
}
@@ -217,22 +216,20 @@ func (f *fuzzer) fuzz() int {
// Ensure all the nodes are persisted correctly
// Need tracked deleted nodes.
var (
- nodeset = make(map[string][]byte) // path -> blob
- trieC = trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ nodeset = make(map[string][]byte) // path -> blob
+ optionsC = trie.NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
if crypto.Keccak256Hash(blob) != hash {
panic("invalid node blob")
}
- if owner != (common.Hash{}) {
- panic("invalid node owner")
- }
nodeset[string(path)] = common.CopyBytes(blob)
})
+ trieC = trie.NewStackTrie(optionsC)
checked int
)
for _, kv := range vals {
trieC.Update(kv.k, kv.v)
}
- rootC, _ := trieC.Commit()
+ rootC := trieC.Commit()
if rootA != rootC {
panic(fmt.Sprintf("roots differ: (trie) %x != %x (stacktrie)", rootA, rootC))
}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 240aa25284..f173e3d45b 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -86,6 +86,10 @@ type kv struct {
t bool
}
+func (k *kv) cmp(other *kv) int {
+ return bytes.Compare(k.k, other.k)
+}
+
func TestIteratorLargeData(t *testing.T) {
trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
diff --git a/trie/proof.go b/trie/proof.go
index 52673e19b3..437cf3d00b 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -502,7 +502,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key
if proof == nil {
tr := NewStackTrie(nil)
for index, key := range keys {
- tr.TryUpdate(key, values[index])
+ tr.Update(key, values[index])
}
if have, want := tr.Hash(), rootHash; have != want {
return false, fmt.Errorf("invalid proof, want hash %x, got %x", want, have)
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index 9fcc0831ee..8a18a6f86f 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -17,179 +17,145 @@
package trie
import (
- "bufio"
"bytes"
- "encoding/gob"
- "errors"
"fmt"
- "io"
"sync"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
)
-var ErrCommitDisabled = errors.New("no database for committing")
+var (
+ stPool = sync.Pool{New: func() any { return new(stNode) }}
+ _ = types.TrieHasher((*StackTrie)(nil)) // Ensure StackTrie implements the TrieHasher interface
+)
+
+// StackTrieOptions contains the configured options for manipulating the stackTrie.
+type StackTrieOptions struct {
+ Writer func(path []byte, hash common.Hash, blob []byte) // The function to commit the dirty nodes
+ Cleaner func(path []byte) // The function to clean up dangling nodes
-var stPool = sync.Pool{
- New: func() interface{} {
- return NewStackTrie(nil)
- },
+ SkipLeftBoundary bool // Flag whether the nodes on the left boundary are skipped for committing
+ SkipRightBoundary bool // Flag whether the nodes on the right boundary are skipped for committing
+ boundaryGauge metrics.Gauge // Gauge to track how many boundary nodes are met
}
-// NodeWriteFunc is used to provide all information of a dirty node for committing
-// so that callers can flush nodes into database with desired scheme.
-type NodeWriteFunc = func(owner common.Hash, path []byte, hash common.Hash, blob []byte)
+// NewStackTrieOptions initializes an empty set of options for the stackTrie.
+func NewStackTrieOptions() *StackTrieOptions { return &StackTrieOptions{} }
-func stackTrieFromPool(writenFn NodeWriteFunc, owner common.Hash) *StackTrie {
- st := stPool.Get().(*StackTrie)
- st.owner = owner
- st.writeFn = writenFn
- return st
+// WithWriter configures trie node writer within the options.
+func (o *StackTrieOptions) WithWriter(writer func(path []byte, hash common.Hash, blob []byte)) *StackTrieOptions {
+ o.Writer = writer
+ return o
+}
+
+// WithCleaner configures the cleaner in the options for removing dangling nodes.
+func (o *StackTrieOptions) WithCleaner(cleaner func(path []byte)) *StackTrieOptions {
+ o.Cleaner = cleaner
+ return o
}
-func returnToPool(st *StackTrie) {
- st.Reset()
- stPool.Put(st)
+// WithSkipBoundary configures whether the left and right boundary nodes are
+// filtered for committing, along with a gauge metric to track how many
+// boundary nodes are met.
+func (o *StackTrieOptions) WithSkipBoundary(skipLeft, skipRight bool, gauge metrics.Gauge) *StackTrieOptions {
+ o.SkipLeftBoundary = skipLeft
+ o.SkipRightBoundary = skipRight
+ o.boundaryGauge = gauge
+ return o
}
// StackTrie is a trie implementation that expects keys to be inserted
// in order. Once it determines that a subtree will no longer be inserted
// into, it will hash it and free up the memory it uses.
type StackTrie struct {
- owner common.Hash // the owner of the trie
- nodeType uint8 // node type (as in branch, ext, leaf)
- val []byte // value contained by this node if it's a leaf
- key []byte // key chunk covered by this (full|ext) node
- children [16]*StackTrie // list of children (for fullnodes and exts)
- writeFn NodeWriteFunc // function for commiting nodes, can be nil
+ options *StackTrieOptions
+ root *stNode
+ h *hasher
+
+ first []byte // The (hex-encoded without terminator) key of first inserted entry, tracked as left boundary.
+ last []byte // The (hex-encoded without terminator) key of last inserted entry, tracked as right boundary.
}
// NewStackTrie allocates and initializes an empty trie.
-func NewStackTrie(writeFn NodeWriteFunc) *StackTrie {
- return &StackTrie{
- nodeType: emptyNode,
- writeFn: writeFn, // function for committing nodes, can be nil
+func NewStackTrie(options *StackTrieOptions) *StackTrie {
+ if options == nil {
+ options = NewStackTrieOptions()
}
-}
-
-// NewStackTrieWithOwner allocates and initializes an empty trie, but with
-// the additional owner field.
-func NewStackTrieWithOwner(writeFn NodeWriteFunc, owner common.Hash) *StackTrie {
return &StackTrie{
- owner: owner,
- nodeType: emptyNode,
- writeFn: writeFn, // function for committing nodes, can be nil
+ options: options,
+ root: stPool.Get().(*stNode),
+ h: newHasher(false),
}
}
-// NewFromBinary initialises a serialized stacktrie with the given db.
-func NewFromBinary(data []byte, writeFn NodeWriteFunc) (*StackTrie, error) {
- var st StackTrie
- if err := st.UnmarshalBinary(data); err != nil {
- return nil, err
- }
- // If a database is used, we need to recursively add it to every child
- if writeFn != nil {
- st.setWriteFunc(writeFn)
+func (t *StackTrie) Update(key, value []byte) {
+ if err := t.TryUpdate(key, value); err != nil {
+ log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
}
- return &st, nil
}
-// MarshalBinary implements encoding.BinaryMarshaler
-func (st *StackTrie) MarshalBinary() (data []byte, err error) {
- var (
- b bytes.Buffer
- w = bufio.NewWriter(&b)
- )
- if err := gob.NewEncoder(w).Encode(struct {
- Owner common.Hash
- NodeType uint8
- Val []byte
- Key []byte
- }{
- st.owner,
- st.nodeType,
- st.val,
- st.key,
- }); err != nil {
- return nil, err
- }
- for _, child := range st.children {
- if child == nil {
- w.WriteByte(0)
- continue
- }
- w.WriteByte(1)
- if childData, err := child.MarshalBinary(); err != nil {
- return nil, err
- } else {
- w.Write(childData)
- }
+// TryUpdate inserts a (key, value) pair into the stack trie.
+func (t *StackTrie) TryUpdate(key, value []byte) error {
+ k := keybytesToHex(key)
+ if len(value) == 0 {
+ panic("deletion not supported")
}
- w.Flush()
- return b.Bytes(), nil
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (st *StackTrie) UnmarshalBinary(data []byte) error {
- r := bytes.NewReader(data)
- return st.unmarshalBinary(r)
-}
+ k = k[:len(k)-1] // chop the termination flag
-func (st *StackTrie) unmarshalBinary(r io.Reader) error {
- var dec struct {
- Owner common.Hash
- NodeType uint8
- Val []byte
- Key []byte
+ // track the first and last inserted entries.
+ if t.first == nil {
+ t.first = append([]byte{}, k...)
}
- gob.NewDecoder(r).Decode(&dec)
- st.owner = dec.Owner
- st.nodeType = dec.NodeType
- st.val = dec.Val
- st.key = dec.Key
-
- var hasChild = make([]byte, 1)
- for i := range st.children {
- if _, err := r.Read(hasChild); err != nil {
- return err
- } else if hasChild[0] == 0 {
- continue
- }
- var child StackTrie
- child.unmarshalBinary(r)
- st.children[i] = &child
+ if t.last == nil {
+ t.last = append([]byte{}, k...) // allocate key slice
+ } else {
+ t.last = append(t.last[:0], k...) // reuse key slice
}
+ t.insert(t.root, k, value, nil)
return nil
}
-func (st *StackTrie) setWriteFunc(writeFn NodeWriteFunc) {
- st.writeFn = writeFn
- for _, child := range st.children {
- if child != nil {
- child.setWriteFunc(writeFn)
- }
- }
+// Reset resets the stack trie object to empty state.
+func (t *StackTrie) Reset() {
+ t.options = NewStackTrieOptions()
+ t.root = stPool.Get().(*stNode)
+ t.first = nil
+ t.last = nil
}
-func newLeaf(owner common.Hash, key, val []byte, writeFn NodeWriteFunc) *StackTrie {
- st := stackTrieFromPool(writeFn, owner)
- st.nodeType = leafNode
+// stNode represents a node within a StackTrie
+type stNode struct {
+ typ uint8 // node type (as in branch, ext, leaf)
+ key []byte // key chunk covered by this (leaf|ext) node
+ val []byte // value contained by this node if it's a leaf
+ children [16]*stNode // list of children (for branch and exts)
+}
+
+// newLeaf constructs a leaf node with provided node key and value. The key
+// will be deep-copied in the function and safe to modify afterwards, but
+// value is not.
+func newLeaf(key, val []byte) *stNode {
+ st := stPool.Get().(*stNode)
+ st.typ = leafNode
st.key = append(st.key, key...)
st.val = val
return st
}
-func newExt(owner common.Hash, key []byte, child *StackTrie, writeFn NodeWriteFunc) *StackTrie {
- st := stackTrieFromPool(writeFn, owner)
- st.nodeType = extNode
+// newExt constructs an extension node with provided node key and child. The
+// key will be deep-copied in the function and safe to modify afterwards.
+func newExt(key []byte, child *stNode) *stNode {
+ st := stPool.Get().(*stNode)
+ st.typ = extNode
st.key = append(st.key, key...)
st.children[0] = child
return st
}
-// List all values that StackTrie#nodeType can hold
+// List all values that stNode#typ can hold
const (
emptyNode = iota
branchNode
@@ -198,65 +164,48 @@ const (
hashedNode
)
-// TryUpdate inserts a (key, value) pair into the stack trie
-func (st *StackTrie) TryUpdate(key, value []byte) error {
- k := keybytesToHex(key)
- if len(value) == 0 {
- panic("deletion not supported")
- }
- st.insert(k[:len(k)-1], value, nil)
- return nil
-}
-
-func (st *StackTrie) Update(key, value []byte) {
- if err := st.TryUpdate(key, value); err != nil {
- log.Error(fmt.Sprintf("Unhandled trie error: %v", err))
+func (n *stNode) reset() *stNode {
+ n.key = n.key[:0]
+ n.val = nil
+ for i := range n.children {
+ n.children[i] = nil
}
-}
-
-func (st *StackTrie) Reset() {
- st.owner = common.Hash{}
- st.writeFn = nil
- st.key = st.key[:0]
- st.val = nil
- for i := range st.children {
- st.children[i] = nil
- }
- st.nodeType = emptyNode
+ n.typ = emptyNode
+ return n
}
// Helper function that, given a full key, determines the index
// at which the chunk pointed by st.keyOffset is different from
// the same chunk in the full key.
-func (st *StackTrie) getDiffIndex(key []byte) int {
- for idx, nibble := range st.key {
+func (n *stNode) getDiffIndex(key []byte) int {
+ for idx, nibble := range n.key {
if nibble != key[idx] {
return idx
}
}
- return len(st.key)
+ return len(n.key)
}
 // Helper function that inserts a (key, value) pair into
-// the trie. Adding the prefix when inserting too.
-func (st *StackTrie) insert(key, value, prefix []byte) {
- switch st.nodeType {
+// the trie.
+func (t *StackTrie) insert(st *stNode, key, value []byte, path []byte) {
+ switch st.typ {
case branchNode: /* Branch */
idx := int(key[0])
// Unresolve elder siblings
for i := idx - 1; i >= 0; i-- {
if st.children[i] != nil {
- if st.children[i].nodeType != hashedNode {
- st.children[i].hash(append(prefix, byte(i)))
+ if st.children[i].typ != hashedNode {
+ t.hash(st.children[i], append(path, byte(i)))
}
break
}
}
// Add new child
if st.children[idx] == nil {
- st.children[idx] = newLeaf(st.owner, key[1:], value, st.writeFn)
+ st.children[idx] = newLeaf(key[1:], value)
} else {
- st.children[idx].insert(key[1:], value, append(prefix, key[0]))
+ t.insert(st.children[idx], key[1:], value, append(path, key[0]))
}
case extNode: /* Ext */
@@ -271,46 +220,45 @@ func (st *StackTrie) insert(key, value, prefix []byte) {
if diffidx == len(st.key) {
// Ext key and key segment are identical, recurse into
// the child node.
- st.children[0].insert(key[diffidx:], value, append(prefix, key[:diffidx]...))
+ t.insert(st.children[0], key[diffidx:], value, append(path, key[:diffidx]...))
return
}
// Save the original part. Depending if the break is
// at the extension's last byte or not, create an
// intermediate extension or use the extension's child
// node directly.
- var n *StackTrie
+ var n *stNode
if diffidx < len(st.key)-1 {
// Break on the non-last byte, insert an intermediate
// extension. The path prefix of the newly-inserted
// extension should also contain the different byte.
- n = newExt(st.owner, st.key[diffidx+1:], st.children[0], st.writeFn)
- n.hash(append(prefix, st.key[:diffidx+1]...))
+ n = newExt(st.key[diffidx+1:], st.children[0])
+ t.hash(n, append(path, st.key[:diffidx+1]...))
} else {
// an extension node: reuse the current node.
// The path prefix of the original part should
// still be same.
n = st.children[0]
- n.hash(append(prefix, st.key...))
+ t.hash(n, append(path, st.key...))
}
-
- var p *StackTrie
+ var p *stNode
if diffidx == 0 {
// the break is on the first byte, so
// the current node is converted into
// a branch node.
st.children[0] = nil
p = st
- st.nodeType = branchNode
+ st.typ = branchNode
} else {
// the common prefix is at least one byte
// long, insert a new intermediate branch
// node.
- st.children[0] = stackTrieFromPool(st.writeFn, st.owner)
- st.children[0].nodeType = branchNode
+ st.children[0] = stPool.Get().(*stNode)
+ st.children[0].typ = branchNode
p = st.children[0]
}
// Create a leaf for the inserted part
- o := newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
+ o := newLeaf(key[diffidx+1:], value)
// Insert both child leaves where they belong:
origIdx := st.key[diffidx]
@@ -336,18 +284,18 @@ func (st *StackTrie) insert(key, value, prefix []byte) {
// Check if the split occurs at the first nibble of the
// chunk. In that case, no prefix extnode is necessary.
// Otherwise, create that
- var p *StackTrie
+ var p *stNode
if diffidx == 0 {
// Convert current leaf into a branch
- st.nodeType = branchNode
+ st.typ = branchNode
p = st
st.children[0] = nil
} else {
// Convert current node into an ext,
// and insert a child branch node.
- st.nodeType = extNode
- st.children[0] = NewStackTrieWithOwner(st.writeFn, st.owner)
- st.children[0].nodeType = branchNode
+ st.typ = extNode
+ st.children[0] = stPool.Get().(*stNode)
+ st.children[0].typ = branchNode
p = st.children[0]
}
@@ -356,18 +304,18 @@ func (st *StackTrie) insert(key, value, prefix []byte) {
// The child leave will be hashed directly in order to
// free up some memory.
origIdx := st.key[diffidx]
- p.children[origIdx] = newLeaf(st.owner, st.key[diffidx+1:], st.val, st.writeFn)
- p.children[origIdx].hash(append(prefix, st.key[:diffidx+1]...))
+ p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val)
+ t.hash(p.children[origIdx], append(path, st.key[:diffidx+1]...))
newIdx := key[diffidx]
- p.children[newIdx] = newLeaf(st.owner, key[diffidx+1:], value, st.writeFn)
+ p.children[newIdx] = newLeaf(key[diffidx+1:], value)
// Finally, cut off the key part that has been passed
// over to the children.
st.key = st.key[:diffidx]
st.val = nil
case emptyNode: /* Empty */
- st.nodeType = leafNode
+ st.typ = leafNode
st.key = key
st.val = value
case hashedNode:
@@ -390,20 +338,21 @@ func (st *StackTrie) insert(key, value, prefix []byte) {
// This method will also:
 // set 'st.typ' to hashedNode
// clear 'st.key'
-func (st *StackTrie) hash(path []byte) {
+func (t *StackTrie) hash(st *stNode, path []byte) {
/* Shortcut if node is already hashed */
- if st.nodeType == hashedNode {
+ if st.typ == hashedNode {
return
}
// The 'hasher' is taken from a pool, but we don't actually
// claim an instance until all children are done with their hashing,
// and we actually need one
+
var (
- h *hasher
- encodedNode []byte
+ blob []byte // RLP-encoded node blob
+ internal [][]byte // List of node paths covered by the extension node
)
- switch st.nodeType {
+ switch st.typ {
case branchNode:
var node fullNode
for i, child := range st.children {
@@ -411,22 +360,32 @@ func (st *StackTrie) hash(path []byte) {
node.Children[i] = nilValueNode
continue
}
- child.hash(append(path, byte(i)))
+ t.hash(child, append(path, byte(i)))
if len(child.val) < 32 {
node.Children[i] = rawNode(child.val)
} else {
node.Children[i] = hashNode(child.val)
}
- st.children[i] = nil // Reclaim mem from subtree
- returnToPool(child)
+ st.children[i] = nil // Reclaim mem from subtree
+ stPool.Put(child.reset()) // Release child back to pool.
}
- h = newHasher(false)
- defer returnHasherToPool(h)
- node.encode(h.encbuf)
- encodedNode = h.encodedBytes()
+ node.encode(t.h.encbuf)
+ blob = t.h.encodedBytes()
case extNode:
- st.children[0].hash(append(path, st.key...))
+ // recursively hash and commit child as the first step
+ t.hash(st.children[0], append(path, st.key...))
+
+ // Collect the paths of internal nodes between the shortNode and its **on-disk**
+ // child. This is essential in the path scheme to avoid leaving dangling nodes
+ // within the range of this internal path on disk, which would break the
+ // guarantee needed for state healing.
+ if len(st.children[0].val) >= 32 && t.options.Cleaner != nil {
+ for i := 1; i < len(st.key); i++ {
+ internal = append(internal, append(path, st.key[:i]...))
+ }
+ }
+ // encode the extension node
sz := hexToCompactInPlace(st.key)
n := shortNode{Key: st.key[:sz]}
@@ -436,93 +395,83 @@ func (st *StackTrie) hash(path []byte) {
n.Val = hashNode(st.children[0].val)
}
- h = newHasher(false)
- defer returnHasherToPool(h)
+ n.encode(t.h.encbuf)
+ blob = t.h.encodedBytes()
- n.encode(h.encbuf)
- encodedNode = h.encodedBytes()
-
- returnToPool(st.children[0])
- st.children[0] = nil // Reclaim mem from subtree
+ stPool.Put(st.children[0].reset()) // Release child back to pool.
+ st.children[0] = nil // Reclaim mem from subtree
case leafNode:
- h = newHasher(false)
- defer returnHasherToPool(h)
st.key = append(st.key, byte(16))
sz := hexToCompactInPlace(st.key)
n := shortNode{Key: st.key[:sz], Val: valueNode(st.val)}
- n.encode(h.encbuf)
- encodedNode = h.encodedBytes()
+ n.encode(t.h.encbuf)
+ blob = t.h.encodedBytes()
case emptyNode:
st.val = emptyRoot.Bytes()
st.key = st.key[:0]
- st.nodeType = hashedNode
+ st.typ = hashedNode
return
default:
panic("Invalid node type")
}
st.key = st.key[:0]
- st.nodeType = hashedNode
- if len(encodedNode) < 32 {
+ st.typ = hashedNode
+ // Skip committing the non-root node if the size is smaller than 32 bytes.
+ if len(blob) < 32 && len(path) > 0 {
 // If the rlp-encoded value was < 32 bytes, then val points directly to the rlp-encoded value
- st.val = common.CopyBytes(encodedNode)
+ st.val = common.CopyBytes(blob)
return
}
- h = newHasher(false)
- defer returnHasherToPool(h)
-
- st.val = h.hashData(encodedNode)
+ st.val = t.h.hashData(blob)
- if st.writeFn != nil {
- st.writeFn(st.owner, path, common.BytesToHash(st.val), encodedNode)
+ // Short circuit if the stack trie is not configured for writing.
+ if t.options.Writer == nil {
+ return
+ }
+ // Skip committing if the node is on the left boundary and stackTrie is
+ // configured to filter the boundary.
+ if t.options.SkipLeftBoundary && bytes.HasPrefix(t.first, path) {
+ if t.options.boundaryGauge != nil {
+ t.options.boundaryGauge.Inc(1)
+ }
+ return
+ }
+ // Skip committing if the node is on the right boundary and stackTrie is
+ // configured to filter the boundary.
+ if t.options.SkipRightBoundary && bytes.HasPrefix(t.last, path) {
+ if t.options.boundaryGauge != nil {
+ t.options.boundaryGauge.Inc(1)
+ }
+ return
}
+ // Clean up the internal dangling nodes covered by the extension node.
+ // This should be done before writing the node to adhere to the committing
+ // order from bottom to top.
+ for _, path := range internal {
+ t.options.Cleaner(path)
+ }
+ t.options.Writer(path, common.BytesToHash(st.val), blob)
}
-// Hash returns the hash of the current node
-func (st *StackTrie) Hash() (h common.Hash) {
- st.hash(nil)
- if len(st.val) != 32 {
- // If the node's RLP isn't 32 bytes long, the node will not
- // be hashed, and instead contain the rlp-encoding of the
- // node. For the top level node, we need to force the hashing.
- ret := make([]byte, 32)
- h := newHasher(false)
- defer returnHasherToPool(h)
- h.sha.Reset()
- h.sha.Write(st.val)
- h.sha.Read(ret)
- return common.BytesToHash(ret)
- }
- return common.BytesToHash(st.val)
+// Hash will first hash the entire trie if it's still not hashed, then commit
+// all nodes to the associated database. Most of the trie nodes will have been
+// committed already; the main purpose here is to commit the nodes on the right
+// boundary.
+//
+// For the stack trie, Hash and Commit are functionally identical.
+func (t *StackTrie) Hash() (h common.Hash) {
+ n := t.root
+ t.hash(n, nil)
+ return common.BytesToHash(n.val)
}
-// Commit will firstly hash the entrie trie if it's still not hashed
-// and then commit all nodes to the associated database. Actually most
-// of the trie nodes MAY have been committed already. The main purpose
-// here is to commit the root node.
+// Commit will first hash the entire trie if it's still not hashed, then commit
+// all nodes to the associated database. Most of the trie nodes will have been
+// committed already; the main purpose here is to commit the nodes on the right
+// boundary.
//
-// The associated database is expected, otherwise the whole commit
-// functionality should be disabled.
-func (st *StackTrie) Commit() (common.Hash, error) {
- if st.writeFn == nil {
- return common.Hash{}, ErrCommitDisabled
- }
- st.hash(nil)
- if len(st.val) != 32 {
- // If the node's RLP isn't 32 bytes long, the node will not
- // be hashed (and committed), and instead contain the rlp-encoding of the
- // node. For the top level node, we need to force the hashing+commit.
- ret := make([]byte, 32)
- h := newHasher(false)
- defer returnHasherToPool(h)
- h.sha.Reset()
- // hash st.val -> ret
- h.sha.Write(st.val)
- h.sha.Read(ret)
- st.writeFn(st.owner, nil, common.BytesToHash(ret), st.val)
- return common.BytesToHash(ret), nil
- }
- return common.BytesToHash(st.val), nil
+// For the stack trie, Hash and Commit are functionally identical.
+func (t *StackTrie) Commit() common.Hash {
+ return t.Hash()
}
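Taken together, the rework collapses NewStackTrie(writeFn) and NewStackTrieWithOwner(writeFn, owner) into a single options-based constructor: the owner is captured by the writer closure instead of being threaded through every node, and Commit now returns only the root hash. A minimal migration sketch (batch, owner, scheme and the pre-sorted key/value pairs are assumed to come from the call site):

    // buildTrie shows the new call pattern in one place; it is a sketch,
    // not code from this patch.
    func buildTrie(batch ethdb.Batch, owner common.Hash, scheme string, keys, vals [][]byte) common.Hash {
        options := trie.NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
            rawdb.WriteTrieNode(batch, owner, path, hash, blob, scheme)
        })
        st := trie.NewStackTrie(options)
        for i, k := range keys { // keys must already be sorted ascending
            st.Update(k, vals[i])
        }
        return st.Commit() // identical to st.Hash(); the error return is gone
    }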
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index 10baeaf441..6f93c1f53a 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -19,11 +19,14 @@ package trie
import (
"bytes"
"math/big"
+ "math/rand"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/trie/testutil"
+ "golang.org/x/exp/slices"
)
func TestStackTrieInsertAndHash(t *testing.T) {
@@ -166,12 +169,11 @@ func TestStackTrieInsertAndHash(t *testing.T) {
{"13aa", "x___________________________3", "ff0dc70ce2e5db90ee42a4c2ad12139596b890e90eb4e16526ab38fa465b35cf"},
},
}
- st := NewStackTrie(nil)
for i, test := range tests {
// The StackTrie does not allow Insert(), Hash(), Insert(), ...
// so we will create new trie for every sequence length of inserts.
for l := 1; l <= len(test); l++ {
- st.Reset()
+ st := NewStackTrie(nil)
for j := 0; j < l; j++ {
kv := &test[j]
if err := st.TryUpdate(common.FromHex(kv.K), []byte(kv.V)); err != nil {
@@ -350,47 +352,86 @@ func TestStacktrieNotModifyValues(t *testing.T) {
}
}
-// TestStacktrieSerialization tests that the stacktrie works well if we
-// serialize/unserialize it a lot
-func TestStacktrieSerialization(t *testing.T) {
+func buildPartialTree(entries []*kv, t *testing.T) map[string]common.Hash {
var (
- st = NewStackTrie(nil)
- nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
- keyB = big.NewInt(1)
- keyDelta = big.NewInt(1)
- vals [][]byte
- keys [][]byte
+ options = NewStackTrieOptions()
+ nodes = make(map[string]common.Hash)
)
- getValue := func(i int) []byte {
- if i%2 == 0 { // large
- return crypto.Keccak256(big.NewInt(int64(i)).Bytes())
- } else { //small
- return big.NewInt(int64(i)).Bytes()
+ var (
+ first int
+ last = len(entries) - 1
+
+ noLeft bool
+ noRight bool
+ )
+ // Enter split mode if there are at least two elements
+ if rand.Intn(5) != 0 {
+ for {
+ first = rand.Intn(len(entries))
+ last = rand.Intn(len(entries))
+ if first <= last {
+ break
+ }
+ }
+ if first != 0 {
+ noLeft = true
+ }
+ if last != len(entries)-1 {
+ noRight = true
}
}
- for i := 0; i < 10; i++ {
- vals = append(vals, getValue(i))
- keys = append(keys, common.BigToHash(keyB).Bytes())
- keyB = keyB.Add(keyB, keyDelta)
- keyDelta.Add(keyDelta, common.Big1)
- }
- for i, k := range keys {
- nt.TryUpdate(k, common.CopyBytes(vals[i]))
+ options = options.WithSkipBoundary(noLeft, noRight, nil)
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ nodes[string(path)] = hash
+ })
+ tr := NewStackTrie(options)
+
+ for i := first; i <= last; i++ {
+ tr.TryUpdate(entries[i].k, entries[i].v)
}
+ tr.Commit()
+ return nodes
+}
+
+func TestPartialStackTrie(t *testing.T) {
+ for round := 0; round < 100; round++ {
+ var (
+ n = rand.Intn(100) + 1
+ entries []*kv
+ )
+ for i := 0; i < n; i++ {
+ var val []byte
+ if rand.Intn(3) == 0 {
+ val = testutil.RandBytes(3)
+ } else {
+ val = testutil.RandBytes(32)
+ }
+ entries = append(entries, &kv{
+ k: testutil.RandBytes(32),
+ v: val,
+ })
+ }
+ slices.SortFunc(entries, (*kv).cmp)
+
+ var (
+ nodes = make(map[string]common.Hash)
+ options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ nodes[string(path)] = hash
+ })
+ )
+ tr := NewStackTrie(options)
- for i, k := range keys {
- blob, err := st.MarshalBinary()
- if err != nil {
- t.Fatal(err)
+ for i := 0; i < len(entries); i++ {
+ tr.TryUpdate(entries[i].k, entries[i].v)
}
- newSt, err := NewFromBinary(blob, nil)
- if err != nil {
- t.Fatal(err)
+ tr.Commit()
+
+ for j := 0; j < 100; j++ {
+ for path, hash := range buildPartialTree(entries, t) {
+ if nodes[path] != hash {
+ t.Errorf("%v, want %x, got %x", []byte(path), nodes[path], hash)
+ }
+ }
}
- st = newSt
- st.TryUpdate(k, common.CopyBytes(vals[i]))
- }
- if have, want := st.Hash(), nt.Hash(); have != want {
- t.Fatalf("have %#x want %#x", have, want)
}
}
diff --git a/trie/sync.go b/trie/sync.go
index 2b257fe2f7..195395f737 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -49,6 +49,18 @@ var (
// lookupGauge is the metric to track how many trie node lookups are
 // performed to determine if a node needs to be deleted.
lookupGauge = metrics.NewRegisteredGauge("trie/sync/lookup", nil)
+
+ // accountNodeSyncedGauge is the metric to track how many account trie
+ // nodes are written during the sync.
+ accountNodeSyncedGauge = metrics.NewRegisteredGauge("trie/sync/nodes/account", nil)
+
+ // storageNodeSyncedGauge is the metric to track how many storage trie
+ // nodes are written during the sync.
+ storageNodeSyncedGauge = metrics.NewRegisteredGauge("trie/sync/nodes/storage", nil)
+
+ // codeSyncedGauge is the metric to track how many contract codes are
+ // written during the sync.
+ codeSyncedGauge = metrics.NewRegisteredGauge("trie/sync/codes", nil)
)
// SyncPath is a path tuple identifying a particular trie node either in a single
@@ -373,14 +385,26 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error {
// storage, returning any occurred error.
func (s *Sync) Commit(dbw ethdb.Batch) error {
// Flush the pending node writes into database batch.
+ var (
+ account int
+ storage int
+ )
for path, value := range s.membatch.nodes {
owner, inner := ResolvePath([]byte(path))
+ if owner == (common.Hash{}) {
+ account += 1
+ } else {
+ storage += 1
+ }
rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme)
hash := s.membatch.hashes[path]
if s.bloom != nil {
s.bloom.Add(hash[:])
}
}
+ accountNodeSyncedGauge.Inc(int64(account))
+ storageNodeSyncedGauge.Inc(int64(storage))
+
// Flush the pending node deletes into the database batch.
// Please note that each written and deleted node has a
// unique path, ensuring no duplication occurs.
@@ -395,6 +419,8 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
s.bloom.Add(hash[:])
}
}
+ codeSyncedGauge.Inc(int64(len(s.membatch.codes)))
+
s.membatch = newSyncMemBatch() // reset the batch
return nil
}
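The new counters hinge on the owner hash resolved from each membatch path: a zero owner denotes the account trie, any other value a storage trie. The classification rule in isolation (classify is a hypothetical helper; ResolvePath is the same function used in Commit above):

    // classify splits a set of membatch paths into account-trie and
    // storage-trie node counts, mirroring the loop in Sync.Commit.
    func classify(paths []string) (account, storage int64) {
        for _, path := range paths {
            owner, _ := trie.ResolvePath([]byte(path))
            if owner == (common.Hash{}) {
                account++ // account-trie node
            } else {
                storage++ // storage-trie node
            }
        }
        return account, storage
    }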
diff --git a/trie/trie_test.go b/trie/trie_test.go
index b53f6f83eb..ff087b6ef9 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -893,10 +893,11 @@ func TestCommitSequenceStackTrie(t *testing.T) {
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
- writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
- }
- stTrie := NewStackTrie(writeFn)
+ options := NewStackTrieOptions()
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
+ })
+ stTrie := NewStackTrie(options)
 // Fill the trie with elements; start at 0, otherwise nodes will be nil the first time.
for i := 0; i < count; i++ {
// For the stack trie, we need to do inserts in proper order
@@ -919,10 +920,7 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// Flush memdb -> disk (sponge)
db.Commit(root, false)
// And flush stacktrie -> disk
- stRoot, err := stTrie.Commit()
- if err != nil {
- t.Fatalf("Failed to commit stack trie %v", err)
- }
+ stRoot := stTrie.Commit()
if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root)
}
@@ -953,10 +951,11 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
trie := NewEmpty(db)
// Another sponge is used for the stacktrie commits
stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
- writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
- rawdb.WriteTrieNode(stackTrieSponge, owner, path, hash, blob, db.Scheme())
- }
- stTrie := NewStackTrie(writeFn)
+ options := NewStackTrieOptions()
+ options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
+ })
+ stTrie := NewStackTrie(options)
// Add a single small-element to the trie(s)
key := make([]byte, 5)
key[0] = 1
@@ -968,10 +967,7 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
// Flush memdb -> disk (sponge)
db.Commit(root, false)
// And flush stacktrie -> disk
- stRoot, err := stTrie.Commit()
- if err != nil {
- t.Fatalf("Failed to commit stack trie %v", err)
- }
+ stRoot := stTrie.Commit()
if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root)
}
From fa4a08775df4e69ca1948944a86681f8b9b7302e Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Fri, 8 Nov 2024 17:47:07 +0700
Subject: [PATCH 38/41] core, triedb/pathdb: pbss fix release v1.13.8
(continue) and v1.14.0 (#626)
* core/rawdb: improve state scheme checking (#28724)
This pull request improves the condition used to check whether the path state scheme is in use.
Originally, the presence of the root node was used as the indicator of whether the path scheme is in use. However, since the root node is deleted during the initial snap sync, this condition is no longer reliable.
If the PersistentStateID is present, the database has already been configured for the path scheme; a condensed sketch of the resulting detection order follows this message.
* core, triedb/pathdb: calculate the size for batch pre-allocation (#29106)
* core, triedb/pathdb: calculate the size for batch pre-allocation
* triedb/pathdb: address comment
* triedb/pathdb: fix panic in recoverable (#29107)
* triedb/pathdb: fix panic in recoverable
* triedb/pathdb: add todo
* triedb/pathdb: rename
* triedb/pathdb: rename
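A condensed sketch of the resulting scheme-detection order (readAccountTrieRoot is a hypothetical stand-in for the root-node lookup, and the genesis fallback is abbreviated; the real code is in core/rawdb/accessors_trie.go below):

    func detectScheme(db ethdb.Reader) string {
        // 1. A persisted path-scheme root node is a positive signal.
        if blob := readAccountTrieRoot(db); len(blob) != 0 {
            return rawdb.PathScheme
        }
        // 2. The root node may have been deleted by an initial snap sync,
        //    so the persistent state id serves as a second indicator.
        if id := rawdb.ReadPersistentStateID(db); id != 0 {
            return rawdb.PathScheme
        }
        // 3. Otherwise inspect the genesis state, which a hash-based
        //    database always retains (condensed here to the default).
        return rawdb.HashScheme
    }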
---------
Co-authored-by: rjl493456442
---
core/rawdb/accessors_trie.go | 5 +++++
core/rawdb/schema.go | 26 +++++++++++++-------------
core/rawdb/schema_test.go | 4 ++--
trie/triedb/pathdb/database.go | 13 +++++++++----
trie/triedb/pathdb/nodebuffer.go | 15 ++++++++++++++-
5 files changed, 43 insertions(+), 20 deletions(-)
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index 14b3a96ba0..869991a388 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -294,6 +294,11 @@ func ReadStateScheme(db ethdb.Reader) string {
if len(blob) != 0 {
return PathScheme
}
+ // The root node might be deleted during the initial snap sync, so also
+ // check the persistent state id.
+ if id := ReadPersistentStateID(db); id != 0 {
+ return PathScheme
+ }
// In a hash-based scheme, the genesis state is consistently stored
// on the disk. To assess the scheme of the persistent state, it
// suffices to inspect the scheme of the genesis state.
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 3c99d1db66..c1ea97e163 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -114,8 +114,8 @@ var (
dirtyAccountsKey = []byte("dacc") // dirtyAccountsPrefix + block hash -> dirty accounts
// Path-based storage scheme of merkle patricia trie.
- trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
- trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
+ TrieNodeAccountPrefix = []byte("A") // TrieNodeAccountPrefix + hexPath -> trie node
+ TrieNodeStoragePrefix = []byte("O") // TrieNodeStoragePrefix + accountHash + hexPath -> trie node
stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
@@ -250,12 +250,12 @@ func genesisStateSpecKey(hash common.Hash) []byte {
// accountTrieNodeKey = trieNodeAccountPrefix + nodePath.
func accountTrieNodeKey(path []byte) []byte {
- return append(trieNodeAccountPrefix, path...)
+ return append(TrieNodeAccountPrefix, path...)
}
-// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
+// storageTrieNodeKey = TrieNodeStoragePrefix + accountHash + nodePath.
func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
- return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...)
+ return append(append(TrieNodeStoragePrefix, accountHash.Bytes()...), path...)
}
func snapshotConsortiumKey(hash common.Hash) []byte {
@@ -277,16 +277,16 @@ func IsLegacyTrieNode(key []byte, val []byte) bool {
// account trie node in path-based state scheme, and returns the resolved
// node path if so.
func ResolveAccountTrieNodeKey(key []byte) (bool, []byte) {
- if !bytes.HasPrefix(key, trieNodeAccountPrefix) {
+ if !bytes.HasPrefix(key, TrieNodeAccountPrefix) {
return false, nil
}
 // The remaining key should only consist of a hex node path
// whose length is in the range 0 to 64 (64 is excluded
// since leaves are always wrapped with shortNode).
- if len(key) >= len(trieNodeAccountPrefix)+common.HashLength*2 {
+ if len(key) >= len(TrieNodeAccountPrefix)+common.HashLength*2 {
return false, nil
}
- return true, key[len(trieNodeAccountPrefix):]
+ return true, key[len(TrieNodeAccountPrefix):]
}
// IsAccountTrieNode reports whether a provided database entry is an account
@@ -300,20 +300,20 @@ func IsAccountTrieNode(key []byte) bool {
// trie node in path-based state scheme, and returns the resolved account hash
// and node path if so.
func ResolveStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
- if !bytes.HasPrefix(key, trieNodeStoragePrefix) {
+ if !bytes.HasPrefix(key, TrieNodeStoragePrefix) {
return false, common.Hash{}, nil
}
// The remaining key consists of 2 parts:
// - 32 bytes account hash
// - hex node path whose length is in the range 0 to 64
- if len(key) < len(trieNodeStoragePrefix)+common.HashLength {
+ if len(key) < len(TrieNodeStoragePrefix)+common.HashLength {
return false, common.Hash{}, nil
}
- if len(key) >= len(trieNodeStoragePrefix)+common.HashLength+common.HashLength*2 {
+ if len(key) >= len(TrieNodeStoragePrefix)+common.HashLength+common.HashLength*2 {
return false, common.Hash{}, nil
}
- accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength])
- return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:]
+ accountHash := common.BytesToHash(key[len(TrieNodeStoragePrefix) : len(TrieNodeStoragePrefix)+common.HashLength])
+ return true, accountHash, key[len(TrieNodeStoragePrefix)+common.HashLength:]
}
// IsStorageTrieNode reports whether a provided database entry is a storage
diff --git a/core/rawdb/schema_test.go b/core/rawdb/schema_test.go
index 06b017d41c..11d036756d 100644
--- a/core/rawdb/schema_test.go
+++ b/core/rawdb/schema_test.go
@@ -99,7 +99,7 @@ func TestResolveAccountTrieNodeKey(t *testing.T) {
},
{
name: "storage prefixed",
- inputKey: append(trieNodeStoragePrefix, bytes4...),
+ inputKey: append(TrieNodeStoragePrefix, bytes4...),
expectedCheck: false,
expectedKey: nil,
},
@@ -175,7 +175,7 @@ func TestResolveStorageTrieNode(t *testing.T) {
},
{
name: "storage prefixed hash 20 length 4",
- inputKey: append(append(trieNodeStoragePrefix, bytes20...), bytes4...),
+ inputKey: append(append(TrieNodeStoragePrefix, bytes20...), bytes4...),
expectedCheck: false,
expectedHash: common.Hash{},
expectedKey: nil,
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
index 873418c9f0..ea5c50ab6a 100644
--- a/trie/triedb/pathdb/database.go
+++ b/trie/triedb/pathdb/database.go
@@ -380,18 +380,23 @@ func (db *Database) Recoverable(root common.Hash) bool {
if *id >= dl.stateID() {
return false
}
-
+ // This is a temporary workaround for the unavailability of the freezer in
+ // dev mode. As a consequence, the pathdb loses the ability to perform deep
+ // reorgs in certain cases.
+ // TODO(rjl493456442): Implement the in-memory ancient store.
+ if db.freezer == nil {
+ return false
+ }
// Ensure the requested state is a canonical state and all state
// histories in range [id+1, disklayer.ID] are present and complete.
- parent := root
return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error {
- if m.parent != parent {
+ if m.parent != root {
return errors.New("unexpected state history")
}
if len(m.incomplete) > 0 {
return errors.New("incomplete state history")
}
- parent = m.root
+ root = m.root
return nil
}) == nil
}
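The rewritten loop still threads the expected parent through the walk; it merely reuses the root variable instead of a separate parent. The invariant it enforces, spelled out over a hypothetical materialized slice of the records checkHistories visits:

    expected := root
    for _, m := range histories {
        // Every history in [id+1, disklayer.ID] must chain off the previous
        // root, i.e. form one canonical path back to the requested state.
        if m.parent != expected {
            return errors.New("unexpected state history")
        }
        expected = m.root
    }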
diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go
index b024986d3a..0a334baba2 100644
--- a/trie/triedb/pathdb/nodebuffer.go
+++ b/trie/triedb/pathdb/nodebuffer.go
@@ -205,6 +205,19 @@ func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.
return b.flush(db, clean, id, false)
}
+// allocBatch returns a database batch with pre-allocated buffer.
+func (b *nodebuffer) allocBatch(db ethdb.KeyValueStore) ethdb.Batch {
+ var metasize int
+ for owner, nodes := range b.nodes {
+ if owner == (common.Hash{}) {
+ metasize += len(nodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
+ } else {
+ metasize += len(nodes) * (len(rawdb.TrieNodeStoragePrefix) + common.HashLength) // database key prefix + owner
+ }
+ }
+ return db.NewBatchWithSize((metasize + int(b.size)) * 11 / 10) // extra 10% headroom for pebble's internal overhead
+}
+
// flush persists the in-memory dirty trie node into the disk if the configured
// memory threshold is reached. Note, all data must be written atomically.
func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error {
@@ -218,7 +231,7 @@ func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id ui
}
var (
start = time.Now()
- batch = db.NewBatchWithSize(int(b.size))
+ batch = b.allocBatch(db)
)
nodes := writeNodes(batch, b.nodes, clean)
rawdb.WritePersistentStateID(batch, id)
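A worked example of allocBatch's estimate, with illustrative numbers only: for a buffer holding 1,000 account-trie nodes and 4,000 storage-trie nodes over b.size = 1 MiB of payload,

    metasize = 1000*len("A") + 4000*(len("O")+32) // 1,000 + 132,000 = 133,000 bytes
    capacity = (133000 + 1<<20) * 11 / 10         // ~1.24 MiB, incl. 10% headroom

so the batch is sized once up front instead of growing through repeated reallocation while flushing.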
From 8b1fa06ba49d1d23eb672bf017ba683187ea0b36 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Sat, 9 Nov 2024 15:52:50 +0700
Subject: [PATCH 39/41] protocols/snap/sync_test: each peer has a different
 account trie (#628)
---
eth/protocols/snap/sync_test.go | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index fcf3373740..2ee04c7d29 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -579,7 +579,7 @@ func testSyncBloatedProof(t *testing.T, scheme string) {
nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(scheme, 100)
source := newTestPeer("source", t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
@@ -663,7 +663,7 @@ func testSync(t *testing.T, scheme string) {
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
return source
}
@@ -696,7 +696,7 @@ func testSyncTinyTriePanic(t *testing.T, scheme string) {
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
return source
}
@@ -730,7 +730,7 @@ func testMultiSync(t *testing.T, scheme string) {
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
return source
}
@@ -767,7 +767,7 @@ func testSyncWithStorage(t *testing.T, scheme string) {
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
@@ -984,7 +984,7 @@ func testSyncBoundaryAccountTrie(t *testing.T, scheme string) {
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
return source
}
@@ -1108,7 +1108,7 @@ func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) {
mkSource := func(name string, accFn accountHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.accountRequestHandler = accFn
return source
@@ -1151,7 +1151,7 @@ func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) {
mkSource := func(name string, codeFn codeHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.codeRequestHandler = codeFn
return source
@@ -1204,7 +1204,7 @@ func testSyncBoundaryStorageTrie(t *testing.T, scheme string) {
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
@@ -1244,7 +1244,7 @@ func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) {
mkSource := func(name string, slow bool) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
@@ -1290,7 +1290,7 @@ func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) {
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
@@ -1332,7 +1332,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
mkSource := func(name string, handler storageHandlerFunc) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
@@ -1377,7 +1377,7 @@ func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) {
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
source.storageTries = storageTries
source.storageValues = storageElems
@@ -1800,7 +1800,7 @@ func testSyncAccountPerformance(t *testing.T, scheme string) {
mkSource := func(name string) *testPeer {
source := newTestPeer(name, t, term)
- source.accountTrie = sourceAccountTrie
+ source.accountTrie = sourceAccountTrie.Copy()
source.accountValues = elems
return source
}
From 935aa9d915f158d1c77a1432770b1d54e78e0f04 Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Mon, 11 Nov 2024 15:29:09 +0700
Subject: [PATCH 40/41] core, cmd, trie: pbss fix release v1.13.6 and v1.13.8
(#618)
* trie: remove inconsistent trie nodes during sync in path mode (#28595)
This fixes a database corruption issue that could occur during state healing.
When a sync is aborted while certain modifications have already been committed, and a reorg then occurs, the database can be left containing incorrect trie nodes stored by path.
These nodes need to be detected and deleted in order to obtain a complete and fully correct state after state healing; a sketch of the resulting decision logic follows below.
---------
Co-authored-by: Felix Lange
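For orientation before the diff below: the fix reduces to a three-way decision per node path when probing a path-scheme database during healing. A minimal sketch of that decision, as an illustration only; the names (nodeStatus, classify) are invented here and do not appear in the patch:

package main

import (
	"bytes"
	"fmt"
)

// nodeStatus mirrors the three outcomes the patched scheduler distinguishes
// when probing a path-scheme database for a node during healing.
type nodeStatus int

const (
	nodeMatches      nodeStatus = iota // on disk with the expected hash: skip the subtree
	nodeInconsistent                   // on disk with a different hash: delete, then refetch
	nodeMissing                        // absent: schedule a fetch
)

// classify is an invented helper: blob/diskHash are whatever sits at the
// path on disk (empty when absent), want is the hash the parent expects.
func classify(blob, diskHash, want []byte) nodeStatus {
	switch {
	case len(blob) != 0 && bytes.Equal(diskHash, want):
		return nodeMatches
	case len(blob) != 0:
		return nodeInconsistent
	default:
		return nodeMissing
	}
}

func main() {
	fmt.Println(classify([]byte{1}, []byte("h1"), []byte("h1"))) // 0: skip
	fmt.Println(classify([]byte{1}, []byte("h2"), []byte("h1"))) // 1: delete + refetch
	fmt.Println(classify(nil, nil, []byte("h1")))                // 2: fetch
}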
* core, cmd, trie: fix the condition of pathdb initialization (#28718)
The original problem was caused by #28595, which made it so that as soon as a sync starts, the root of the disk layer is deleted. That is not wrong per se, but another part of the code uses the presence of the root as an init-check for the pathdb. Since that init-check now failed, the code tried to re-initialize the pathdb, which in turn failed because a sync was already ongoing. A sketch of the relaxed init-check follows the trailers below.
The total impact: after a state sync has begun, if the node is shut down for any reason, it will refuse to start up again with the error message `Fatal: Failed to register the Ethereum service: waiting for sync.`.
This change also modifies how `geth removedb` works, so that the user is prompted separately for two things: `state data` and `ancient chain`. The former includes both the chaindb as well as any state history stored in ancients.
---------
Co-authored-by: Martin HS
---------
Co-authored-by: rjl493456442
Co-authored-by: Felix Lange
Co-authored-by: Martin HS
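On the init-check side, the fix widens what counts as initialized: a recorded state-sync status is now accepted alongside the presence of a persistent root. A minimal sketch, assuming hypothetical boolean probes in place of the real disk-layer root lookup and the rawdb.ReadSnapSyncStatusFlag read:

package main

import "fmt"

// initialized sketches the patched check: hasPersistentRoot and
// syncStatusKnown are hypothetical stand-ins for the disk-layer root
// lookup and the snap-sync status flag probe in the real code.
func initialized(hasPersistentRoot, syncStatusKnown bool) bool {
	// Before the fix only the first condition was consulted, so a node
	// whose root had been removed by an in-flight sync looked
	// uninitialized and refused to restart.
	return hasPersistentRoot || syncStatusKnown
}

func main() {
	fmt.Println(initialized(false, true)) // true: restart proceeds mid-sync
}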
---
cmd/ronin/dbcmd.go | 99 +++++++++--------
core/rawdb/ancient_scheme.go | 8 +-
core/rawdb/ancient_utils.go | 12 +--
core/rawdb/database.go | 2 +-
ethdb/dbtest/testsuite.go | 6 +-
trie/sync.go | 192 ++++++++++++++++++++++-----------
trie/sync_test.go | 125 ++++++++++++++++++++-
trie/triedb/pathdb/database.go | 36 +++++--
8 files changed, 350 insertions(+), 130 deletions(-)
diff --git a/cmd/ronin/dbcmd.go b/cmd/ronin/dbcmd.go
index 53a83b5031..1de4223294 100644
--- a/cmd/ronin/dbcmd.go
+++ b/cmd/ronin/dbcmd.go
@@ -248,60 +248,73 @@ WARNING: This is a low-level operation which may cause database corruption!`,
func removeDB(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
- // Remove the full node state database
- path := stack.ResolvePath("chaindata")
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "full node state database")
- } else {
- log.Info("Full node state database missing", "path", path)
- }
- // Remove the full node ancient database
- path = config.Eth.DatabaseFreezer
+ // Resolve folder paths.
+ var (
+ rootDir = stack.ResolvePath("chaindata")
+ ancientDir = config.Eth.DatabaseFreezer
+ )
switch {
- case path == "":
- path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
- case !filepath.IsAbs(path):
- path = config.Node.ResolvePath(path)
- }
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "full node ancient database")
- } else {
- log.Info("Full node ancient database missing", "path", path)
- }
- // Remove the light node database
- path = stack.ResolvePath("lightchaindata")
- if common.FileExist(path) {
- confirmAndRemoveDB(path, "light node database")
- } else {
- log.Info("Light node database missing", "path", path)
- }
+ case ancientDir == "":
+ ancientDir = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
+ case !filepath.IsAbs(ancientDir):
+ ancientDir = config.Node.ResolvePath(ancientDir)
+ }
+ // Delete state data
+ statePaths := []string{rootDir, filepath.Join(ancientDir, rawdb.StateFreezerName)}
+ confirmAndRemoveDB(statePaths, "state data")
+
+ // Delete ancient chain
+ chainPaths := []string{filepath.Join(ancientDir, rawdb.ChainFreezerName)}
+ confirmAndRemoveDB(chainPaths, "ancient chain")
return nil
}
+// removeFolder deletes every regular file directly inside the directory
+// 'dir', leaving subfolders and their contents untouched.
+func removeFolder(dir string) {
+ filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+		// If we're at the top level folder, recurse into it
+ if path == dir {
+ return nil
+ }
+ // Delete all the files, but not subfolders
+ if !info.IsDir() {
+ os.Remove(path)
+ return nil
+ }
+ return filepath.SkipDir
+ })
+}
+
// confirmAndRemoveDB prompts the user for a last confirmation and removes the
-// folder if accepted.
-func confirmAndRemoveDB(database string, kind string) {
- confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
+// list of folders if accepted.
+func confirmAndRemoveDB(paths []string, kind string) {
+ msg := fmt.Sprintf("Location(s) of '%s': \n", kind)
+ for _, path := range paths {
+ msg += fmt.Sprintf("\t- %s\n", path)
+ }
+ fmt.Println(msg)
+
+ confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove '%s'?", kind))
switch {
case err != nil:
utils.Fatalf("%v", err)
case !confirm:
- log.Info("Database deletion skipped", "path", database)
+ log.Info("Database deletion skipped", "kind", kind, "paths", paths)
default:
- start := time.Now()
- filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
- // If we're at the top level folder, recurse into
- if path == database {
- return nil
+ var (
+ deleted []string
+ start = time.Now()
+ )
+ for _, path := range paths {
+ if common.FileExist(path) {
+ removeFolder(path)
+ deleted = append(deleted, path)
+ } else {
+ log.Info("Folder is not existent", "path", path)
}
- // Delete all the files, but not subfolders
- if !info.IsDir() {
- os.Remove(path)
- return nil
- }
- return filepath.SkipDir
- })
- log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
+ }
+ log.Info("Database successfully deleted", "kind", kind, "paths", deleted, "elapsed", common.PrettyDuration(time.Since(start)))
}
}
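The rewritten helpers above rely on a slightly subtle filepath.Walk contract: returning filepath.SkipDir from the callback for a directory prunes descent into it, so only top-level files get removed. A standalone illustration of just that contract (an editor's sketch, not code from the patch):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// walkTopLevel visits only the immediate children of dir: the root itself
// is allowed through, every subdirectory is pruned with filepath.SkipDir.
func walkTopLevel(dir string) error {
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if path == dir {
			return nil // descend into the root itself
		}
		if info.IsDir() {
			return filepath.SkipDir // prune: do not recurse into subfolders
		}
		fmt.Println("top-level file:", path)
		return nil
	})
}

func main() {
	if err := walkTopLevel(os.TempDir()); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}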
diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go
index 0e6d4bea5a..5a173f4915 100644
--- a/core/rawdb/ancient_scheme.go
+++ b/core/rawdb/ancient_scheme.go
@@ -73,16 +73,16 @@ var chainFreezerNoSnappy = map[string]bool{
// The list of identifiers of ancient stores. It can split more in the futures.
var (
- chainFreezerName = "chain" // the folder name of chain segment ancient store.
- stateFreezerName = "state" // the folder name of reverse diff ancient store.
+ ChainFreezerName = "chain" // the folder name of chain segment ancient store.
+ StateFreezerName = "state" // the folder name of reverse diff ancient store.
)
// freezers the collections of all builtin freezers.
-var freezers = []string{chainFreezerName, stateFreezerName}
+var freezers = []string{ChainFreezerName, StateFreezerName}
// NewStateFreezer initializes the freezer for state history.
func NewStateFreezer(ancientDir string, readOnly bool) (*ResettableFreezer, error) {
return NewResettableFreezer(
- filepath.Join(ancientDir, stateFreezerName), namespace, readOnly,
+ filepath.Join(ancientDir, StateFreezerName), namespace, readOnly,
stateHistoryTableSize, stateFreezerNoSnappy)
}
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
index 2dd37d5b27..f0c0f234f1 100644
--- a/core/rawdb/ancient_utils.go
+++ b/core/rawdb/ancient_utils.go
@@ -82,14 +82,14 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
var infos []freezerInfo
for _, freezer := range freezers {
switch freezer {
- case chainFreezerName:
- info, err := inspect(chainFreezerName, chainFreezerNoSnappy, db)
+ case ChainFreezerName:
+ info, err := inspect(ChainFreezerName, chainFreezerNoSnappy, db)
if err != nil {
return nil, err
}
infos = append(infos, info)
- case stateFreezerName:
+ case StateFreezerName:
if ReadStateScheme(db) != PathScheme {
log.Info("Skip inspecting state freezer", "reason", "state freezer is supported for PathScheme only")
continue
@@ -104,7 +104,7 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
}
defer f.Close()
- info, err := inspect(stateFreezerName, stateFreezerNoSnappy, f)
+ info, err := inspect(StateFreezerName, stateFreezerNoSnappy, f)
if err != nil {
return nil, err
}
@@ -128,9 +128,9 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s
)
switch freezerName {
- case chainFreezerName:
+ case ChainFreezerName:
path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
- case stateFreezerName:
+ case StateFreezerName:
path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
default:
return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 9ad0287d59..6bf5366ea2 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -173,7 +173,7 @@ func resolveChainFreezerDir(ancient string) string {
// - chain freezer is not initialized
// - it's legacy location, chain freezer is present in the root ancient folder
- freezer := path.Join(ancient, chainFreezerName)
+ freezer := path.Join(ancient, ChainFreezerName)
if !common.FileExist(freezer) {
if !common.FileExist(ancient) {
// The entire ancient store is not initialized, still use the sub
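For readers following the rename above, the surrounding resolution logic prefers the modern `chain` subfolder and falls back to the legacy root-folder layout only when the ancient root already exists without that subfolder. A sketch under those assumptions; the exists callback is a hypothetical stand-in for common.FileExist:

package main

import (
	"fmt"
	"path/filepath"
)

// resolveChainFreezerDir sketches the layout resolution: the 'exists'
// callback abstracts the filesystem check used by the real code.
func resolveChainFreezerDir(ancient string, exists func(string) bool) string {
	freezer := filepath.Join(ancient, "chain") // rawdb.ChainFreezerName
	if exists(freezer) {
		return freezer // modern layout: per-freezer subfolders
	}
	if !exists(ancient) {
		return freezer // nothing initialized yet: adopt the modern layout
	}
	// Ancient root exists but has no 'chain' subfolder: assume the legacy
	// layout with the chain freezer living directly in the root folder.
	return ancient
}

func main() {
	never := func(string) bool { return false }
	fmt.Println(resolveChainFreezerDir("/data/ancient", never))
}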
diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go
index a2b7003c27..30cef82bec 100644
--- a/ethdb/dbtest/testsuite.go
+++ b/ethdb/dbtest/testsuite.go
@@ -272,9 +272,13 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
b.Put([]byte("5"), nil)
b.Delete([]byte("1"))
b.Put([]byte("6"), nil)
- b.Delete([]byte("3"))
+
+ b.Delete([]byte("3")) // delete then put
b.Put([]byte("3"), nil)
+ b.Put([]byte("7"), nil) // put then delete
+ b.Delete([]byte("7"))
+
if err := b.Write(); err != nil {
t.Fatal(err)
}
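The two added cases pin down an ordering invariant: a batch must replay same-key operations in insertion order, so delete-then-put leaves the key present and put-then-delete leaves it absent. A minimal model of that invariant (a sketch, not the real ethdb implementation):

package main

import "fmt"

type op struct {
	key    string
	value  []byte
	delete bool
}

// apply replays ops in insertion order; the last op on a key wins. This is
// the property the new dbtest cases assert for every batch implementation.
func apply(ops []op) map[string][]byte {
	out := make(map[string][]byte)
	for _, o := range ops {
		if o.delete {
			delete(out, o.key)
		} else {
			out[o.key] = o.value
		}
	}
	return out
}

func main() {
	final := apply([]op{
		{key: "3", delete: true}, {key: "3", value: []byte{}}, // delete then put: present
		{key: "7", value: []byte{}}, {key: "7", delete: true}, // put then delete: absent
	})
	_, has3 := final["3"]
	_, has7 := final["7"]
	fmt.Println(has3, has7) // true false
}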
diff --git a/trie/sync.go b/trie/sync.go
index 195395f737..f7f45d3ae1 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -19,6 +19,7 @@ package trie
import (
"errors"
"fmt"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/prque"
@@ -98,10 +99,9 @@ func NewSyncPath(path []byte) SyncPath {
// nodeRequest represents a scheduled or already in-flight trie node retrieval request.
type nodeRequest struct {
- hash common.Hash // Hash of the trie node to retrieve
- path []byte // Merkle path leading to this node for prioritization
- data []byte // Data content of the node, cached until all subtrees complete
- deletes [][]byte // List of internal path segments for trie nodes to delete
+ hash common.Hash // Hash of the trie node to retrieve
+ path []byte // Merkle path leading to this node for prioritization
+ data []byte // Data content of the node, cached until all subtrees complete
parent *nodeRequest // Parent state node referencing this entry
deps int // Number of dependencies before allowed to commit this node
@@ -128,37 +128,69 @@ type CodeSyncResult struct {
Data []byte // Data content of the retrieved bytecode
}
+// nodeOp represents an operation on a trie node. It can represent either a
+// deletion of the specific node or a write persisting a retrieved node.
+type nodeOp struct {
+ owner common.Hash // identifier of the trie (empty for account trie)
+ path []byte // path from the root to the specified node.
+ blob []byte // the content of the node (nil for deletion)
+ hash common.Hash // hash of the node content (empty for node deletion)
+}
+
+// isDelete indicates if the operation is a database deletion.
+func (op *nodeOp) isDelete() bool {
+ return len(op.blob) == 0
+}
+
// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
- nodes map[string][]byte // In-memory membatch of recently completed nodes
- hashes map[string]common.Hash // Hashes of recently completed nodes
- deletes map[string]struct{} // List of paths for trie node to delete
- codes map[common.Hash][]byte // In-memory membatch of recently completed codes
+ scheme string // State scheme identifier
+ nodes []nodeOp // In-memory batch of recently completed/deleted nodes
+ codes map[common.Hash][]byte // In-memory membatch of recently completed codes
}
// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
-func newSyncMemBatch() *syncMemBatch {
+func newSyncMemBatch(scheme string) *syncMemBatch {
return &syncMemBatch{
- nodes: make(map[string][]byte),
- hashes: make(map[string]common.Hash),
- deletes: make(map[string]struct{}),
- codes: make(map[common.Hash][]byte),
+ scheme: scheme,
+ codes: make(map[common.Hash][]byte),
}
}
-// hasNode reports the trie node with specific path is already cached.
-func (batch *syncMemBatch) hasNode(path []byte) bool {
- _, ok := batch.nodes[string(path)]
- return ok
-}
-
// hasCode reports the contract code with specific hash is already cached.
func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
_, ok := batch.codes[hash]
return ok
}
+// addCode caches a contract code database write operation.
+func (batch *syncMemBatch) addCode(hash common.Hash, code []byte) {
+ batch.codes[hash] = code
+}
+
+// addNode caches a node database write operation.
+func (batch *syncMemBatch) addNode(owner common.Hash, path []byte, blob []byte, hash common.Hash) {
+ batch.nodes = append(batch.nodes, nodeOp{
+ owner: owner,
+ path: path,
+ blob: blob,
+ hash: hash,
+ })
+}
+
+// delNode caches a node database delete operation.
+func (batch *syncMemBatch) delNode(owner common.Hash, path []byte) {
+ if batch.scheme != rawdb.PathScheme {
+ log.Error("Unexpected node deletion", "owner", owner, "path", path, "scheme", batch.scheme)
+ return // deletion is not supported in hash mode.
+ }
+ batch.nodes = append(batch.nodes, nodeOp{
+ owner: owner,
+ path: path,
+ })
+}
+
// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
@@ -194,7 +226,7 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb
ts := &Sync{
scheme: scheme,
database: database,
- membatch: newSyncMemBatch(),
+ membatch: newSyncMemBatch(scheme),
nodeReqs: make(map[string]*nodeRequest),
codeReqs: make(map[common.Hash]*codeRequest),
queue: prque.New(nil),
@@ -213,16 +245,18 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, par
if root == emptyRoot {
return
}
- if s.membatch.hasNode(path) {
- return
- }
if s.bloom == nil || s.bloom.Contains(root[:]) {
// Bloom filter says this might be a duplicate, double check.
// If database says yes, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
owner, inner := ResolvePath(path)
- if rawdb.HasTrieNode(s.database, owner, inner, root, s.scheme) {
+ exist, inconsistent := s.hasNode(owner, inner, root)
+ if exist {
+ // The entire subtrie is already present in the database.
return
+ } else if inconsistent {
+ // There is a pre-existing node with the wrong hash in DB, remove it.
+ s.membatch.delNode(owner, inner)
}
// False positive, bump fault meter
bloomFaultMeter.Mark(1)
@@ -382,22 +416,32 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error {
}
// Commit flushes the data stored in the internal membatch out to persistent
-// storage, returning any occurred error.
+// storage, returning any error that occurred. The whole data set will be
+// flushed in an atomic database batch.
func (s *Sync) Commit(dbw ethdb.Batch) error {
- // Flush the pending node writes into database batch.
var (
account int
storage int
)
- for path, value := range s.membatch.nodes {
- owner, inner := ResolvePath([]byte(path))
- if owner == (common.Hash{}) {
- account += 1
+ // Flush the pending node writes into database batch.
+ for _, op := range s.membatch.nodes {
+ if op.isDelete() {
+ // node deletion is only supported in path mode.
+ if op.owner == (common.Hash{}) {
+ rawdb.DeleteAccountTrieNode(dbw, op.path)
+ } else {
+ rawdb.DeleteStorageTrieNode(dbw, op.owner, op.path)
+ }
+ deletionGauge.Inc(1)
} else {
- storage += 1
+ if op.owner == (common.Hash{}) {
+ account += 1
+ } else {
+ storage += 1
+ }
+ rawdb.WriteTrieNode(dbw, op.owner, op.path, op.hash, op.blob, s.scheme)
}
- rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme)
- hash := s.membatch.hashes[path]
+ hash := op.hash
if s.bloom != nil {
s.bloom.Add(hash[:])
}
@@ -405,13 +449,6 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
accountNodeSyncedGauge.Inc(int64(account))
storageNodeSyncedGauge.Inc(int64(storage))
- // Flush the pending node deletes into the database batch.
- // Please note that each written and deleted node has a
- // unique path, ensuring no duplication occurs.
- for path := range s.membatch.deletes {
- owner, inner := ResolvePath([]byte(path))
- rawdb.DeleteTrieNode(dbw, owner, inner, common.Hash{} /* unused */, s.scheme)
- }
// Flush the pending code writes into database batch.
for hash, value := range s.membatch.codes {
rawdb.WriteCode(dbw, hash, value)
@@ -421,7 +458,7 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
}
codeSyncedGauge.Inc(int64(len(s.membatch.codes)))
- s.membatch = newSyncMemBatch() // reset the batch
+ s.membatch = newSyncMemBatch(s.scheme) // reset the batch
return nil
}
@@ -489,12 +526,15 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
// child as invalid. This is essential in the case of path mode
// scheme; otherwise, state healing might overwrite existing child
// nodes silently while leaving a dangling parent node within the
- // range of this internal path on disk. This would break the
- // guarantee for state healing.
+		// range of this internal path on disk, with the persistent state
+		// ending up in the odd situation that nodes on the same path
+		// contradict each other even though they are all present on disk.
+		// This would break the guarantee for state healing.
//
// While it's possible for this shortNode to overwrite a previously
// existing full node, the other branches of the fullNode can be
- // retained as they remain untouched and complete.
+		// retained, as they are not reachable through the new shortNode and
+		// their whole sub-tries remain untouched and complete.
//
// This step is only necessary for path mode, as there is no deletion
// in hash mode at all.
@@ -511,8 +551,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...))
}
if exists {
- req.deletes = append(req.deletes, key[:i])
- deletionGauge.Inc(1)
+ s.membatch.delNode(owner, append(inner, key[:i]...))
log.Debug("Detected dangling node", "owner", owner, "path", append(inner, key[:i]...))
}
}
@@ -532,6 +571,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
}
// Iterate over the children, and request all unknown ones
requests := make([]*nodeRequest, 0, len(children))
+ var batchMu sync.Mutex
for _, child := range children {
// Notify any external watcher of a new key/value node
if req.callback != nil {
@@ -548,20 +588,24 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
}
}
}
- // If the child references another node, resolve or schedule
+ // If the child references another node, resolve or schedule.
+ // We check all children concurrently.
if node, ok := (child.node).(hashNode); ok {
- // Try to resolve the node from the local database
- if s.membatch.hasNode(child.path) {
- continue
- }
- chash := common.BytesToHash(node)
+ path := child.path
+ hash := common.BytesToHash(node)
if s.bloom == nil || s.bloom.Contains(node) {
// Bloom filter says this might be a duplicate, double check.
// If database says yes, then at least the trie node is present
// and we hold the assumption that it's NOT legacy contract code.
- owner, inner := ResolvePath(child.path)
- if rawdb.HasTrieNode(s.database, owner, inner, chash, s.scheme) {
+ owner, inner := ResolvePath(path)
+ exist, inconsistent := s.hasNode(owner, inner, hash)
+ if exist {
continue
+ } else if inconsistent {
+ // There is a pre-existing node with the wrong hash in DB, remove it.
+ batchMu.Lock()
+ s.membatch.delNode(owner, inner)
+ batchMu.Unlock()
}
// False positive, bump fault meter
@@ -569,8 +613,8 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
}
// Locally unknown node, schedule for retrieval
requests = append(requests, &nodeRequest{
- path: child.path,
- hash: chash,
+ path: path,
+ hash: hash,
parent: req,
callback: req.callback,
})
@@ -584,14 +628,10 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
// committed themselves.
func (s *Sync) commitNodeRequest(req *nodeRequest) error {
// Write the node content to the membatch
- s.membatch.nodes[string(req.path)] = req.data
- s.membatch.hashes[string(req.path)] = req.hash
+ owner, path := ResolvePath(req.path)
+ s.membatch.addNode(owner, path, req.data, req.hash)
- // Delete the internal nodes which are marked as invalid
- for _, segment := range req.deletes {
- path := append(req.path, segment...)
- s.membatch.deletes[string(path)] = struct{}{}
- }
+	// Remove the completed node request
delete(s.nodeReqs, string(req.path))
s.fetches[len(req.path)]--
@@ -612,7 +652,9 @@ func (s *Sync) commitNodeRequest(req *nodeRequest) error {
// committed themselves.
func (s *Sync) commitCodeRequest(req *codeRequest) error {
// Write the node content to the membatch
- s.membatch.codes[req.hash] = req.data
+ s.membatch.addCode(req.hash, req.data)
+
+	// Remove the completed code request
delete(s.codeReqs, req.hash)
s.fetches[len(req.path)]--
@@ -628,6 +670,28 @@ func (s *Sync) commitCodeRequest(req *codeRequest) error {
return nil
}
+// hasNode reports whether the specified trie node is present in the database.
+// 'exists' is true when the node exists in the database and matches the given root
+// hash. The 'inconsistent' return value is true when the node exists but does not
+// match the expected hash.
+func (s *Sync) hasNode(owner common.Hash, path []byte, hash common.Hash) (exists bool, inconsistent bool) {
+ // If node is running with hash scheme, check the presence with node hash.
+ if s.scheme == rawdb.HashScheme {
+ return rawdb.HasLegacyTrieNode(s.database, hash), false
+ }
+ // If node is running with path scheme, check the presence with node path.
+ var blob []byte
+ var dbHash common.Hash
+ if owner == (common.Hash{}) {
+ blob, dbHash = rawdb.ReadAccountTrieNode(s.database, path)
+ } else {
+ blob, dbHash = rawdb.ReadStorageTrieNode(s.database, owner, path)
+ }
+ exists = hash == dbHash
+ inconsistent = !exists && len(blob) != 0
+ return exists, inconsistent
+}
+
// ResolvePath resolves the provided composite node path by separating the
// path in account trie if it's existent.
func ResolvePath(path []byte) (common.Hash, []byte) {
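One design note on the membatch rework above: replacing the per-path maps with a single ordered nodeOp slice means a scheduled deletion and a later write of the same path reach the database batch in their original order, which a map could not guarantee. A toy illustration (editor's sketch, reusing only the nodeOp shape from the patch):

package main

import "fmt"

// nodeOp mirrors the shape introduced in trie/sync.go: an empty blob marks
// a deletion, a non-empty blob a write.
type nodeOp struct {
	path []byte
	blob []byte
}

func (op nodeOp) isDelete() bool { return len(op.blob) == 0 }

func main() {
	// A delete scheduled before a write of the same path must stay ordered;
	// that is why the patch keeps one slice instead of separate maps.
	ops := []nodeOp{
		{path: []byte{0x1}},                     // delete the stale node first
		{path: []byte{0x1}, blob: []byte{0xff}}, // then persist the fresh one
	}
	for _, op := range ops {
		if op.isDelete() {
			fmt.Printf("DELETE %x\n", op.path)
		} else {
			fmt.Printf("PUT    %x = %x\n", op.path, op.blob)
		}
	}
}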
diff --git a/trie/sync_test.go b/trie/sync_test.go
index a07dbccd87..75eb5809a2 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -689,8 +689,11 @@ func testSyncOrdering(t *testing.T, scheme string) {
}
}
}
-
func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database) {
+ syncWithHookWriter(t, root, db, srcDb, nil)
+}
+
+func syncWithHookWriter(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database, hookWriter ethdb.KeyValueWriter) {
// Create a destination trie and sync with the scheduler
sched := NewSync(root, db, nil, NewSyncBloom(1, db), srcDb.Scheme())
@@ -728,8 +731,11 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database
if err := sched.Commit(batch); err != nil {
t.Fatalf("failed to commit data: %v", err)
}
- batch.Write()
-
+ if hookWriter != nil {
+ batch.Replay(hookWriter)
+ } else {
+ batch.Write()
+ }
paths, nodes, _ = sched.Missing(0)
elements = elements[:0]
for i := 0; i < len(paths); i++ {
@@ -899,3 +905,116 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) {
syncWith(t, rootC, destDisk, srcTrieDB)
checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateC, true)
}
+
+func TestSyncAbort(t *testing.T) {
+ testSyncAbort(t, rawdb.PathScheme)
+ testSyncAbort(t, rawdb.HashScheme)
+}
+
+type hookWriter struct {
+ db ethdb.KeyValueStore
+ filter func(key []byte, value []byte) bool
+}
+
+// Put inserts the given value into the key-value data store.
+func (w *hookWriter) Put(key []byte, value []byte) error {
+ if w.filter != nil && w.filter(key, value) {
+ return nil
+ }
+ return w.db.Put(key, value)
+}
+
+// Delete removes the key from the key-value data store.
+func (w *hookWriter) Delete(key []byte) error {
+ return w.db.Delete(key)
+}
+
+func testSyncAbort(t *testing.T, scheme string) {
+ var (
+ srcDisk = rawdb.NewMemoryDatabase()
+ srcTrieDB = newTestDatabase(srcDisk, scheme)
+ srcTrie, _ = New(TrieID(types.EmptyRootHash), srcTrieDB)
+
+ deleteFn = func(key []byte, tr *Trie, states map[string][]byte) {
+ tr.Delete(key)
+ delete(states, string(key))
+ }
+ writeFn = func(key []byte, val []byte, tr *Trie, states map[string][]byte) {
+ if val == nil {
+ val = randBytes(32)
+ }
+ tr.Update(key, val)
+ states[string(key)] = common.CopyBytes(val)
+ }
+ copyStates = func(states map[string][]byte) map[string][]byte {
+ cpy := make(map[string][]byte)
+ for k, v := range states {
+ cpy[k] = v
+ }
+ return cpy
+ }
+ )
+ var (
+ stateA = make(map[string][]byte)
+ key = randBytes(32)
+ val = randBytes(32)
+ )
+ for i := 0; i < 256; i++ {
+ writeFn(randBytes(32), nil, srcTrie, stateA)
+ }
+ writeFn(key, val, srcTrie, stateA)
+
+ rootA, nodesA, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootA, false); err != nil {
+ panic(err)
+ }
+ // Create a destination trie and sync with the scheduler
+ destDisk := rawdb.NewMemoryDatabase()
+ syncWith(t, rootA, destDisk, srcTrieDB)
+ checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateA, true)
+
+ // Delete the element from the trie
+ stateB := copyStates(stateA)
+ srcTrie, _ = New(TrieID(rootA), srcTrieDB)
+ deleteFn(key, srcTrie, stateB)
+
+ rootB, nodesB, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootB, false); err != nil {
+ panic(err)
+ }
+
+	// Sync the new state, but never persist the new root node. Before
+	// fix #28595, the stale old root node would still be left in the
+	// database, breaking the next healing cycle.
+ syncWithHookWriter(t, rootB, destDisk, srcTrieDB, &hookWriter{db: destDisk, filter: func(key []byte, value []byte) bool {
+ if scheme == rawdb.HashScheme {
+ return false
+ }
+ if len(value) == 0 {
+ return false
+ }
+ ok, path := rawdb.ResolveAccountTrieNodeKey(key)
+ return ok && len(path) == 0
+ }})
+
+ // Add elements to expand trie
+ stateC := copyStates(stateB)
+ srcTrie, _ = New(TrieID(rootB), srcTrieDB)
+
+ writeFn(key, val, srcTrie, stateC)
+ rootC, nodesC, _ := srcTrie.Commit(false)
+ if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil {
+ panic(err)
+ }
+ if err := srcTrieDB.Commit(rootC, false); err != nil {
+ panic(err)
+ }
+ syncWith(t, rootC, destDisk, srcTrieDB)
+ checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateC, true)
+}
diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go
index ea5c50ab6a..7843f78de0 100644
--- a/trie/triedb/pathdb/database.go
+++ b/trie/triedb/pathdb/database.go
@@ -167,14 +167,31 @@ func New(diskdb ethdb.Database, config *Config) *Database {
log.Crit("Failed to open state history freezer", "err", err)
}
- // Truncate the extra state histories above the current diskLayer
- // in freezer in case it's not aligned with the disk layer.
- pruned, err := truncateFromHead(db.diskdb, db.freezer, db.tree.bottom().stateID())
- if err != nil {
- log.Crit("Failed to truncate state history freezer", "err", err)
- }
- if pruned > 0 {
- log.Warn("Truncated extra state histories from freezer", "count", pruned)
+ diskLayerID := db.tree.bottom().stateID()
+ if diskLayerID == 0 {
+		// Reset the state histories entirely in case the trie database is
+		// not initialized yet, as no state histories are expected then.
+ frozen, err := db.freezer.Ancients()
+ if err != nil {
+ log.Crit("Failed to retrieve head of state history", "err", err)
+ }
+ if frozen != 0 {
+ err := db.freezer.Reset()
+ if err != nil {
+ log.Crit("Failed to reset state histories", "err", err)
+ }
+ log.Info("Truncated extraneous state history")
+ }
+ } else {
+		// Truncate any extra state histories in the freezer that sit
+		// above the disk layer, in case the two are not aligned.
+ pruned, err := truncateFromHead(db.diskdb, db.freezer, diskLayerID)
+ if err != nil {
+ log.Crit("Failed to truncate extra state histories", "err", err)
+ }
+ if pruned != 0 {
+ log.Warn("Truncated extra state histories", "number", pruned)
+ }
}
}
// Disable database in case node is still in the initial state sync stage.
@@ -410,6 +427,9 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
inited = true
}
})
+ if !inited {
+ inited = rawdb.ReadSnapSyncStatusFlag(db.diskdb) != rawdb.StateSyncUnknown
+ }
return inited
}
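To summarize the startup branch added above: an uninitialized trie database (disk-layer state ID zero) resets the state-history freezer outright, while an initialized one only truncates histories above the disk layer. A compact sketch with the freezer operations reduced to hypothetical function values:

package main

import "fmt"

// alignFreezer sketches the patched pathdb startup: an uninitialized trie
// database (diskLayerID == 0) resets the whole state-history freezer, an
// initialized one only truncates histories above the disk layer.
func alignFreezer(diskLayerID, frozen uint64, reset func() error, truncate func(uint64) (uint64, error)) error {
	if diskLayerID == 0 {
		if frozen == 0 {
			return nil // nothing to clean up
		}
		return reset()
	}
	pruned, err := truncate(diskLayerID)
	if err != nil {
		return err
	}
	if pruned != 0 {
		fmt.Println("truncated extra state histories:", pruned)
	}
	return nil
}

func main() {
	_ = alignFreezer(0, 5,
		func() error { fmt.Println("freezer reset"); return nil },
		func(id uint64) (uint64, error) { return 0, nil },
	)
}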
From 190f2ffb94ff23ded26560f4f6ba5ff2499a179e Mon Sep 17 00:00:00 2001
From: Francesco4203 <100074926+Francesco4203@users.noreply.github.com>
Date: Thu, 14 Nov 2024 23:29:48 +0700
Subject: [PATCH 41/41] v2/consortium_test: fix for pbss: do not insert
inserted blocks (#613)
* v2/consortium_test: only insert newly created blocks
* consortium_test.go: add comments
---
consensus/consortium/v2/consortium_test.go | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/consensus/consortium/v2/consortium_test.go b/consensus/consortium/v2/consortium_test.go
index 394e7b22c6..deb8dc6f92 100644
--- a/consensus/consortium/v2/consortium_test.go
+++ b/consensus/consortium/v2/consortium_test.go
@@ -2218,7 +2218,7 @@ func testSystemTransactionOrder(t *testing.T, scheme string) {
}
func TestIsPeriodBlock(t *testing.T) {
- //testIsPeriodBlock(t, rawdb.PathScheme)
+ testIsPeriodBlock(t, rawdb.PathScheme)
testIsPeriodBlock(t, rawdb.HashScheme)
}
@@ -2301,7 +2301,11 @@ func testIsPeriodBlock(t *testing.T, scheme string) {
block, _ := core.GenerateChain(&chainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, callback, true)
bs = append(bs, block...)
}
- if _, err := chain.InsertChain(bs[:], nil); err != nil {
+	// Only the newly created blocks are inserted here.
+	// In path scheme, the number of diff layers corresponding to blocks
+	// is limited to 128, so only the states of the newest 128 blocks can
+	// be retrieved from the db. Re-inserting previously inserted blocks
+	// can therefore fail, since the older states can no longer be
+	// retrieved for checking.
+ if _, err := chain.InsertChain(bs[399:], nil); err != nil {
panic(err)
}
@@ -2321,15 +2325,9 @@ func testIsPeriodBlock(t *testing.T, scheme string) {
}
}
-/*
-Got issues related to parent layer missing in the test
-panic: triedb parent [0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421] layer missing [recovered]
-panic: triedb parent [0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421] layer missing
-Will disable this test firstly for further investigation.
-*/
func TestIsTrippEffective(t *testing.T) {
testIsTrippEffective(t, rawdb.HashScheme)
- // testIsTrippEffective(t, rawdb.PathScheme)
+ testIsTrippEffective(t, rawdb.PathScheme)
}
@@ -2416,7 +2414,11 @@ func testIsTrippEffective(t *testing.T, scheme string) {
block, _ := core.GenerateChain(&chainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, callback, true)
bs = append(bs, block...)
}
- if _, err := chain.InsertChain(bs[:], nil); err != nil {
+	// Only the newly created blocks are inserted here.
+	// In path scheme, the number of diff layers corresponding to blocks
+	// is limited to 128, so only the states of the newest 128 blocks can
+	// be retrieved from the db. Re-inserting previously inserted blocks
+	// can therefore fail, since the older states can no longer be
+	// retrieved for checking.
+ if _, err := chain.InsertChain(bs[399:], nil); err != nil {
panic(err)
}
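The 128 figure in the new comments matches pathdb's default in-memory diff-layer window (maxDiffLayers in upstream go-ethereum; treated as an assumption here). A toy sketch of the visibility rule the tests now respect; retrievable is an invented helper that ignores the persisted disk layer for simplicity:

package main

import "fmt"

// maxDiffLayers mirrors the pathdb default of 128 in-memory diff layers
// (an assumption for this sketch, matching the comment in the test).
const maxDiffLayers = 128

// retrievable reports whether the state of block 'number' can still be
// opened given the current head: only blocks within the diff-layer
// window remain individually addressable.
func retrievable(number, head uint64) bool {
	return head < maxDiffLayers || number >= head-maxDiffLayers
}

func main() {
	fmt.Println(retrievable(100, 400)) // false: fell out of the window
	fmt.Println(retrievable(350, 400)) // true: within the newest 128
}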