From afaac28da1d799fd9f1740a1456d3cf9335e1ac7 Mon Sep 17 00:00:00 2001 From: Ori Newman Date: Mon, 13 Sep 2021 14:22:00 +0300 Subject: [PATCH] Validate each level parents (#1827) * Create BlockParentBuilder. * Implement BuildParents. * Explicitly set level 0 blocks to be the same as direct parents. * Add checkIndirectParents to validateBlockHeaderInContext. * Fix test_block_builder.go and BlockLevelParents::Equal. * Don't check indirect parents for blocks with trusted data. * Handle pruned blocks when building block level parents. * Fix bad deletions from unprocessedXxxParents. * Fix merge errors. * Fix bad pruning point parent replaces. * Fix duplicates in newBlockLevelParents. * Skip checkIndirectParents * Get rid of staging constant IDs * Fix BuildParents * Fix tests * Add comments * Change order of directParentHashes * Get rid of maybeAddDirectParentParents * Add comments * Add blockToReferences type * Use ParentsAtLevel Co-authored-by: stasatdaglabs --- .../acceptance_data_staging_shard.go | 2 +- .../acceptance_data_store.go | 15 +- .../block_header_staging_shard.go | 2 +- .../blockheaderstore/block_header_store.go | 11 +- .../block_relation_staging_shard.go | 2 +- .../block_relation_store.go | 15 +- .../block_status_staging_shard.go | 2 +- .../blockstatusstore/block_status_store.go | 15 +- .../blockstore/block_staging_shard.go | 2 +- .../datastructures/blockstore/block_store.go | 11 +- .../consensus_state_staging_shard.go | 2 +- .../consensus_state_store.go | 13 +- .../daa_blocks_staging_shard.go | 2 +- .../daablocksstore/daa_blocks_store.go | 11 +- .../daa_window_staging_shard.go | 2 +- .../daawindowstore/daa_window_store.go | 15 +- .../finalitystore/finality_staging_shard.go | 2 +- .../finalitystore/finality_store.go | 11 +- .../ghostadag_data_staging_shard.go | 2 +- .../ghostdagdatastore/ghostdag_data_store.go | 11 +- .../headers_selected_chain_staging_shard.go | 2 +- .../headers_selected_chain_store.go | 12 +- .../headers_selected_tip_staging_shard.go | 2 
+- .../headers_selected_tips_store.go | 13 +- .../multisetstore/multiset_staging_shard.go | 2 +- .../multisetstore/multiset_store.go | 15 +- .../pruningstore/pruning_staging_shard.go | 2 +- .../pruningstore/pruning_store.go | 20 +- .../reachability_data_staging_shard.go | 2 +- .../reachability_data_store.go | 11 +- .../utxodiffstore/utxo_diff_staging_shard.go | 2 +- .../utxodiffstore/utxo_diff_store.go | 11 +- domain/consensus/factory.go | 43 ++-- domain/consensus/model/externalapi/block.go | 1 + .../model/externalapi/blocklevelparents.go | 27 ++- .../interface_processes_blockparentbuilder.go | 9 + domain/consensus/model/staging_area.go | 33 +-- .../processes/blockbuilder/block_builder.go | 5 +- .../blockbuilder/test_block_builder.go | 12 +- .../blockparentbuilder/blockparentbuilder.go | 219 ++++++++++++++++++ ..._and_insert_imported_pruning_point_test.go | 5 +- .../blockvalidator/block_header_in_context.go | 20 ++ .../blockvalidator/blockvalidator.go | 3 + ..._violation_proof_of_work_and_difficulty.go | 2 +- .../dagtraversalmanager/window_test.go | 46 ++-- .../processes/pruningmanager/pruning_test.go | 4 +- domain/consensus/ruleerrors/rule_error.go | 2 + .../utils/blockheader/blockheader.go | 11 +- domain/consensus/utils/pow/pow.go | 5 +- util/staging/commit_all_changes.go | 8 + 50 files changed, 504 insertions(+), 193 deletions(-) create mode 100644 domain/consensus/model/interface_processes_blockparentbuilder.go create mode 100644 domain/consensus/processes/blockparentbuilder/blockparentbuilder.go diff --git a/domain/consensus/datastructures/acceptancedatastore/acceptance_data_staging_shard.go b/domain/consensus/datastructures/acceptancedatastore/acceptance_data_staging_shard.go index 9381ddb00b..1f767d287c 100644 --- a/domain/consensus/datastructures/acceptancedatastore/acceptance_data_staging_shard.go +++ b/domain/consensus/datastructures/acceptancedatastore/acceptance_data_staging_shard.go @@ -12,7 +12,7 @@ type acceptanceDataStagingShard struct { } func (ads 
*acceptanceDataStore) stagingShard(stagingArea *model.StagingArea) *acceptanceDataStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDAcceptanceData, func() model.StagingShard { + return stagingArea.GetOrCreateShard(ads.shardID, func() model.StagingShard { return &acceptanceDataStagingShard{ store: ads, toAdd: make(map[externalapi.DomainHash]externalapi.AcceptanceData), diff --git a/domain/consensus/datastructures/acceptancedatastore/acceptance_data_store.go b/domain/consensus/datastructures/acceptancedatastore/acceptance_data_store.go index 2fb8c0ad7e..ef9240ff56 100644 --- a/domain/consensus/datastructures/acceptancedatastore/acceptance_data_store.go +++ b/domain/consensus/datastructures/acceptancedatastore/acceptance_data_store.go @@ -1,12 +1,11 @@ package acceptancedatastore import ( - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" "google.golang.org/protobuf/proto" ) @@ -14,15 +13,17 @@ var bucketName = []byte("acceptance-data") // acceptanceDataStore represents a store of AcceptanceData type acceptanceDataStore struct { - cache *lrucache.LRUCache - bucket model.DBBucket + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket } // New instantiates a new AcceptanceDataStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.AcceptanceDataStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.AcceptanceDataStore { return &acceptanceDataStore{ - cache: lrucache.New(cacheSize, preallocate), - bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName), + shardID: staging.GenerateShardingID(), + cache: 
lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), } } diff --git a/domain/consensus/datastructures/blockheaderstore/block_header_staging_shard.go b/domain/consensus/datastructures/blockheaderstore/block_header_staging_shard.go index 0ecccafd87..bcfc6f8d45 100644 --- a/domain/consensus/datastructures/blockheaderstore/block_header_staging_shard.go +++ b/domain/consensus/datastructures/blockheaderstore/block_header_staging_shard.go @@ -12,7 +12,7 @@ type blockHeaderStagingShard struct { } func (bhs *blockHeaderStore) stagingShard(stagingArea *model.StagingArea) *blockHeaderStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDBlockHeader, func() model.StagingShard { + return stagingArea.GetOrCreateShard(bhs.shardID, func() model.StagingShard { return &blockHeaderStagingShard{ store: bhs, toAdd: make(map[externalapi.DomainHash]externalapi.BlockHeader), diff --git a/domain/consensus/datastructures/blockheaderstore/block_header_store.go b/domain/consensus/datastructures/blockheaderstore/block_header_store.go index 92776842cf..beedfb1478 100644 --- a/domain/consensus/datastructures/blockheaderstore/block_header_store.go +++ b/domain/consensus/datastructures/blockheaderstore/block_header_store.go @@ -2,12 +2,11 @@ package blockheaderstore import ( "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var bucketName = []byte("block-headers") @@ -15,6 +14,7 @@ var countKeyName = []byte("block-headers-count") // blockHeaderStore represents a store of blocks type blockHeaderStore struct { + shardID model.StagingShardID cache *lrucache.LRUCache 
countCached uint64 bucket model.DBBucket @@ -22,11 +22,12 @@ type blockHeaderStore struct { } // New instantiates a new BlockHeaderStore -func New(dbContext model.DBReader, prefix *prefix.Prefix, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) { +func New(dbContext model.DBReader, prefixBucket model.DBBucket, cacheSize int, preallocate bool) (model.BlockHeaderStore, error) { blockHeaderStore := &blockHeaderStore{ + shardID: staging.GenerateShardingID(), cache: lrucache.New(cacheSize, preallocate), - bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName), - countKey: database.MakeBucket(prefix.Serialize()).Key(countKeyName), + bucket: prefixBucket.Bucket(bucketName), + countKey: prefixBucket.Key(countKeyName), } err := blockHeaderStore.initializeCount(dbContext) diff --git a/domain/consensus/datastructures/blockrelationstore/block_relation_staging_shard.go b/domain/consensus/datastructures/blockrelationstore/block_relation_staging_shard.go index 63b06b39a8..280d9f751d 100644 --- a/domain/consensus/datastructures/blockrelationstore/block_relation_staging_shard.go +++ b/domain/consensus/datastructures/blockrelationstore/block_relation_staging_shard.go @@ -11,7 +11,7 @@ type blockRelationStagingShard struct { } func (brs *blockRelationStore) stagingShard(stagingArea *model.StagingArea) *blockRelationStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDBlockRelation, func() model.StagingShard { + return stagingArea.GetOrCreateShard(brs.shardID, func() model.StagingShard { return &blockRelationStagingShard{ store: brs, toAdd: make(map[externalapi.DomainHash]*model.BlockRelations), diff --git a/domain/consensus/datastructures/blockrelationstore/block_relation_store.go b/domain/consensus/datastructures/blockrelationstore/block_relation_store.go index 1027118892..74e11628d8 100644 --- a/domain/consensus/datastructures/blockrelationstore/block_relation_store.go +++ 
b/domain/consensus/datastructures/blockrelationstore/block_relation_store.go @@ -2,27 +2,28 @@ package blockrelationstore import ( "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var bucketName = []byte("block-relations") // blockRelationStore represents a store of BlockRelations type blockRelationStore struct { - cache *lrucache.LRUCache - bucket model.DBBucket + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket } // New instantiates a new BlockRelationStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlockRelationStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlockRelationStore { return &blockRelationStore{ - cache: lrucache.New(cacheSize, preallocate), - bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName), + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), } } diff --git a/domain/consensus/datastructures/blockstatusstore/block_status_staging_shard.go b/domain/consensus/datastructures/blockstatusstore/block_status_staging_shard.go index 9ccbace774..0d403208f7 100644 --- a/domain/consensus/datastructures/blockstatusstore/block_status_staging_shard.go +++ b/domain/consensus/datastructures/blockstatusstore/block_status_staging_shard.go @@ -11,7 +11,7 @@ type blockStatusStagingShard struct { } func (bss *blockStatusStore) stagingShard(stagingArea *model.StagingArea) *blockStatusStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDBlockStatus, func() model.StagingShard { + 
return stagingArea.GetOrCreateShard(bss.shardID, func() model.StagingShard { return &blockStatusStagingShard{ store: bss, toAdd: make(map[externalapi.DomainHash]externalapi.BlockStatus), diff --git a/domain/consensus/datastructures/blockstatusstore/block_status_store.go b/domain/consensus/datastructures/blockstatusstore/block_status_store.go index 7d800292b3..76bf51e307 100644 --- a/domain/consensus/datastructures/blockstatusstore/block_status_store.go +++ b/domain/consensus/datastructures/blockstatusstore/block_status_store.go @@ -2,27 +2,28 @@ package blockstatusstore import ( "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var bucketName = []byte("block-statuses") // blockStatusStore represents a store of BlockStatuses type blockStatusStore struct { - cache *lrucache.LRUCache - bucket model.DBBucket + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket } // New instantiates a new BlockStatusStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlockStatusStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlockStatusStore { return &blockStatusStore{ - cache: lrucache.New(cacheSize, preallocate), - bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName), + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), } } diff --git a/domain/consensus/datastructures/blockstore/block_staging_shard.go b/domain/consensus/datastructures/blockstore/block_staging_shard.go index e26ce112d3..12596abaf2 100644 --- 
a/domain/consensus/datastructures/blockstore/block_staging_shard.go +++ b/domain/consensus/datastructures/blockstore/block_staging_shard.go @@ -12,7 +12,7 @@ type blockStagingShard struct { } func (bs *blockStore) stagingShard(stagingArea *model.StagingArea) *blockStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDBlock, func() model.StagingShard { + return stagingArea.GetOrCreateShard(bs.shardID, func() model.StagingShard { return &blockStagingShard{ store: bs, toAdd: make(map[externalapi.DomainHash]*externalapi.DomainBlock), diff --git a/domain/consensus/datastructures/blockstore/block_store.go b/domain/consensus/datastructures/blockstore/block_store.go index 791c3f46b0..aa3adfc02e 100644 --- a/domain/consensus/datastructures/blockstore/block_store.go +++ b/domain/consensus/datastructures/blockstore/block_store.go @@ -2,12 +2,11 @@ package blockstore import ( "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" "github.com/pkg/errors" ) @@ -15,6 +14,7 @@ var bucketName = []byte("blocks") // blockStore represents a store of blocks type blockStore struct { + shardID model.StagingShardID cache *lrucache.LRUCache countCached uint64 bucket model.DBBucket @@ -22,11 +22,12 @@ type blockStore struct { } // New instantiates a new BlockStore -func New(dbContext model.DBReader, prefix *prefix.Prefix, cacheSize int, preallocate bool) (model.BlockStore, error) { +func New(dbContext model.DBReader, prefixBucket model.DBBucket, cacheSize int, preallocate bool) (model.BlockStore, error) { blockStore := &blockStore{ + shardID: staging.GenerateShardingID(), cache: 
lrucache.New(cacheSize, preallocate), - bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName), - countKey: database.MakeBucket(prefix.Serialize()).Key([]byte("blocks-count")), + bucket: prefixBucket.Bucket(bucketName), + countKey: prefixBucket.Key([]byte("blocks-count")), } err := blockStore.initializeCount(dbContext) diff --git a/domain/consensus/datastructures/consensusstatestore/consensus_state_staging_shard.go b/domain/consensus/datastructures/consensusstatestore/consensus_state_staging_shard.go index 31dee494fc..8d0803b072 100644 --- a/domain/consensus/datastructures/consensusstatestore/consensus_state_staging_shard.go +++ b/domain/consensus/datastructures/consensusstatestore/consensus_state_staging_shard.go @@ -12,7 +12,7 @@ type consensusStateStagingShard struct { } func (bs *consensusStateStore) stagingShard(stagingArea *model.StagingArea) *consensusStateStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDConsensusState, func() model.StagingShard { + return stagingArea.GetOrCreateShard(bs.shardID, func() model.StagingShard { return &consensusStateStagingShard{ store: bs, tipsStaging: nil, diff --git a/domain/consensus/datastructures/consensusstatestore/consensus_state_store.go b/domain/consensus/datastructures/consensusstatestore/consensus_state_store.go index d1ea4a466f..f1ad909ffe 100644 --- a/domain/consensus/datastructures/consensusstatestore/consensus_state_store.go +++ b/domain/consensus/datastructures/consensusstatestore/consensus_state_store.go @@ -1,17 +1,17 @@ package consensusstatestore import ( - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/utxolrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var importingPruningPointUTXOSetKeyName = 
[]byte("importing-pruning-point-utxo-set") // consensusStateStore represents a store for the current consensus state type consensusStateStore struct { + shardID model.StagingShardID virtualUTXOSetCache *utxolrucache.LRUCache tipsCache []*externalapi.DomainHash tipsKey model.DBKey @@ -20,12 +20,13 @@ type consensusStateStore struct { } // New instantiates a new ConsensusStateStore -func New(prefix *prefix.Prefix, utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore { +func New(prefixBucket model.DBBucket, utxoSetCacheSize int, preallocate bool) model.ConsensusStateStore { return &consensusStateStore{ + shardID: staging.GenerateShardingID(), virtualUTXOSetCache: utxolrucache.New(utxoSetCacheSize, preallocate), - tipsKey: database.MakeBucket(prefix.Serialize()).Key(tipsKeyName), - importingPruningPointUTXOSetKey: database.MakeBucket(prefix.Serialize()).Key(importingPruningPointUTXOSetKeyName), - utxoSetBucket: database.MakeBucket(prefix.Serialize()).Bucket(utxoSetBucketName), + tipsKey: prefixBucket.Key(tipsKeyName), + importingPruningPointUTXOSetKey: prefixBucket.Key(importingPruningPointUTXOSetKeyName), + utxoSetBucket: prefixBucket.Bucket(utxoSetBucketName), } } diff --git a/domain/consensus/datastructures/daablocksstore/daa_blocks_staging_shard.go b/domain/consensus/datastructures/daablocksstore/daa_blocks_staging_shard.go index 26f4d1374a..e305056837 100644 --- a/domain/consensus/datastructures/daablocksstore/daa_blocks_staging_shard.go +++ b/domain/consensus/datastructures/daablocksstore/daa_blocks_staging_shard.go @@ -15,7 +15,7 @@ type daaBlocksStagingShard struct { } func (daas *daaBlocksStore) stagingShard(stagingArea *model.StagingArea) *daaBlocksStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDDAABlocks, func() model.StagingShard { + return stagingArea.GetOrCreateShard(daas.shardID, func() model.StagingShard { return &daaBlocksStagingShard{ store: daas, daaScoreToAdd: make(map[externalapi.DomainHash]uint64), diff --git 
a/domain/consensus/datastructures/daablocksstore/daa_blocks_store.go b/domain/consensus/datastructures/daablocksstore/daa_blocks_store.go index 4e2c8c6aa1..2bb36e9cf1 100644 --- a/domain/consensus/datastructures/daablocksstore/daa_blocks_store.go +++ b/domain/consensus/datastructures/daablocksstore/daa_blocks_store.go @@ -1,12 +1,11 @@ package daablocksstore import ( - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var daaScoreBucketName = []byte("daa-score") @@ -14,6 +13,7 @@ var daaAddedBlocksBucketName = []byte("daa-added-blocks") // daaBlocksStore represents a store of DAABlocksStore type daaBlocksStore struct { + shardID model.StagingShardID daaScoreLRUCache *lrucache.LRUCache daaAddedBlocksLRUCache *lrucache.LRUCache daaScoreBucket model.DBBucket @@ -21,12 +21,13 @@ type daaBlocksStore struct { } // New instantiates a new DAABlocksStore -func New(prefix *prefix.Prefix, daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore { +func New(prefixBucket model.DBBucket, daaScoreCacheSize int, daaAddedBlocksCacheSize int, preallocate bool) model.DAABlocksStore { return &daaBlocksStore{ + shardID: staging.GenerateShardingID(), daaScoreLRUCache: lrucache.New(daaScoreCacheSize, preallocate), daaAddedBlocksLRUCache: lrucache.New(daaAddedBlocksCacheSize, preallocate), - daaScoreBucket: database.MakeBucket(prefix.Serialize()).Bucket(daaScoreBucketName), - daaAddedBlocksBucket: database.MakeBucket(prefix.Serialize()).Bucket(daaAddedBlocksBucketName), + daaScoreBucket: prefixBucket.Bucket(daaScoreBucketName), + daaAddedBlocksBucket: prefixBucket.Bucket(daaAddedBlocksBucketName), } 
} diff --git a/domain/consensus/datastructures/daawindowstore/daa_window_staging_shard.go b/domain/consensus/datastructures/daawindowstore/daa_window_staging_shard.go index 5192446e4d..2f15248d65 100644 --- a/domain/consensus/datastructures/daawindowstore/daa_window_staging_shard.go +++ b/domain/consensus/datastructures/daawindowstore/daa_window_staging_shard.go @@ -25,7 +25,7 @@ type daaWindowStagingShard struct { } func (daaws *daaWindowStore) stagingShard(stagingArea *model.StagingArea) *daaWindowStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDDAAWindow, func() model.StagingShard { + return stagingArea.GetOrCreateShard(daaws.shardID, func() model.StagingShard { return &daaWindowStagingShard{ store: daaws, toAdd: make(map[dbKey]*externalapi.BlockGHOSTDAGDataHashPair), diff --git a/domain/consensus/datastructures/daawindowstore/daa_window_store.go b/domain/consensus/datastructures/daawindowstore/daa_window_store.go index 4c305b9fe8..a12b71db08 100644 --- a/domain/consensus/datastructures/daawindowstore/daa_window_store.go +++ b/domain/consensus/datastructures/daawindowstore/daa_window_store.go @@ -3,26 +3,27 @@ package daawindowstore import ( "encoding/binary" "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucachehashpairtoblockghostdagdatahashpair" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var bucketName = []byte("daa-window") type daaWindowStore struct { - cache *lrucachehashpairtoblockghostdagdatahashpair.LRUCache - bucket model.DBBucket + shardID model.StagingShardID + cache *lrucachehashpairtoblockghostdagdatahashpair.LRUCache + bucket model.DBBucket } // New instantiates a new 
BlocksWithTrustedDataDAAWindowStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.BlocksWithTrustedDataDAAWindowStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.BlocksWithTrustedDataDAAWindowStore { return &daaWindowStore{ - cache: lrucachehashpairtoblockghostdagdatahashpair.New(cacheSize, preallocate), - bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName), + shardID: staging.GenerateShardingID(), + cache: lrucachehashpairtoblockghostdagdatahashpair.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), } } diff --git a/domain/consensus/datastructures/finalitystore/finality_staging_shard.go b/domain/consensus/datastructures/finalitystore/finality_staging_shard.go index 5763ecc816..70d3124602 100644 --- a/domain/consensus/datastructures/finalitystore/finality_staging_shard.go +++ b/domain/consensus/datastructures/finalitystore/finality_staging_shard.go @@ -11,7 +11,7 @@ type finalityStagingShard struct { } func (fs *finalityStore) stagingShard(stagingArea *model.StagingArea) *finalityStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDFinality, func() model.StagingShard { + return stagingArea.GetOrCreateShard(fs.shardID, func() model.StagingShard { return &finalityStagingShard{ store: fs, toAdd: make(map[externalapi.DomainHash]*externalapi.DomainHash), diff --git a/domain/consensus/datastructures/finalitystore/finality_store.go b/domain/consensus/datastructures/finalitystore/finality_store.go index 4821f7bd73..85da7aa36a 100644 --- a/domain/consensus/datastructures/finalitystore/finality_store.go +++ b/domain/consensus/datastructures/finalitystore/finality_store.go @@ -1,25 +1,24 @@ package finalitystore import ( - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - 
"github.com/kaspanet/kaspad/domain/prefixmanager/prefix" ) var bucketName = []byte("finality-points") type finalityStore struct { - cache *lrucache.LRUCache - bucket model.DBBucket + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket } // New instantiates a new FinalityStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.FinalityStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.FinalityStore { return &finalityStore{ cache: lrucache.New(cacheSize, preallocate), - bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName), + bucket: prefixBucket.Bucket(bucketName), } } diff --git a/domain/consensus/datastructures/ghostdagdatastore/ghostadag_data_staging_shard.go b/domain/consensus/datastructures/ghostdagdatastore/ghostadag_data_staging_shard.go index f5030f0d8c..d553287a02 100644 --- a/domain/consensus/datastructures/ghostdagdatastore/ghostadag_data_staging_shard.go +++ b/domain/consensus/datastructures/ghostdagdatastore/ghostadag_data_staging_shard.go @@ -23,7 +23,7 @@ type ghostdagDataStagingShard struct { } func (gds *ghostdagDataStore) stagingShard(stagingArea *model.StagingArea) *ghostdagDataStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDGHOSTDAG, func() model.StagingShard { + return stagingArea.GetOrCreateShard(gds.shardID, func() model.StagingShard { return &ghostdagDataStagingShard{ store: gds, toAdd: make(map[key]*externalapi.BlockGHOSTDAGData), diff --git a/domain/consensus/datastructures/ghostdagdatastore/ghostdag_data_store.go b/domain/consensus/datastructures/ghostdagdatastore/ghostdag_data_store.go index a8b76358f0..c2d1c563a4 100644 --- a/domain/consensus/datastructures/ghostdagdatastore/ghostdag_data_store.go +++ b/domain/consensus/datastructures/ghostdagdatastore/ghostdag_data_store.go @@ -2,12 +2,11 @@ package ghostdagdatastore import ( "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" 
"github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheghostdagdata" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var ghostdagDataBucketName = []byte("block-ghostdag-data") @@ -15,17 +14,19 @@ var trustedDataBucketName = []byte("block-with-trusted-data-ghostdag-data") // ghostdagDataStore represents a store of BlockGHOSTDAGData type ghostdagDataStore struct { + shardID model.StagingShardID cache *lrucacheghostdagdata.LRUCache ghostdagDataBucket model.DBBucket trustedDataBucket model.DBBucket } // New instantiates a new GHOSTDAGDataStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.GHOSTDAGDataStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.GHOSTDAGDataStore { return &ghostdagDataStore{ + shardID: staging.GenerateShardingID(), cache: lrucacheghostdagdata.New(cacheSize, preallocate), - ghostdagDataBucket: database.MakeBucket(prefix.Serialize()).Bucket(ghostdagDataBucketName), - trustedDataBucket: database.MakeBucket(prefix.Serialize()).Bucket(trustedDataBucketName), + ghostdagDataBucket: prefixBucket.Bucket(ghostdagDataBucketName), + trustedDataBucket: prefixBucket.Bucket(trustedDataBucketName), } } diff --git a/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_staging_shard.go b/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_staging_shard.go index 295bc5175e..3bf26f0e0f 100644 --- a/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_staging_shard.go +++ b/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_staging_shard.go @@ -15,7 +15,7 @@ type headersSelectedChainStagingShard struct { } func (hscs *headersSelectedChainStore) 
stagingShard(stagingArea *model.StagingArea) *headersSelectedChainStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDHeadersSelectedChain, func() model.StagingShard { + return stagingArea.GetOrCreateShard(hscs.shardID, func() model.StagingShard { return &headersSelectedChainStagingShard{ store: hscs, addedByHash: make(map[externalapi.DomainHash]uint64), diff --git a/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_store.go b/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_store.go index 1d38ca4ac7..6048a83848 100644 --- a/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_store.go +++ b/domain/consensus/datastructures/headersselectedchainstore/headers_selected_chain_store.go @@ -2,7 +2,7 @@ package headersselectedchainstore import ( "encoding/binary" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization" @@ -18,6 +18,7 @@ var bucketChainBlockIndexByHashName = []byte("chain-block-index-by-hash") var highestChainBlockIndexKeyName = []byte("highest-chain-block-index") type headersSelectedChainStore struct { + shardID model.StagingShardID cacheByIndex *lrucacheuint64tohash.LRUCache cacheByHash *lrucache.LRUCache cacheHighestChainBlockIndex uint64 @@ -27,13 +28,14 @@ type headersSelectedChainStore struct { } // New instantiates a new HeadersSelectedChainStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.HeadersSelectedChainStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.HeadersSelectedChainStore { return &headersSelectedChainStore{ + shardID: staging.GenerateShardingID(), cacheByIndex: lrucacheuint64tohash.New(cacheSize, preallocate), cacheByHash: lrucache.New(cacheSize, preallocate), - bucketChainBlockHashByIndex: 
database.MakeBucket(prefix.Serialize()).Bucket(bucketChainBlockHashByIndexName), - bucketChainBlockIndexByHash: database.MakeBucket(prefix.Serialize()).Bucket(bucketChainBlockIndexByHashName), - highestChainBlockIndexKey: database.MakeBucket(prefix.Serialize()).Key(highestChainBlockIndexKeyName), + bucketChainBlockHashByIndex: prefixBucket.Bucket(bucketChainBlockHashByIndexName), + bucketChainBlockIndexByHash: prefixBucket.Bucket(bucketChainBlockIndexByHashName), + highestChainBlockIndexKey: prefixBucket.Key(highestChainBlockIndexKeyName), } } diff --git a/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tip_staging_shard.go b/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tip_staging_shard.go index 4372cdd909..245c5bd8d7 100644 --- a/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tip_staging_shard.go +++ b/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tip_staging_shard.go @@ -11,7 +11,7 @@ type headersSelectedTipStagingShard struct { } func (hsts *headerSelectedTipStore) stagingShard(stagingArea *model.StagingArea) *headersSelectedTipStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDHeadersSelectedTip, func() model.StagingShard { + return stagingArea.GetOrCreateShard(hsts.shardID, func() model.StagingShard { return &headersSelectedTipStagingShard{ store: hsts, newSelectedTip: nil, diff --git a/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tips_store.go b/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tips_store.go index 04128b38e3..41cd6e321d 100644 --- a/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tips_store.go +++ b/domain/consensus/datastructures/headersselectedtipstore/headers_selected_tips_store.go @@ -2,24 +2,25 @@ package headersselectedtipstore import ( "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" 
"github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var keyName = []byte("headers-selected-tip") type headerSelectedTipStore struct { - cache *externalapi.DomainHash - key model.DBKey + shardID model.StagingShardID + cache *externalapi.DomainHash + key model.DBKey } // New instantiates a new HeaderSelectedTipStore -func New(prefix *prefix.Prefix) model.HeaderSelectedTipStore { +func New(prefixBucket model.DBBucket) model.HeaderSelectedTipStore { return &headerSelectedTipStore{ - key: database.MakeBucket(prefix.Serialize()).Key(keyName), + shardID: staging.GenerateShardingID(), + key: prefixBucket.Key(keyName), } } diff --git a/domain/consensus/datastructures/multisetstore/multiset_staging_shard.go b/domain/consensus/datastructures/multisetstore/multiset_staging_shard.go index d4cfaf11ba..f7a6c45639 100644 --- a/domain/consensus/datastructures/multisetstore/multiset_staging_shard.go +++ b/domain/consensus/datastructures/multisetstore/multiset_staging_shard.go @@ -12,7 +12,7 @@ type multisetStagingShard struct { } func (ms *multisetStore) stagingShard(stagingArea *model.StagingArea) *multisetStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDMultiset, func() model.StagingShard { + return stagingArea.GetOrCreateShard(ms.shardID, func() model.StagingShard { return &multisetStagingShard{ store: ms, toAdd: make(map[externalapi.DomainHash]model.Multiset), diff --git a/domain/consensus/datastructures/multisetstore/multiset_store.go b/domain/consensus/datastructures/multisetstore/multiset_store.go index 314157be7f..8d228caec2 100644 --- a/domain/consensus/datastructures/multisetstore/multiset_store.go +++ b/domain/consensus/datastructures/multisetstore/multiset_store.go @@ -2,27 +2,28 @@ package multisetstore import ( 
"github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var bucketName = []byte("multisets") // multisetStore represents a store of Multisets type multisetStore struct { - cache *lrucache.LRUCache - bucket model.DBBucket + shardID model.StagingShardID + cache *lrucache.LRUCache + bucket model.DBBucket } // New instantiates a new MultisetStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.MultisetStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.MultisetStore { return &multisetStore{ - cache: lrucache.New(cacheSize, preallocate), - bucket: database.MakeBucket(prefix.Serialize()).Bucket(bucketName), + shardID: staging.GenerateShardingID(), + cache: lrucache.New(cacheSize, preallocate), + bucket: prefixBucket.Bucket(bucketName), } } diff --git a/domain/consensus/datastructures/pruningstore/pruning_staging_shard.go b/domain/consensus/datastructures/pruningstore/pruning_staging_shard.go index baff5cb033..1298f2f448 100644 --- a/domain/consensus/datastructures/pruningstore/pruning_staging_shard.go +++ b/domain/consensus/datastructures/pruningstore/pruning_staging_shard.go @@ -15,7 +15,7 @@ type pruningStagingShard struct { } func (ps *pruningStore) stagingShard(stagingArea *model.StagingArea) *pruningStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDPruning, func() model.StagingShard { + return stagingArea.GetOrCreateShard(ps.shardID, func() model.StagingShard { return &pruningStagingShard{ store: ps, pruningPointByIndex: map[uint64]*externalapi.DomainHash{}, diff --git 
a/domain/consensus/datastructures/pruningstore/pruning_store.go b/domain/consensus/datastructures/pruningstore/pruning_store.go index 0d5b0cf112..fcd8bb1a52 100644 --- a/domain/consensus/datastructures/pruningstore/pruning_store.go +++ b/domain/consensus/datastructures/pruningstore/pruning_store.go @@ -9,7 +9,7 @@ import ( "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheuint64tohash" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var currentPruningPointIndexKeyName = []byte("pruning-block-index") @@ -20,6 +20,7 @@ var pruningPointByIndexBucketName = []byte("pruning-point-by-index") // pruningStore represents a store for the current pruning state type pruningStore struct { + shardID model.StagingShardID pruningPointByIndexCache *lrucacheuint64tohash.LRUCache currentPruningPointIndexCache *uint64 pruningPointCandidateCache *externalapi.DomainHash @@ -34,16 +35,17 @@ type pruningStore struct { } // New instantiates a new PruningStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.PruningStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.PruningStore { return &pruningStore{ + shardID: staging.GenerateShardingID(), pruningPointByIndexCache: lrucacheuint64tohash.New(cacheSize, preallocate), - currentPruningPointIndexKey: database.MakeBucket(prefix.Serialize()).Key(currentPruningPointIndexKeyName), - candidatePruningPointHashKey: database.MakeBucket(prefix.Serialize()).Key(candidatePruningPointHashKeyName), - pruningPointUTXOSetBucket: database.MakeBucket(prefix.Serialize()).Bucket(pruningPointUTXOSetBucketName), - importedPruningPointUTXOsBucket: database.MakeBucket(prefix.Serialize()).Bucket(importedPruningPointUTXOsBucketName), - updatingPruningPointUTXOSetKey: 
database.MakeBucket(prefix.Serialize()).Key(updatingPruningPointUTXOSetKeyName), - importedPruningPointMultisetKey: database.MakeBucket(prefix.Serialize()).Key(importedPruningPointMultisetKeyName), - pruningPointByIndexBucket: database.MakeBucket(prefix.Serialize()).Bucket(pruningPointByIndexBucketName), + currentPruningPointIndexKey: prefixBucket.Key(currentPruningPointIndexKeyName), + candidatePruningPointHashKey: prefixBucket.Key(candidatePruningPointHashKeyName), + pruningPointUTXOSetBucket: prefixBucket.Bucket(pruningPointUTXOSetBucketName), + importedPruningPointUTXOsBucket: prefixBucket.Bucket(importedPruningPointUTXOsBucketName), + updatingPruningPointUTXOSetKey: prefixBucket.Key(updatingPruningPointUTXOSetKeyName), + importedPruningPointMultisetKey: prefixBucket.Key(importedPruningPointMultisetKeyName), + pruningPointByIndexBucket: prefixBucket.Bucket(pruningPointByIndexBucketName), } } diff --git a/domain/consensus/datastructures/reachabilitydatastore/reachability_data_staging_shard.go b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_staging_shard.go index 04779b19ee..864a0cd502 100644 --- a/domain/consensus/datastructures/reachabilitydatastore/reachability_data_staging_shard.go +++ b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_staging_shard.go @@ -12,7 +12,7 @@ type reachabilityDataStagingShard struct { } func (rds *reachabilityDataStore) stagingShard(stagingArea *model.StagingArea) *reachabilityDataStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDReachabilityData, func() model.StagingShard { + return stagingArea.GetOrCreateShard(rds.shardID, func() model.StagingShard { return &reachabilityDataStagingShard{ store: rds, reachabilityData: make(map[externalapi.DomainHash]model.ReachabilityData), diff --git a/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go index 
0fecaa1359..31f901f642 100644 --- a/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go +++ b/domain/consensus/datastructures/reachabilitydatastore/reachability_data_store.go @@ -2,12 +2,11 @@ package reachabilitydatastore import ( "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" ) var reachabilityDataBucketName = []byte("reachability-data") @@ -15,6 +14,7 @@ var reachabilityReindexRootKeyName = []byte("reachability-reindex-root") // reachabilityDataStore represents a store of ReachabilityData type reachabilityDataStore struct { + shardID model.StagingShardID reachabilityDataCache *lrucache.LRUCache reachabilityReindexRootCache *externalapi.DomainHash @@ -23,11 +23,12 @@ type reachabilityDataStore struct { } // New instantiates a new ReachabilityDataStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) model.ReachabilityDataStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.ReachabilityDataStore { return &reachabilityDataStore{ + shardID: staging.GenerateShardingID(), reachabilityDataCache: lrucache.New(cacheSize, preallocate), - reachabilityDataBucket: database.MakeBucket(prefix.Serialize()).Bucket(reachabilityDataBucketName), - reachabilityReindexRootKey: database.MakeBucket(prefix.Serialize()).Key(reachabilityReindexRootKeyName), + reachabilityDataBucket: prefixBucket.Bucket(reachabilityDataBucketName), + reachabilityReindexRootKey: prefixBucket.Key(reachabilityReindexRootKeyName), } } diff --git a/domain/consensus/datastructures/utxodiffstore/utxo_diff_staging_shard.go 
b/domain/consensus/datastructures/utxodiffstore/utxo_diff_staging_shard.go index 20c481281a..1d8dd0b48e 100644 --- a/domain/consensus/datastructures/utxodiffstore/utxo_diff_staging_shard.go +++ b/domain/consensus/datastructures/utxodiffstore/utxo_diff_staging_shard.go @@ -13,7 +13,7 @@ type utxoDiffStagingShard struct { } func (uds *utxoDiffStore) stagingShard(stagingArea *model.StagingArea) *utxoDiffStagingShard { - return stagingArea.GetOrCreateShard(model.StagingShardIDUTXODiff, func() model.StagingShard { + return stagingArea.GetOrCreateShard(uds.shardID, func() model.StagingShard { return &utxoDiffStagingShard{ store: uds, utxoDiffToAdd: make(map[externalapi.DomainHash]externalapi.UTXODiff), diff --git a/domain/consensus/datastructures/utxodiffstore/utxo_diff_store.go b/domain/consensus/datastructures/utxodiffstore/utxo_diff_store.go index 8f8f31f24b..b91eb8edbf 100644 --- a/domain/consensus/datastructures/utxodiffstore/utxo_diff_store.go +++ b/domain/consensus/datastructures/utxodiffstore/utxo_diff_store.go @@ -2,12 +2,11 @@ package utxodiffstore import ( "github.com/golang/protobuf/proto" - "github.com/kaspanet/kaspad/domain/consensus/database" "github.com/kaspanet/kaspad/domain/consensus/database/serialization" "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" "github.com/kaspanet/kaspad/domain/consensus/utils/lrucache" - "github.com/kaspanet/kaspad/domain/prefixmanager/prefix" + "github.com/kaspanet/kaspad/util/staging" "github.com/pkg/errors" ) @@ -16,6 +15,7 @@ var utxoDiffChildBucketName = []byte("utxo-diff-children") // utxoDiffStore represents a store of UTXODiffs type utxoDiffStore struct { + shardID model.StagingShardID utxoDiffCache *lrucache.LRUCache utxoDiffChildCache *lrucache.LRUCache utxoDiffBucket model.DBBucket @@ -23,12 +23,13 @@ type utxoDiffStore struct { } // New instantiates a new UTXODiffStore -func New(prefix *prefix.Prefix, cacheSize int, preallocate bool) 
model.UTXODiffStore { +func New(prefixBucket model.DBBucket, cacheSize int, preallocate bool) model.UTXODiffStore { return &utxoDiffStore{ + shardID: staging.GenerateShardingID(), utxoDiffCache: lrucache.New(cacheSize, preallocate), utxoDiffChildCache: lrucache.New(cacheSize, preallocate), - utxoDiffBucket: database.MakeBucket(prefix.Serialize()).Bucket(utxoDiffBucketName), - utxoDiffChildBucket: database.MakeBucket(prefix.Serialize()).Bucket(utxoDiffChildBucketName), + utxoDiffBucket: prefixBucket.Bucket(utxoDiffBucketName), + utxoDiffChildBucket: prefixBucket.Bucket(utxoDiffChildBucketName), } } diff --git a/domain/consensus/factory.go b/domain/consensus/factory.go index d0a8dbf584..c79dd2b929 100644 --- a/domain/consensus/factory.go +++ b/domain/consensus/factory.go @@ -2,6 +2,7 @@ package consensus import ( "github.com/kaspanet/kaspad/domain/consensus/datastructures/daawindowstore" + "github.com/kaspanet/kaspad/domain/consensus/processes/blockparentbuilder" "io/ioutil" "os" "sync" @@ -103,6 +104,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas externalapi.Consensus, error) { dbManager := consensusdatabase.New(db) + prefixBucket := consensusdatabase.MakeBucket(dbPrefix.Serialize()) pruningWindowSizeForCaches := int(config.PruningDepth()) @@ -118,24 +120,24 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas pruningWindowSizePlusFinalityDepthForCache := int(config.PruningDepth() + config.FinalityDepth()) // Data Structures - daaWindowStore := daawindowstore.New(dbPrefix, 10_000, preallocateCaches) - acceptanceDataStore := acceptancedatastore.New(dbPrefix, 200, preallocateCaches) - blockStore, err := blockstore.New(dbManager, dbPrefix, 200, preallocateCaches) + daaWindowStore := daawindowstore.New(prefixBucket, 10_000, preallocateCaches) + acceptanceDataStore := acceptancedatastore.New(prefixBucket, 200, preallocateCaches) + blockStore, err := blockstore.New(dbManager, prefixBucket, 200, 
preallocateCaches) if err != nil { return nil, err } - blockHeaderStore, err := blockheaderstore.New(dbManager, dbPrefix, 10_000, preallocateCaches) + blockHeaderStore, err := blockheaderstore.New(dbManager, prefixBucket, 10_000, preallocateCaches) if err != nil { return nil, err } - blockRelationStore := blockrelationstore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches) + blockRelationStore := blockrelationstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches) - blockStatusStore := blockstatusstore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches) - multisetStore := multisetstore.New(dbPrefix, 200, preallocateCaches) - pruningStore := pruningstore.New(dbPrefix, 2, preallocateCaches) - reachabilityDataStore := reachabilitydatastore.New(dbPrefix, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches) - utxoDiffStore := utxodiffstore.New(dbPrefix, 200, preallocateCaches) - consensusStateStore := consensusstatestore.New(dbPrefix, 10_000, preallocateCaches) + blockStatusStore := blockstatusstore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches) + multisetStore := multisetstore.New(prefixBucket, 200, preallocateCaches) + pruningStore := pruningstore.New(prefixBucket, 2, preallocateCaches) + reachabilityDataStore := reachabilitydatastore.New(prefixBucket, pruningWindowSizePlusFinalityDepthForCache, preallocateCaches) + utxoDiffStore := utxodiffstore.New(prefixBucket, 200, preallocateCaches) + consensusStateStore := consensusstatestore.New(prefixBucket, 10_000, preallocateCaches) // Some tests artificially decrease the pruningWindowSize, thus making the GhostDagStore cache too small for a // a single DifficultyAdjustmentWindow. 
To alleviate this problem we make sure that the cache size is at least @@ -144,12 +146,12 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas if ghostdagDataCacheSize < config.DifficultyAdjustmentWindowSize { ghostdagDataCacheSize = config.DifficultyAdjustmentWindowSize } - ghostdagDataStore := ghostdagdatastore.New(dbPrefix, ghostdagDataCacheSize, preallocateCaches) + ghostdagDataStore := ghostdagdatastore.New(prefixBucket, ghostdagDataCacheSize, preallocateCaches) - headersSelectedTipStore := headersselectedtipstore.New(dbPrefix) - finalityStore := finalitystore.New(dbPrefix, 200, preallocateCaches) - headersSelectedChainStore := headersselectedchainstore.New(dbPrefix, pruningWindowSizeForCaches, preallocateCaches) - daaBlocksStore := daablocksstore.New(dbPrefix, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches) + headersSelectedTipStore := headersselectedtipstore.New(prefixBucket) + finalityStore := finalitystore.New(prefixBucket, 200, preallocateCaches) + headersSelectedChainStore := headersselectedchainstore.New(prefixBucket, pruningWindowSizeForCaches, preallocateCaches) + daaBlocksStore := daablocksstore.New(prefixBucket, pruningWindowSizeForCaches, int(config.FinalityDepth()), preallocateCaches) // Processes reachabilityManager := reachabilitymanager.New( @@ -161,6 +163,13 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas reachabilityManager, blockRelationStore, ghostdagDataStore) + blockParentBuilder := blockparentbuilder.New( + dbManager, + blockHeaderStore, + dagTopologyManager, + reachabilityDataStore, + pruningStore, + ) ghostdagManager := f.ghostdagConstructor( dbManager, dagTopologyManager, @@ -316,6 +325,7 @@ func (f *factory) NewConsensus(config *Config, db infrastructuredatabase.Databas mergeDepthManager, reachabilityManager, finalityManager, + blockParentBuilder, pruningManager, pruningStore, @@ -355,6 +365,7 @@ func (f *factory) NewConsensus(config 
*Config, db infrastructuredatabase.Databas ghostdagManager, transactionValidator, finalityManager, + blockParentBuilder, pruningManager, acceptanceDataStore, diff --git a/domain/consensus/model/externalapi/block.go b/domain/consensus/model/externalapi/block.go index 95c19141ca..15fae97635 100644 --- a/domain/consensus/model/externalapi/block.go +++ b/domain/consensus/model/externalapi/block.go @@ -58,6 +58,7 @@ type BlockHeader interface { type BaseBlockHeader interface { Version() uint16 Parents() []BlockLevelParents + ParentsAtLevel(level int) BlockLevelParents DirectParents() BlockLevelParents HashMerkleRoot() *DomainHash AcceptedIDMerkleRoot() *DomainHash diff --git a/domain/consensus/model/externalapi/blocklevelparents.go b/domain/consensus/model/externalapi/blocklevelparents.go index 3d496649d2..a4768e49b9 100644 --- a/domain/consensus/model/externalapi/blocklevelparents.go +++ b/domain/consensus/model/externalapi/blocklevelparents.go @@ -6,7 +6,22 @@ type BlockLevelParents []*DomainHash // Equal returns true if this BlockLevelParents is equal to `other` func (sl BlockLevelParents) Equal(other BlockLevelParents) bool { - return HashesEqual(sl, other) + if len(sl) != len(other) { + return false + } + for _, thisHash := range sl { + found := false + for _, otherHash := range other { + if thisHash.Equal(otherHash) { + found = true + break + } + } + if !found { + return false + } + } + return true } // Clone creates a clone of this BlockLevelParents @@ -14,6 +29,16 @@ func (sl BlockLevelParents) Clone() BlockLevelParents { return CloneHashes(sl) } +// Contains returns true if this BlockLevelParents contains the given blockHash +func (sl BlockLevelParents) Contains(blockHash *DomainHash) bool { + for _, blockLevelParent := range sl { + if blockLevelParent.Equal(blockHash) { + return true + } + } + return false +} + // ParentsEqual returns true if all the BlockLevelParents in `a` and `b` are // equal pairwise func ParentsEqual(a, b []BlockLevelParents) bool { diff 
--git a/domain/consensus/model/interface_processes_blockparentbuilder.go b/domain/consensus/model/interface_processes_blockparentbuilder.go new file mode 100644 index 0000000000..fd42b2d3e1 --- /dev/null +++ b/domain/consensus/model/interface_processes_blockparentbuilder.go @@ -0,0 +1,9 @@ +package model + +import "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" + +// BlockParentBuilder exposes a method to build super-block parents for +// a given set of direct parents +type BlockParentBuilder interface { + BuildParents(stagingArea *StagingArea, directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) +} diff --git a/domain/consensus/model/staging_area.go b/domain/consensus/model/staging_area.go index 56b77f7a11..00f6262c04 100644 --- a/domain/consensus/model/staging_area.go +++ b/domain/consensus/model/staging_area.go @@ -9,29 +9,7 @@ type StagingShard interface { } // StagingShardID is used to identify each of the store's staging shards -type StagingShardID byte - -// StagingShardID constants -const ( - StagingShardIDAcceptanceData StagingShardID = iota - StagingShardIDBlockHeader - StagingShardIDBlockRelation - StagingShardIDBlockStatus - StagingShardIDBlock - StagingShardIDConsensusState - StagingShardIDDAABlocks - StagingShardIDFinality - StagingShardIDGHOSTDAG - StagingShardIDHeadersSelectedChain - StagingShardIDHeadersSelectedTip - StagingShardIDMultiset - StagingShardIDPruning - StagingShardIDReachabilityData - StagingShardIDUTXODiff - StagingShardIDDAAWindow - // Always leave StagingShardIDLen as the last constant - StagingShardIDLen -) +type StagingShardID uint64 // StagingArea is single changeset inside the consensus database, similar to a transaction in a classic database. // Each StagingArea consists of multiple StagingShards, one for each dataStore that has any changes within it. 
@@ -41,16 +19,14 @@ const ( // When the StagingArea is being Committed, it goes over all it's shards, and commits those one-by-one. // Since Commit happens in a DatabaseTransaction, a StagingArea is atomic. type StagingArea struct { - // shards is deliberately an array and not a map, as an optimization - since it's being read a lot of time, and - // reads from maps are relatively slow. - shards [StagingShardIDLen]StagingShard + shards []StagingShard isCommitted bool } // NewStagingArea creates a new, empty staging area. func NewStagingArea() *StagingArea { return &StagingArea{ - shards: [StagingShardIDLen]StagingShard{}, + shards: []StagingShard{}, isCommitted: false, } } @@ -58,6 +34,9 @@ func NewStagingArea() *StagingArea { // GetOrCreateShard attempts to retrieve a shard with the given name. // If it does not exist - a new shard is created using `createFunc`. func (sa *StagingArea) GetOrCreateShard(shardID StagingShardID, createFunc func() StagingShard) StagingShard { + for uint64(len(sa.shards)) <= uint64(shardID) { + sa.shards = append(sa.shards, nil) + } if sa.shards[shardID] == nil { sa.shards[shardID] = createFunc() } diff --git a/domain/consensus/processes/blockbuilder/block_builder.go b/domain/consensus/processes/blockbuilder/block_builder.go index 78fed3edf2..26fb57cc4f 100644 --- a/domain/consensus/processes/blockbuilder/block_builder.go +++ b/domain/consensus/processes/blockbuilder/block_builder.go @@ -29,6 +29,7 @@ type blockBuilder struct { transactionValidator model.TransactionValidator finalityManager model.FinalityManager pruningManager model.PruningManager + blockParentBuilder model.BlockParentBuilder acceptanceDataStore model.AcceptanceDataStore blockRelationStore model.BlockRelationStore @@ -49,6 +50,7 @@ func New( ghostdagManager model.GHOSTDAGManager, transactionValidator model.TransactionValidator, finalityManager model.FinalityManager, + blockParentBuilder model.BlockParentBuilder, pruningManager model.PruningManager, acceptanceDataStore 
model.AcceptanceDataStore, @@ -69,6 +71,7 @@ func New( ghostdagManager: ghostdagManager, transactionValidator: transactionValidator, finalityManager: finalityManager, + blockParentBuilder: blockParentBuilder, pruningManager: pruningManager, acceptanceDataStore: acceptanceDataStore, @@ -235,7 +238,7 @@ func (bb *blockBuilder) newBlockParents(stagingArea *model.StagingArea) ([]exter if err != nil { return nil, err } - return []externalapi.BlockLevelParents{virtualBlockRelations.Parents}, nil + return bb.blockParentBuilder.BuildParents(stagingArea, virtualBlockRelations.Parents) } func (bb *blockBuilder) newBlockTime(stagingArea *model.StagingArea) (int64, error) { diff --git a/domain/consensus/processes/blockbuilder/test_block_builder.go b/domain/consensus/processes/blockbuilder/test_block_builder.go index ebab2f87f4..a798f53dda 100644 --- a/domain/consensus/processes/blockbuilder/test_block_builder.go +++ b/domain/consensus/processes/blockbuilder/test_block_builder.go @@ -12,6 +12,7 @@ import ( "github.com/kaspanet/kaspad/infrastructure/logger" "github.com/pkg/errors" "math/big" + "sort" ) type testBlockBuilder struct { @@ -82,7 +83,16 @@ func (bb *testBlockBuilder) buildUTXOInvalidHeader(stagingArea *model.StagingAre return nil, err } - parents := []externalapi.BlockLevelParents{parentHashes} + parents, err := bb.blockParentBuilder.BuildParents(stagingArea, parentHashes) + if err != nil { + return nil, err + } + + for _, blockLevelParents := range parents { + sort.Slice(blockLevelParents, func(i, j int) bool { + return blockLevelParents[i].Less(blockLevelParents[j]) + }) + } bb.nonceCounter++ return blockheader.NewImmutableBlockHeader( diff --git a/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go b/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go new file mode 100644 index 0000000000..b8f40eb155 --- /dev/null +++ b/domain/consensus/processes/blockparentbuilder/blockparentbuilder.go @@ -0,0 +1,219 @@ +package blockparentbuilder 
+ +import ( + "github.com/kaspanet/kaspad/domain/consensus/model" + "github.com/kaspanet/kaspad/domain/consensus/model/externalapi" + "github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing" + "github.com/kaspanet/kaspad/domain/consensus/utils/hashset" + "github.com/kaspanet/kaspad/domain/consensus/utils/pow" + "github.com/pkg/errors" +) + +type blockParentBuilder struct { + databaseContext model.DBManager + blockHeaderStore model.BlockHeaderStore + dagTopologyManager model.DAGTopologyManager + reachabilityDataStore model.ReachabilityDataStore + pruningStore model.PruningStore +} + +// New creates a new instance of a BlockParentBuilder +func New( + databaseContext model.DBManager, + blockHeaderStore model.BlockHeaderStore, + dagTopologyManager model.DAGTopologyManager, + reachabilityDataStore model.ReachabilityDataStore, + pruningStore model.PruningStore, +) model.BlockParentBuilder { + return &blockParentBuilder{ + databaseContext: databaseContext, + blockHeaderStore: blockHeaderStore, + dagTopologyManager: dagTopologyManager, + reachabilityDataStore: reachabilityDataStore, + pruningStore: pruningStore, + } +} + +func (bpb *blockParentBuilder) BuildParents(stagingArea *model.StagingArea, + directParentHashes []*externalapi.DomainHash) ([]externalapi.BlockLevelParents, error) { + + // Late on we'll mutate direct parent hashes, so we first clone it. + directParentHashesCopy := make([]*externalapi.DomainHash, len(directParentHashes)) + copy(directParentHashesCopy, directParentHashes) + + pruningPoint, err := bpb.pruningStore.PruningPoint(bpb.databaseContext, stagingArea) + if err != nil { + return nil, err + } + + // The first candidates to be added should be from a parent in the future of the pruning + // point, so later on we'll know that every block that doesn't have reachability data + // (i.e. pruned) is necessarily in the past of the current candidates and cannot be + // considered as a valid candidate. 
+ // This is why we sort the direct parent headers in a way that the first one will be + // in the future of the pruning point. + directParentHeaders := make([]externalapi.BlockHeader, len(directParentHashesCopy)) + firstParentInFutureOfPruningPointIndex := 0 + foundFirstParentInFutureOfPruningPoint := false + for i, directParentHash := range directParentHashesCopy { + isInFutureOfPruningPoint, err := bpb.dagTopologyManager.IsAncestorOf(stagingArea, pruningPoint, directParentHash) + if err != nil { + return nil, err + } + + if !isInFutureOfPruningPoint { + continue + } + + firstParentInFutureOfPruningPointIndex = i + foundFirstParentInFutureOfPruningPoint = true + break + } + + if !foundFirstParentInFutureOfPruningPoint { + return nil, errors.New("BuildParents should get at least one parent in the future of the pruning point") + } + + oldFirstDirectParent := directParentHashesCopy[0] + directParentHashesCopy[0] = directParentHashesCopy[firstParentInFutureOfPruningPointIndex] + directParentHashesCopy[firstParentInFutureOfPruningPointIndex] = oldFirstDirectParent + + for i, directParentHash := range directParentHashesCopy { + directParentHeader, err := bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, directParentHash) + if err != nil { + return nil, err + } + directParentHeaders[i] = directParentHeader + } + + type blockToReferences map[externalapi.DomainHash][]*externalapi.DomainHash + candidatesByLevelToReferenceBlocksMap := make(map[int]blockToReferences) + + // Direct parents are guaranteed to be in one other's anticones so add them all to + // all the block levels they occupy + for _, directParentHeader := range directParentHeaders { + directParentHash := consensushashing.HeaderHash(directParentHeader) + proofOfWorkValue := pow.CalculateProofOfWorkValue(directParentHeader.ToMutable()) + for blockLevel := 0; ; blockLevel++ { + if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists { + 
candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash) + } + candidatesByLevelToReferenceBlocksMap[blockLevel][*directParentHash] = []*externalapi.DomainHash{directParentHash} + if proofOfWorkValue.Bit(blockLevel+1) != 0 { + break + } + } + } + + virtualGenesisChildren, err := bpb.dagTopologyManager.Children(stagingArea, model.VirtualGenesisBlockHash) + if err != nil { + return nil, err + } + + virtualGenesisChildrenHeaders := make(map[externalapi.DomainHash]externalapi.BlockHeader, len(virtualGenesisChildren)) + for _, child := range virtualGenesisChildren { + virtualGenesisChildrenHeaders[*child], err = bpb.blockHeaderStore.BlockHeader(bpb.databaseContext, stagingArea, child) + if err != nil { + return nil, err + } + } + + for _, directParentHeader := range directParentHeaders { + for blockLevel, blockLevelParentsInHeader := range directParentHeader.Parents() { + isEmptyLevel := false + if _, exists := candidatesByLevelToReferenceBlocksMap[blockLevel]; !exists { + candidatesByLevelToReferenceBlocksMap[blockLevel] = make(map[externalapi.DomainHash][]*externalapi.DomainHash) + isEmptyLevel = true + } + + for _, parent := range blockLevelParentsInHeader { + hasReachabilityData, err := bpb.reachabilityDataStore.HasReachabilityData(bpb.databaseContext, stagingArea, parent) + if err != nil { + return nil, err + } + + // Reference blocks are the blocks that are used in reachability queries to check if + // a candidate is in the future of another candidate. In most cases this is just the + // block itself, but in the case where a block doesn't have reachability data we need + // to use some blocks in its future as reference instead. 
+ // If we make sure to add a parent in the future of the pruning point first, we can + // know that any pruned candidate that is in the past of some blocks in the pruning + // point anticone should have should be a parent (in the relevant level) of one of + // the virtual genesis children in the pruning point anticone. So we can check which + // virtual genesis children have this block as parent and use those block as + // reference blocks. + var referenceBlocks []*externalapi.DomainHash + if hasReachabilityData { + referenceBlocks = []*externalapi.DomainHash{parent} + } else { + for childHash, childHeader := range virtualGenesisChildrenHeaders { + childHash := childHash // Assign to a new pointer to avoid `range` pointer reuse + if childHeader.ParentsAtLevel(blockLevel).Contains(parent) { + referenceBlocks = append(referenceBlocks, &childHash) + } + } + } + + if isEmptyLevel { + candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks + continue + } + + if !hasReachabilityData { + continue + } + + toRemove := hashset.New() + isAncestorOfAnyCandidate := false + for candidate, candidateReferences := range candidatesByLevelToReferenceBlocksMap[blockLevel] { + candidate := candidate // Assign to a new pointer to avoid `range` pointer reuse + isInFutureOfCurrentCandidate, err := bpb.dagTopologyManager.IsAnyAncestorOf(stagingArea, candidateReferences, parent) + if err != nil { + return nil, err + } + + if isInFutureOfCurrentCandidate { + toRemove.Add(&candidate) + continue + } + + if isAncestorOfAnyCandidate { + continue + } + + isAncestorOfCurrentCandidate, err := bpb.dagTopologyManager.IsAncestorOfAny(stagingArea, parent, candidateReferences) + if err != nil { + return nil, err + } + + if isAncestorOfCurrentCandidate { + isAncestorOfAnyCandidate = true + } + } + + if toRemove.Length() > 0 { + for hash := range toRemove { + delete(candidatesByLevelToReferenceBlocksMap[blockLevel], hash) + } + } + + // We should add the block as a candidate if it's 
in the future of another candidate + // or in the anticone of all candidates. + if !isAncestorOfAnyCandidate || toRemove.Length() > 0 { + candidatesByLevelToReferenceBlocksMap[blockLevel][*parent] = referenceBlocks + } + } + } + } + + parents := make([]externalapi.BlockLevelParents, len(candidatesByLevelToReferenceBlocksMap)) + for blockLevel := 0; blockLevel < len(candidatesByLevelToReferenceBlocksMap); blockLevel++ { + levelBlocks := make(externalapi.BlockLevelParents, 0, len(candidatesByLevelToReferenceBlocksMap[blockLevel])) + for block := range candidatesByLevelToReferenceBlocksMap[blockLevel] { + block := block // Assign to a new pointer to avoid `range` pointer reuse + levelBlocks = append(levelBlocks, &block) + } + parents[blockLevel] = levelBlocks + } + return parents, nil +} diff --git a/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go b/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go index 25d1d31826..66b2a819db 100644 --- a/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go +++ b/domain/consensus/processes/blockprocessor/validate_and_insert_imported_pruning_point_test.go @@ -26,7 +26,6 @@ func addBlock(tc testapi.TestConsensus, parentHashes []*externalapi.DomainHash, } blockHash := consensushashing.BlockHash(block) - _, err = tc.ValidateAndInsertBlock(block, true) if err != nil { t.Fatalf("ValidateAndInsertBlock: %+v", err) @@ -75,7 +74,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) { t.Fatalf("GetHashesBetween: %+v", err) } - for _, blocksHash := range missingHeaderHashes { + for i, blocksHash := range missingHeaderHashes { blockInfo, err := tcSyncee.GetBlockInfo(blocksHash) if err != nil { t.Fatalf("GetBlockInfo: %+v", err) @@ -92,7 +91,7 @@ func TestValidateAndInsertImportedPruningPoint(t *testing.T) { _, err = tcSyncee.ValidateAndInsertBlock(&externalapi.DomainBlock{Header: header}, false) if err != 
nil { - t.Fatalf("ValidateAndInsertBlock: %+v", err) + t.Fatalf("ValidateAndInsertBlock %d: %+v", i, err) } } diff --git a/domain/consensus/processes/blockvalidator/block_header_in_context.go b/domain/consensus/processes/blockvalidator/block_header_in_context.go index b8d32bc8a5..ac1d5efcde 100644 --- a/domain/consensus/processes/blockvalidator/block_header_in_context.go +++ b/domain/consensus/processes/blockvalidator/block_header_in_context.go @@ -68,6 +68,13 @@ func (v *blockValidator) ValidateHeaderInContext(stagingArea *model.StagingArea, } } + if !isBlockWithTrustedData { + err = v.checkIndirectParents(stagingArea, header) + if err != nil { + return err + } + } + err = v.mergeDepthManager.CheckBoundedMergeDepth(stagingArea, blockHash, isBlockWithTrustedData) if err != nil { return err @@ -183,6 +190,19 @@ func (v *blockValidator) checkMergeSizeLimit(stagingArea *model.StagingArea, has return nil } +func (v *blockValidator) checkIndirectParents(stagingArea *model.StagingArea, header externalapi.BlockHeader) error { + expectedParents, err := v.blockParentBuilder.BuildParents(stagingArea, header.DirectParents()) + if err != nil { + return err + } + + areParentsEqual := externalapi.ParentsEqual(header.Parents(), expectedParents) + if !areParentsEqual { + return errors.Wrapf(ruleerrors.ErrUnexpectedParents, "unexpected indirect block parents") + } + return nil +} + func (v *blockValidator) checkDAAScore(stagingArea *model.StagingArea, blockHash *externalapi.DomainHash, header externalapi.BlockHeader) error { diff --git a/domain/consensus/processes/blockvalidator/blockvalidator.go b/domain/consensus/processes/blockvalidator/blockvalidator.go index 7a731a1460..ecd4759b8b 100644 --- a/domain/consensus/processes/blockvalidator/blockvalidator.go +++ b/domain/consensus/processes/blockvalidator/blockvalidator.go @@ -35,6 +35,7 @@ type blockValidator struct { pruningStore model.PruningStore reachabilityManager model.ReachabilityManager finalityManager model.FinalityManager 
+ blockParentBuilder model.BlockParentBuilder pruningManager model.PruningManager blockStore model.BlockStore @@ -69,6 +70,7 @@ func New(powMax *big.Int, mergeDepthManager model.MergeDepthManager, reachabilityManager model.ReachabilityManager, finalityManager model.FinalityManager, + blockParentBuilder model.BlockParentBuilder, pruningManager model.PruningManager, pruningStore model.PruningStore, @@ -104,6 +106,7 @@ func New(powMax *big.Int, mergeDepthManager: mergeDepthManager, reachabilityManager: reachabilityManager, finalityManager: finalityManager, + blockParentBuilder: blockParentBuilder, pruningManager: pruningManager, pruningStore: pruningStore, diff --git a/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go b/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go index 53da35a10e..286e5bdd85 100644 --- a/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go +++ b/domain/consensus/processes/blockvalidator/pruning_violation_proof_of_work_and_difficulty.go @@ -75,7 +75,7 @@ func (v *blockValidator) setParents(stagingArea *model.StagingArea, if !exists { if !isBlockWithTrustedData { - return errors.Errorf("only block with prefilled information can have some missing parents") + return errors.Errorf("direct parent %s is missing: only block with prefilled information can have some missing parents", currentParent) } continue } diff --git a/domain/consensus/processes/dagtraversalmanager/window_test.go b/domain/consensus/processes/dagtraversalmanager/window_test.go index d25310e549..fc80d212f6 100644 --- a/domain/consensus/processes/dagtraversalmanager/window_test.go +++ b/domain/consensus/processes/dagtraversalmanager/window_test.go @@ -132,37 +132,37 @@ func TestBlockWindow(t *testing.T) { { parents: []string{"H", "F"}, id: "I", - expectedWindow: []string{"F", "C", "D", "H", "B", "G"}, + expectedWindow: []string{"F", "C", "H", "D", "B", "G"}, }, { 
parents: []string{"I"}, id: "J", - expectedWindow: []string{"I", "F", "C", "D", "H", "B", "G"}, + expectedWindow: []string{"I", "F", "C", "H", "D", "B", "G"}, }, { parents: []string{"J"}, id: "K", - expectedWindow: []string{"J", "I", "F", "C", "D", "H", "B", "G"}, + expectedWindow: []string{"J", "I", "F", "C", "H", "D", "B", "G"}, }, { parents: []string{"K"}, id: "L", - expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "B", "G"}, + expectedWindow: []string{"K", "J", "I", "F", "C", "H", "D", "B", "G"}, }, { parents: []string{"L"}, id: "M", - expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "B", "G"}, + expectedWindow: []string{"L", "K", "J", "I", "F", "C", "H", "D", "B", "G"}, }, { parents: []string{"M"}, id: "N", - expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "B"}, + expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "H", "D", "B"}, }, { parents: []string{"N"}, id: "O", - expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"}, + expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "H", "D"}, }, }, dagconfig.DevnetParams.Name: { @@ -184,12 +184,12 @@ func TestBlockWindow(t *testing.T) { { parents: []string{"C", "D"}, id: "E", - expectedWindow: []string{"D", "C", "B"}, + expectedWindow: []string{"C", "D", "B"}, }, { parents: []string{"C", "D"}, id: "F", - expectedWindow: []string{"D", "C", "B"}, + expectedWindow: []string{"C", "D", "B"}, }, { parents: []string{"A"}, @@ -204,37 +204,37 @@ func TestBlockWindow(t *testing.T) { { parents: []string{"H", "F"}, id: "I", - expectedWindow: []string{"F", "H", "D", "C", "B", "G"}, + expectedWindow: []string{"F", "C", "D", "H", "G", "B"}, }, { parents: []string{"I"}, id: "J", - expectedWindow: []string{"I", "F", "H", "D", "C", "B", "G"}, + expectedWindow: []string{"I", "F", "C", "D", "H", "G", "B"}, }, { parents: []string{"J"}, id: "K", - expectedWindow: []string{"J", "I", "F", "H", "D", "C", "B", "G"}, + expectedWindow: []string{"J", 
"I", "F", "C", "D", "H", "G", "B"}, }, { parents: []string{"K"}, id: "L", - expectedWindow: []string{"K", "J", "I", "F", "H", "D", "C", "B", "G"}, + expectedWindow: []string{"K", "J", "I", "F", "C", "D", "H", "G", "B"}, }, { parents: []string{"L"}, id: "M", - expectedWindow: []string{"L", "K", "J", "I", "F", "H", "D", "C", "B", "G"}, + expectedWindow: []string{"L", "K", "J", "I", "F", "C", "D", "H", "G", "B"}, }, { parents: []string{"M"}, id: "N", - expectedWindow: []string{"M", "L", "K", "J", "I", "F", "H", "D", "C", "B"}, + expectedWindow: []string{"M", "L", "K", "J", "I", "F", "C", "D", "H", "G"}, }, { parents: []string{"N"}, id: "O", - expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "H", "D", "C"}, + expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "C", "D", "H"}, }, }, dagconfig.SimnetParams.Name: { @@ -276,37 +276,37 @@ func TestBlockWindow(t *testing.T) { { parents: []string{"H", "F"}, id: "I", - expectedWindow: []string{"F", "D", "H", "C", "G", "B"}, + expectedWindow: []string{"F", "D", "C", "H", "G", "B"}, }, { parents: []string{"I"}, id: "J", - expectedWindow: []string{"I", "F", "D", "H", "C", "G", "B"}, + expectedWindow: []string{"I", "F", "D", "C", "H", "G", "B"}, }, { parents: []string{"J"}, id: "K", - expectedWindow: []string{"J", "I", "F", "D", "H", "C", "G", "B"}, + expectedWindow: []string{"J", "I", "F", "D", "C", "H", "G", "B"}, }, { parents: []string{"K"}, id: "L", - expectedWindow: []string{"K", "J", "I", "F", "D", "H", "C", "G", "B"}, + expectedWindow: []string{"K", "J", "I", "F", "D", "C", "H", "G", "B"}, }, { parents: []string{"L"}, id: "M", - expectedWindow: []string{"L", "K", "J", "I", "F", "D", "H", "C", "G", "B"}, + expectedWindow: []string{"L", "K", "J", "I", "F", "D", "C", "H", "G", "B"}, }, { parents: []string{"M"}, id: "N", - expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "H", "C", "G"}, + expectedWindow: []string{"M", "L", "K", "J", "I", "F", "D", "C", "H", "G"}, }, { parents: []string{"N"}, id: 
"O", - expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "H", "C"}, + expectedWindow: []string{"N", "M", "L", "K", "J", "I", "F", "D", "C", "H"}, }, }, } diff --git a/domain/consensus/processes/pruningmanager/pruning_test.go b/domain/consensus/processes/pruningmanager/pruning_test.go index d8c49afb1e..fafd8c15e8 100644 --- a/domain/consensus/processes/pruningmanager/pruning_test.go +++ b/domain/consensus/processes/pruningmanager/pruning_test.go @@ -37,8 +37,8 @@ func TestPruning(t *testing.T) { }, "dag-for-test-pruning.json": { dagconfig.MainnetParams.Name: "502", - dagconfig.TestnetParams.Name: "502", - dagconfig.DevnetParams.Name: "503", + dagconfig.TestnetParams.Name: "503", + dagconfig.DevnetParams.Name: "502", dagconfig.SimnetParams.Name: "502", }, } diff --git a/domain/consensus/ruleerrors/rule_error.go b/domain/consensus/ruleerrors/rule_error.go index addf6fa756..72220d9537 100644 --- a/domain/consensus/ruleerrors/rule_error.go +++ b/domain/consensus/ruleerrors/rule_error.go @@ -200,6 +200,8 @@ var ( //ErrPruningPointViolation indicates that the pruning point isn't in the block past. 
ErrPruningPointViolation = newRuleError("ErrPruningPointViolation") + ErrUnexpectedParents = newRuleError("ErrUnexpectedParents") + ErrUnexpectedPruningPoint = newRuleError("ErrUnexpectedPruningPoint") ErrInvalidPruningPointsChain = newRuleError("ErrInvalidPruningPointsChain") diff --git a/domain/consensus/utils/blockheader/blockheader.go b/domain/consensus/utils/blockheader/blockheader.go index 8e8172ea2c..c23f003037 100644 --- a/domain/consensus/utils/blockheader/blockheader.go +++ b/domain/consensus/utils/blockheader/blockheader.go @@ -56,11 +56,16 @@ func (bh *blockHeader) Parents() []externalapi.BlockLevelParents { return bh.parents } -func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents { - if len(bh.parents) == 0 { +func (bh *blockHeader) ParentsAtLevel(level int) externalapi.BlockLevelParents { + if len(bh.parents) <= level { return externalapi.BlockLevelParents{} } - return bh.parents[0] + + return bh.parents[level] +} + +func (bh *blockHeader) DirectParents() externalapi.BlockLevelParents { + return bh.ParentsAtLevel(0) } func (bh *blockHeader) HashMerkleRoot() *externalapi.DomainHash { diff --git a/domain/consensus/utils/pow/pow.go b/domain/consensus/utils/pow/pow.go index faea01951a..59842e87ae 100644 --- a/domain/consensus/utils/pow/pow.go +++ b/domain/consensus/utils/pow/pow.go @@ -15,7 +15,7 @@ import ( // it does not check if the difficulty itself is valid or less than the maximum for the appropriate network func CheckProofOfWorkWithTarget(header externalapi.MutableBlockHeader, target *big.Int) bool { // The block pow must be less than the claimed target - powNum := calcPowValue(header) + powNum := CalculateProofOfWorkValue(header) // The block hash must be less or equal than the claimed target. 
return powNum.Cmp(target) <= 0 @@ -27,7 +27,8 @@ func CheckProofOfWorkByBits(header externalapi.MutableBlockHeader) bool { return CheckProofOfWorkWithTarget(header, difficulty.CompactToBig(header.Bits())) } -func calcPowValue(header externalapi.MutableBlockHeader) *big.Int { +// CalculateProofOfWorkValue hashes the given header and returns its big.Int value +func CalculateProofOfWorkValue(header externalapi.MutableBlockHeader) *big.Int { // Zero out the time and nonce. timestamp, nonce := header.TimeInMilliseconds(), header.Nonce() header.SetTimeInMilliseconds(0) diff --git a/util/staging/commit_all_changes.go b/util/staging/commit_all_changes.go index f2678bb0ee..8e7c2cae94 100644 --- a/util/staging/commit_all_changes.go +++ b/util/staging/commit_all_changes.go @@ -3,6 +3,7 @@ package staging import ( "github.com/kaspanet/kaspad/domain/consensus/model" "github.com/kaspanet/kaspad/infrastructure/logger" + "sync/atomic" ) // CommitAllChanges creates a transaction in `databaseContext`, and commits all changes in `stagingArea` through it. @@ -22,3 +23,10 @@ func CommitAllChanges(databaseContext model.DBManager, stagingArea *model.Stagin return dbTx.Commit() } + +var lastShardingID uint64 + +// GenerateShardingID generates a unique staging sharding ID. +func GenerateShardingID() model.StagingShardID { + return model.StagingShardID(atomic.AddUint64(&lastShardingID, 1)) +}