diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs
index 76f88a0c4d..c8f88b96d4 100644
--- a/consensus/src/consensus/storage.rs
+++ b/consensus/src/consensus/storage.rs
@@ -32,12 +32,7 @@ use kaspa_database::registry::DatabaseStorePrefixes;
 use kaspa_hashes::Hash;
 use parking_lot::RwLock;
 use rand::Rng;
-use std::{
-    cmp::max,
-    mem::size_of,
-    ops::{Deref, DerefMut},
-    sync::Arc,
-};
+use std::{cmp::max, mem::size_of, ops::DerefMut, sync::Arc};
 
 pub struct ConsensusStorage {
     // DB
@@ -45,7 +40,7 @@ pub struct ConsensusStorage {
 
     // Locked stores
     pub statuses_store: Arc<RwLock<DbStatusesStore>>,
-    pub relations_stores: Arc<[DbRelationsStore]>,
+    pub relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>,
     pub reachability_store: Arc<RwLock<DbReachabilityStore>>,
     pub reachability_relations_store: Arc<RwLock<DbRelationsStore>>,
     pub pruning_point_store: Arc<RwLock<DbPruningStore>>,
@@ -112,13 +107,14 @@ impl ConsensusStorage {
 
         // Headers
         let statuses_store = Arc::new(RwLock::new(DbStatusesStore::new(db.clone(), noise(statuses_cache_size))));
-        let relations_stores: Arc<[_]> = (0..=params.max_block_level)
-            .map(|level| {
-                let cache_size = max(relations_cache_size.checked_shr(level as u32).unwrap_or(0), 2 * params.pruning_proof_m);
-                DbRelationsStore::new(db.clone(), level, noise(cache_size))
-            })
-            .collect_vec()
-            .into();
+        let relations_stores = Arc::new(RwLock::new(
+            (0..=params.max_block_level)
+                .map(|level| {
+                    let cache_size = max(relations_cache_size.checked_shr(level as u32).unwrap_or(0), 2 * params.pruning_proof_m);
+                    DbRelationsStore::new(db.clone(), level, noise(cache_size))
+                })
+                .collect_vec(),
+        ));
 
         let reachability_store = Arc::new(RwLock::new(DbReachabilityStore::new(db.clone(), noise(reachability_cache_size))));
         let reachability_relations_store = Arc::new(RwLock::new(DbRelationsStore::with_prefix(
@@ -166,7 +162,7 @@ impl ConsensusStorage {
 
         // Ensure that reachability stores are initialized
         reachability::init(reachability_store.write().deref_mut()).unwrap();
-        relations::init(&mut reachability_relations_store.write().deref());
+        relations::init(reachability_relations_store.write().deref_mut());
 
         Arc::new(Self {
             db,
diff --git a/consensus/src/model/services/relations.rs b/consensus/src/model/services/relations.rs
index eed9ba0134..b1fcaa0e35 100644
--- a/consensus/src/model/services/relations.rs
+++ b/consensus/src/model/services/relations.rs
@@ -2,36 +2,36 @@ use crate::model::stores::relations::RelationsStoreReader;
 use kaspa_consensus_core::BlockHashSet;
 use kaspa_database::prelude::{ReadLock, StoreError, StoreResult};
 use kaspa_hashes::Hash;
+use parking_lot::RwLock;
 use std::sync::Arc;
 
 /// Multi-threaded block-relations service imp
 #[derive(Clone)]
 pub struct MTRelationsService<T: RelationsStoreReader> {
-    // TODO: Remove this wrapper
-    store: Arc<[T]>,
+    store: Arc<RwLock<Vec<T>>>,
     level: usize,
 }
 
 impl<T: RelationsStoreReader> MTRelationsService<T> {
-    pub fn new(store: Arc<[T]>, level: u8) -> Self {
+    pub fn new(store: Arc<RwLock<Vec<T>>>, level: u8) -> Self {
         Self { store, level: level as usize }
     }
 }
 
 impl<T: RelationsStoreReader> RelationsStoreReader for MTRelationsService<T> {
     fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> {
-        self.store[self.level].get_parents(hash)
+        self.store.read()[self.level].get_parents(hash)
     }
 
     fn get_children(&self, hash: Hash) -> StoreResult<ReadLock<BlockHashSet>> {
-        self.store[self.level].get_children(hash)
+        self.store.read()[self.level].get_children(hash)
     }
 
     fn has(&self, hash: Hash) -> Result<bool, StoreError> {
-        self.store[self.level].has(hash)
+        self.store.read()[self.level].has(hash)
    }
 
     fn counts(&self) -> Result<(usize, usize), StoreError> {
-        self.store[self.level].counts()
+        self.store.read()[self.level].counts()
     }
 }
diff --git a/consensus/src/model/stores/children.rs b/consensus/src/model/stores/children.rs
index b138ff75b0..937d2e9e50 100644
--- a/consensus/src/model/stores/children.rs
+++ b/consensus/src/model/stores/children.rs
@@ -112,7 +112,7 @@ impl ChildrenStoreReader for DbChildrenStore {
     }
 }
 
-impl ChildrenStore for &DbChildrenStore {
+impl ChildrenStore for DbChildrenStore {
     fn insert_child(&mut self, writer: impl DbWriter, parent: Hash, child: Hash) -> Result<(), StoreError> {
         self.access.write(writer, parent, child)?;
         Ok(())
diff --git a/consensus/src/model/stores/relations.rs b/consensus/src/model/stores/relations.rs
index c85e809034..c860df037b 100644
--- a/consensus/src/model/stores/relations.rs
+++ b/consensus/src/model/stores/relations.rs
@@ -124,18 +124,18 @@ impl RelationsStoreReader for DbRelationsStore {
 ///
 /// The trait methods itself must remain `&mut self` in order to support staging implementations
 /// which are indeed mutated locally
-impl ChildrenStore for &DbRelationsStore {
+impl ChildrenStore for DbRelationsStore {
     fn insert_child(&mut self, writer: impl DbWriter, parent: Hash, child: Hash) -> Result<(), StoreError> {
-        (&self.children_store).insert_child(writer, parent, child)
+        self.children_store.insert_child(writer, parent, child)
     }
 
     fn delete_child(&mut self, writer: impl DbWriter, parent: Hash, child: Hash) -> Result<(), StoreError> {
-        (&self.children_store).delete_child(writer, parent, child)
+        self.children_store.delete_child(writer, parent, child)
     }
 }
 
 /// The comment above over `impl ChildrenStore` applies here as well
-impl RelationsStore for &DbRelationsStore {
+impl RelationsStore for DbRelationsStore {
     type DefaultWriter = DirectDbWriter<'static>;
 
     fn default_writer(&self) -> Self::DefaultWriter {
@@ -154,7 +154,7 @@ impl RelationsStore for DbRelationsStore {
 
 pub struct StagingRelationsStore<'a> {
     // The underlying DB store to commit to
-    store: &'a DbRelationsStore,
+    store: &'a mut DbRelationsStore,
 
     /// Full entry deletions (including parents and all children)
     /// Assumed to be final, i.e., no other mutations to this entry
@@ -205,7 +205,7 @@ impl<'a> ChildrenStore for StagingRelationsStore<'a> {
 }
 
 impl<'a> StagingRelationsStore<'a> {
-    pub fn new(store: &'a DbRelationsStore) -> Self {
+    pub fn new(store: &'a mut DbRelationsStore) -> Self {
         Self {
             store,
             parents_overrides: Default::default(),
@@ -215,14 +215,14 @@ impl<'a> StagingRelationsStore<'a> {
         }
     }
 
-    pub fn commit(mut self, batch: &mut WriteBatch) -> Result<(), StoreError> {
-        for (k, v) in self.parents_overrides {
-            self.store.parents_access.write(BatchDbWriter::new(batch), k, v)?
+    pub fn commit(&mut self, batch: &mut WriteBatch) -> Result<(), StoreError> {
+        for (k, v) in self.parents_overrides.iter() {
+            self.store.parents_access.write(BatchDbWriter::new(batch), *k, (*v).clone())?
         }
-        for (parent, children) in self.children_insertions {
+        for (parent, children) in self.children_insertions.iter() {
             for child in children {
-                self.store.insert_child(BatchDbWriter::new(batch), parent, child)?;
+                self.store.insert_child(BatchDbWriter::new(batch), *parent, *child)?;
             }
         }
@@ -234,14 +234,14 @@ impl<'a> StagingRelationsStore<'a> {
         self.store.parents_access.delete_many(BatchDbWriter::new(batch), &mut self.entry_deletions.iter().copied())?;
 
         // For deleted entries, delete all children
-        for parent in self.entry_deletions {
+        for parent in self.entry_deletions.iter().copied() {
             self.store.delete_children(BatchDbWriter::new(batch), parent)?;
         }
 
         // Delete only the requested children
-        for (parent, children_to_delete) in self.children_deletions {
+        for (parent, children_to_delete) in self.children_deletions.iter() {
             for child in children_to_delete {
-                self.store.delete_child(BatchDbWriter::new(batch), parent, child)?;
+                self.store.delete_child(BatchDbWriter::new(batch), *parent, *child)?;
             }
         }
@@ -435,7 +435,7 @@ mod tests {
     #[test]
     fn test_db_relations_store() {
         let (lt, db) = create_temp_db!(kaspa_database::prelude::ConnBuilder::default().with_files_limit(10));
-        test_relations_store(&DbRelationsStore::new(db, 0, 2));
+        test_relations_store(DbRelationsStore::new(db, 0, 2));
         drop(lt)
     }
diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs
index 81ff3a1647..6d73a79e86 100644
--- a/consensus/src/pipeline/header_processor/processor.rs
+++ b/consensus/src/pipeline/header_processor/processor.rs
@@ -43,10 +43,7 @@ use kaspa_utils::vec::VecExtensions;
 use parking_lot::RwLock;
 use rayon::ThreadPool;
 use rocksdb::WriteBatch;
-use std::{
-    ops::Deref,
-    sync::{atomic::Ordering, Arc},
-};
+use std::sync::{atomic::Ordering, Arc};
 
 use super::super::ProcessingCounters;
 
@@ -127,7 +124,7 @@ pub struct HeaderProcessor {
     db: Arc<DB>,
 
     // Stores
-    pub(super) relations_stores: Arc<[DbRelationsStore]>,
+    pub(super) relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>, // TODO: Remove RwLock
     pub(super) reachability_store: Arc<RwLock<DbReachabilityStore>>,
     pub(super) reachability_relations_store: Arc<RwLock<DbRelationsStore>>,
     pub(super) ghostdag_stores: Arc<Vec<Arc<DbGhostdagStore>>>,
@@ -331,6 +328,7 @@ impl HeaderProcessor {
 
     /// Collects the known parents for all block levels
     fn collect_known_parents(&self, header: &Header, block_level: BlockLevel) -> Vec<Arc<Vec<Hash>>> {
+        let relations_read = self.relations_stores.read();
         (0..=block_level)
             .map(|level| {
                 Arc::new(
@@ -338,7 +336,7 @@ impl HeaderProcessor {
                         .parents_at_level(header, level)
                         .iter()
                         .copied()
-                        .filter(|parent| self.relations_stores[level as usize].has(*parent).unwrap())
+                        .filter(|parent| relations_read[level as usize].has(*parent).unwrap())
                         .collect_vec()
                     // This kicks-in only for trusted blocks or for level > 0. If an ordinary block is
                     // missing direct parents it will fail validation.
@@ -417,13 +415,14 @@ impl HeaderProcessor {
 
         let reachability_parents = ctx.known_parents[0].clone();
+        let mut relations_write = self.relations_stores.write();
         ctx.known_parents.into_iter().enumerate().for_each(|(level, parents_by_level)| {
-            (&self.relations_stores[level]).insert_batch(&mut batch, header.hash, parents_by_level).unwrap();
+            relations_write[level].insert_batch(&mut batch, header.hash, parents_by_level).unwrap();
         });
 
         // Write reachability relations. These relations are only needed during header pruning
-        let reachability_relations_write = self.reachability_relations_store.write();
-        reachability_relations_write.deref().insert_batch(&mut batch, ctx.hash, reachability_parents).unwrap();
+        let mut reachability_relations_write = self.reachability_relations_store.write();
+        reachability_relations_write.insert_batch(&mut batch, ctx.hash, reachability_parents).unwrap();
 
         let statuses_write = self.statuses_store.set_batch(&mut batch, ctx.hash, StatusHeaderOnly).unwrap();
@@ -439,6 +438,7 @@ impl HeaderProcessor {
         drop(reachability_write);
         drop(statuses_write);
         drop(reachability_relations_write);
+        drop(relations_write);
         drop(hst_write);
     }
@@ -494,17 +494,18 @@ impl HeaderProcessor {
     }
 
     pub fn init(&self) {
-        if self.relations_stores[0].has(ORIGIN).unwrap() {
+        if self.relations_stores.read()[0].has(ORIGIN).unwrap() {
             return;
         }
 
         let mut batch = WriteBatch::default();
-        (0..=self.max_block_level).for_each(|level| {
-            (&self.relations_stores[level as usize]).insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap()
-        });
+        let mut relations_write = self.relations_stores.write();
+        (0..=self.max_block_level)
+            .for_each(|level| relations_write[level as usize].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap());
         let mut hst_write = self.headers_selected_tip_store.write();
         hst_write.set_batch(&mut batch, SortableBlock::new(ORIGIN, 0.into())).unwrap();
         self.db.write(batch).unwrap();
         drop(hst_write);
+        drop(relations_write);
     }
 }
diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs
index 6d4d19a913..4b76ab1c0c 100644
--- a/consensus/src/pipeline/pruning_processor/processor.rs
+++ b/consensus/src/pipeline/pruning_processor/processor.rs
@@ -348,8 +348,9 @@ impl PruningProcessor {
             if !keep_blocks.contains(&current) {
                 let mut batch = WriteBatch::default();
-                let reachability_relations_write = self.reachability_relations_store.write();
-                let mut staging_relations = StagingRelationsStore::new(&reachability_relations_write);
+                let mut level_relations_write = self.relations_stores.write();
+                let mut reachability_relations_write = self.reachability_relations_store.write();
+                let mut staging_relations = StagingRelationsStore::new(&mut reachability_relations_write);
                 let mut staging_reachability = StagingReachabilityStore::new(reachability_read);
                 let mut statuses_write = self.statuses_store.write();
@@ -381,7 +382,7 @@ impl PruningProcessor {
                     // TODO: consider adding block level to compact header data
                     let block_level = self.headers_store.get_header_with_block_level(current).unwrap().block_level;
                     (0..=block_level as usize).for_each(|level| {
-                        let mut staging_level_relations = StagingRelationsStore::new(&self.relations_stores[level]);
+                        let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[level]);
                         relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option();
                         staging_level_relations.commit(&mut batch).unwrap();
                         self.ghostdag_stores[level].delete_batch(&mut batch, current).unwrap_option();
@@ -409,6 +410,7 @@ impl PruningProcessor {
                 drop(reachability_write);
                 drop(statuses_write);
                 drop(reachability_relations_write);
+                drop(level_relations_write);
 
                 reachability_read = self.reachability_store.upgradable_read();
             }
diff --git a/consensus/src/processes/parents_builder.rs b/consensus/src/processes/parents_builder.rs
index 986a15ac9c..49b3822d94 100644
--- a/consensus/src/processes/parents_builder.rs
+++ b/consensus/src/processes/parents_builder.rs
@@ -480,8 +480,8 @@ mod tests {
         }
 
         let reachability_service = MTReachabilityService::new(Arc::new(RwLock::new(reachability_store)));
-        let relations_store: Arc<[_]> =
-            vec![RelationsStoreMock { children: BlockHashes::new(vec![pruning_point, pp_anticone_block]) }].into();
+        let relations_store =
+            Arc::new(RwLock::new(vec![RelationsStoreMock { children: BlockHashes::new(vec![pruning_point, pp_anticone_block]) }]));
         let relations_service = MTRelationsService::new(relations_store, 0);
         let parents_manager = ParentsManager::new(250, genesis_hash, headers_store, reachability_service, relations_service);
 
@@ -583,7 +583,7 @@ mod tests {
         }
 
         let reachability_service = MTReachabilityService::new(Arc::new(RwLock::new(reachability_store)));
-        let relations_store: Arc<[_]> = vec![RelationsStoreMock { children: BlockHashes::new(vec![pruning_point]) }].into();
+        let relations_store = Arc::new(RwLock::new(vec![RelationsStoreMock { children: BlockHashes::new(vec![pruning_point]) }]));
         let relations_service = MTRelationsService::new(relations_store, 0);
         let parents_manager = ParentsManager::new(250, genesis_hash, headers_store, reachability_service, relations_service);
diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs
index 841fe1d6e8..6b090a1f6a 100644
--- a/consensus/src/processes/pruning_proof/mod.rs
+++ b/consensus/src/processes/pruning_proof/mod.rs
@@ -93,7 +93,7 @@ pub struct PruningProofManager {
     reachability_relations_store: Arc<RwLock<DbRelationsStore>>,
     reachability_service: MTReachabilityService<DbReachabilityStore>,
     ghostdag_stores: Arc<Vec<Arc<DbGhostdagStore>>>,
-    relations_stores: Arc<[DbRelationsStore]>,
+    relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>,
     pruning_point_store: Arc<RwLock<DbPruningStore>>,
     past_pruning_points_store: Arc<DbPastPruningPointsStore>,
     virtual_stores: Arc<RwLock<VirtualStores>>,
@@ -234,7 +234,7 @@ impl PruningProofManager {
                     .push_if_empty(ORIGIN),
             );
 
-            (&self.relations_stores[level]).insert(header.hash, parents.clone()).unwrap();
+            self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap();
             let gd = if header.hash == self.genesis_hash {
                 self.ghostdag_managers[level].genesis_ghostdag_data()
             } else if level == 0 {
@@ -352,9 +352,9 @@ impl PruningProofManager {
 
         // Prepare batch
         let mut batch = WriteBatch::default();
-        let reachability_relations_write = self.reachability_relations_store.write();
+        let mut reachability_relations_write = self.reachability_relations_store.write();
         let mut staging_reachability = StagingReachabilityStore::new(reachability_read);
-        let mut staging_reachability_relations = StagingRelationsStore::new(&reachability_relations_write);
+        let mut staging_reachability_relations = StagingRelationsStore::new(&mut reachability_relations_write);
 
         // Stage
         staging_reachability_relations.insert(hash, reachability_parents_hashes.clone()).unwrap();
@@ -392,7 +392,7 @@ impl PruningProofManager {
         let ghostdag_stores = (0..=self.max_block_level)
             .map(|level| Arc::new(DbGhostdagStore::new(db.clone(), level, 2 * self.pruning_proof_m)))
             .collect_vec();
-        let relations_stores =
+        let mut relations_stores =
             (0..=self.max_block_level).map(|level| DbRelationsStore::new(db.clone(), level, 2 * self.pruning_proof_m)).collect_vec();
         let reachability_stores = (0..=self.max_block_level)
             .map(|level| Arc::new(RwLock::new(DbReachabilityStore::with_block_level(db.clone(), 2 * self.pruning_proof_m, level))))
             .collect_vec();
@@ -423,7 +423,7 @@ impl PruningProofManager {
         for level in 0..=self.max_block_level {
             let level = level as usize;
             reachability::init(reachability_stores[level].write().deref_mut()).unwrap();
-            (&relations_stores[level]).insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap();
+            relations_stores[level].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap();
             ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap();
         }
@@ -462,7 +462,7 @@ impl PruningProofManager {
                     return Err(PruningImportError::PruningProofDuplicateHeaderAtLevel(header.hash, level));
                 }
 
-                (&relations_stores[level_idx]).insert(header.hash, parents.clone()).unwrap();
+                relations_stores[level_idx].insert(header.hash, parents.clone()).unwrap();
                 let ghostdag_data = Arc::new(ghostdag_managers[level_idx].ghostdag(&parents));
                 ghostdag_stores[level_idx].insert(header.hash, ghostdag_data.clone()).unwrap();
                 selected_tip = Some(match selected_tip {
@@ -515,6 +515,7 @@ impl PruningProofManager {
         }
 
         let pruning_read = self.pruning_point_store.read();
+        let relations_read = self.relations_stores.read();
         let current_pp = pruning_read.get().unwrap().pruning_point;
         let current_pp_header = self.headers_store.get_header(current_pp).unwrap();
 
@@ -575,7 +576,7 @@ impl PruningProofManager {
 
         for level in (0..=self.max_block_level).rev() {
             let level_idx = level as usize;
-            match self.relations_stores[level_idx].get_parents(current_pp).unwrap_option() {
+            match relations_read[level_idx].get_parents(current_pp).unwrap_option() {
                 Some(parents) => {
                     if parents
                         .iter()
@@ -593,6 +594,7 @@ impl PruningProofManager {
         }
 
         drop(pruning_read);
+        drop(relations_read);
         drop(db_lifetime);
 
         Err(PruningImportError::PruningProofNotEnoughHeaders)
@@ -666,7 +668,7 @@ impl PruningProofManager {
             }
 
             headers.push(self.headers_store.get_header(current).unwrap());
-            for child in self.relations_stores[level].get_children(current).unwrap().read().iter().copied() {
+            for child in self.relations_stores.read()[level].get_children(current).unwrap().read().iter().copied() {
                 queue.push(Reverse(SortableBlock::new(child, self.ghostdag_stores[level].get_blue_work(child).unwrap())));
             }
         }
@@ -820,7 +822,7 @@ impl PruningProofManager {
                     let ghostdag = (&*self.ghostdag_stores[0].get_data(current).unwrap()).into();
                     e.insert(TrustedHeader { header, ghostdag });
                 }
-                let parents = self.relations_stores[0].get_parents(current).unwrap();
+                let parents = self.relations_stores.read()[0].get_parents(current).unwrap();
                 for parent in parents.iter().copied() {
                     if visited.insert(parent) {
                         queue.push_back(parent);
diff --git a/consensus/src/processes/reachability/inquirer.rs b/consensus/src/processes/reachability/inquirer.rs
index e71ceb026e..1bc25c9086 100644
--- a/consensus/src/processes/reachability/inquirer.rs
+++ b/consensus/src/processes/reachability/inquirer.rs
@@ -389,12 +389,12 @@ mod tests {
         let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
         let cache_size = test.blocks.len() as u64 / 3;
         let reachability = RwLock::new(DbReachabilityStore::new(db.clone(), cache_size));
-        let relations = DbRelationsStore::with_prefix(db.clone(), &[], 0);
+        let mut relations = DbRelationsStore::with_prefix(db.clone(), &[], 0);
 
         // Add blocks via a staging store
         {
             let mut staging_reachability = StagingReachabilityStore::new(reachability.upgradable_read());
-            let mut staging_relations = StagingRelationsStore::new(&relations);
+            let mut staging_relations = StagingRelationsStore::new(&mut relations);
             let mut builder = DagBuilder::new(&mut staging_reachability, &mut staging_relations);
             builder.init();
             builder.add_block(DagBlock::new(test.genesis.into(), vec![ORIGIN]));
@@ -443,7 +443,7 @@ mod tests {
         let mut batch = WriteBatch::default();
         let mut staging_reachability = StagingReachabilityStore::new(reachability.upgradable_read());
-        let mut staging_relations = StagingRelationsStore::new(&relations);
+        let mut staging_relations = StagingRelationsStore::new(&mut relations);
 
         for (i, block) in
             test.ids().choose_multiple(&mut rand::thread_rng(), test.blocks.len()).into_iter().chain(once(test.genesis)).enumerate()
@@ -475,7 +475,7 @@ mod tests {
                 // Recapture staging stores
                 batch = WriteBatch::default();
                 staging_reachability = StagingReachabilityStore::new(reachability.upgradable_read());
-                staging_relations = StagingRelationsStore::new(&relations);
+                staging_relations = StagingRelationsStore::new(&mut relations);
             }
         }
     }
@@ -523,8 +523,8 @@ mod tests {
         let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
         let cache_size = test.blocks.len() as u64 / 3;
         let mut reachability = DbReachabilityStore::new(db.clone(), cache_size);
-        let relations = DbRelationsStore::new(db, 0, cache_size);
-        run_dag_test_case(&mut &relations, &mut reachability, &test);
+        let mut relations = DbRelationsStore::new(db, 0, cache_size);
+        run_dag_test_case(&mut relations, &mut reachability, &test);
 
         // Run with a staging process
         run_dag_test_case_with_staging(&test);
diff --git a/consensus/src/processes/relations.rs b/consensus/src/processes/relations.rs
index 07e4cc2012..34a0e58c04 100644
--- a/consensus/src/processes/relations.rs
+++ b/consensus/src/processes/relations.rs
@@ -161,8 +161,7 @@ mod tests {
     fn test_delete_level_relations_zero_cache() {
         let (_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
         let cache_size = 0;
-        let relations_store = DbRelationsStore::new(db.clone(), 0, cache_size);
-        let mut relations = &relations_store;
+        let mut relations = DbRelationsStore::new(db.clone(), 0, cache_size);
         relations.insert(ORIGIN, Default::default()).unwrap();
         relations.insert(1.into(), Arc::new(vec![ORIGIN])).unwrap();
         relations.insert(2.into(), Arc::new(vec![1.into()])).unwrap();
@@ -184,7 +183,7 @@ mod tests {
         );
 
         let mut batch = WriteBatch::default();
-        let mut staging_relations = StagingRelationsStore::new(relations);
+        let mut staging_relations = StagingRelationsStore::new(&mut relations);
         delete_level_relations(MemoryWriter, &mut staging_relations, 1.into()).unwrap();
         staging_relations.commit(&mut batch).unwrap();
         db.write(batch).unwrap();
@@ -204,7 +203,7 @@ mod tests {
         );
 
         let mut batch = WriteBatch::default();
-        let mut staging_relations = StagingRelationsStore::new(relations);
+        let mut staging_relations = StagingRelationsStore::new(&mut relations);
         delete_level_relations(MemoryWriter, &mut staging_relations, 2.into()).unwrap();
         staging_relations.commit(&mut batch).unwrap();
         db.write(batch).unwrap();
diff --git a/simpa/src/main.rs b/simpa/src/main.rs
index 78c5f19bb9..37302fbb87 100644
--- a/simpa/src/main.rs
+++ b/simpa/src/main.rs
@@ -369,8 +369,9 @@ fn topologically_ordered_hashes(src_consensus: &Consensus, genesis_hash: Hash) -
     let mut queue: VecDeque<Hash> = std::iter::once(genesis_hash).collect();
     let mut visited = BlockHashSet::new();
     let mut vec = Vec::new();
+    let relations = src_consensus.relations_stores.read();
     while let Some(current) = queue.pop_front() {
-        for child in src_consensus.relations_stores[0].get_children(current).unwrap().read().iter() {
+        for child in relations[0].get_children(current).unwrap().read().iter() {
             if visited.insert(*child) {
                 queue.push_back(*child);
                 vec.push(*child);
diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs
index efaa748a4b..997b92fee8 100644
--- a/testing/integration/src/consensus_integration_tests.rs
+++ b/testing/integration/src/consensus_integration_tests.rs
@@ -115,9 +115,8 @@ fn reachability_stretch_test(use_attack_json: bool) {
     // Act
     let (_temp_db_lifetime, db) = create_temp_db!(ConnBuilder::default().with_files_limit(10));
     let mut store = DbReachabilityStore::new(db.clone(), 100000);
-    let relations = DbRelationsStore::new(db, 0, 100000); // TODO: remove level
-    let mut binding = &relations;
-    let mut builder = DagBuilder::new(&mut store, &mut binding);
+    let mut relations = DbRelationsStore::new(db, 0, 100000); // TODO: remove level
+    let mut builder = DagBuilder::new(&mut store, &mut relations);
     builder.init();