diff --git a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
index 6f9ef9f6a9f83..4e23030aa4990 100644
--- a/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/pov_requester/mod.rs
@@ -139,6 +139,7 @@ mod tests {
     use futures::{executor, future};
     use parity_scale_codec::Encode;
+    use sc_network::ProtocolName;
     use sp_core::testing::TaskExecutor;
 
     use polkadot_node_primitives::BlockData;
@@ -231,7 +232,10 @@ mod tests {
                     Some(Requests::PoVFetchingV1(outgoing)) => {outgoing}
                 );
                 req.pending_response
-                    .send(Ok(PoVFetchingResponse::PoV(pov.clone()).encode()))
+                    .send(Ok((
+                        PoVFetchingResponse::PoV(pov.clone()).encode(),
+                        ProtocolName::from(""),
+                    )))
                     .unwrap();
                 break
             },
diff --git a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
index 460f20499ed59..a5a81082e39ad 100644
--- a/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/fetch_task/tests.rs
@@ -25,7 +25,7 @@ use futures::{
     Future, FutureExt, StreamExt,
 };
 
-use sc_network as network;
+use sc_network::{self as network, ProtocolName};
 use sp_keyring::Sr25519Keyring;
 
 use polkadot_node_network_protocol::request_response::{v1, Recipient};
@@ -252,7 +252,7 @@ impl TestRun {
                 }
             }
             req.pending_response
-                .send(response.map(Encode::encode))
+                .send(response.map(|r| (r.encode(), ProtocolName::from(""))))
                 .expect("Sending response should succeed");
         }
         return (valid_responses == 0) && self.valid_chunks.is_empty()
diff --git a/polkadot/node/network/availability-distribution/src/tests/state.rs b/polkadot/node/network/availability-distribution/src/tests/state.rs
index e95c1c3a27c2f..66a8d8fcdcf9a 100644
--- a/polkadot/node/network/availability-distribution/src/tests/state.rs
+++ b/polkadot/node/network/availability-distribution/src/tests/state.rs
@@ -19,6 +19,7 @@ use std::{
     time::Duration,
 };
 
+use network::ProtocolName;
 use polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle;
 use polkadot_node_subsystem_util::TimeoutExt;
 
@@ -324,7 +325,11 @@ fn to_incoming_req(
                 let response = rx.await;
                 let payload = response.expect("Unexpected canceled request").result;
                 pending_response
-                    .send(payload.map_err(|_| network::RequestFailure::Refused))
+                    .send(
+                        payload
+                            .map_err(|_| network::RequestFailure::Refused)
+                            .map(|r| (r, ProtocolName::from(""))),
+                    )
                     .expect("Sending response is expected to work");
             }
             .boxed(),
diff --git a/polkadot/node/network/availability-recovery/src/tests.rs b/polkadot/node/network/availability-recovery/src/tests.rs
index 1cb52757bac92..f1dc5b98c09b8 100644
--- a/polkadot/node/network/availability-recovery/src/tests.rs
+++ b/polkadot/node/network/availability-recovery/src/tests.rs
@@ -22,13 +22,14 @@ use futures_timer::Delay;
 use parity_scale_codec::Encode;
 
 use polkadot_node_network_protocol::request_response::{
-    self as req_res, IncomingRequest, Recipient, ReqProtocolNames, Requests,
+    self as req_res, v1::AvailableDataFetchingRequest, IncomingRequest, Protocol, Recipient,
+    ReqProtocolNames, Requests,
 };
 use polkadot_node_subsystem_test_helpers::derive_erasure_chunks_with_proofs_and_root;
 
 use super::*;
 
-use sc_network::{config::RequestResponseConfig, IfDisconnected, OutboundFailure, RequestFailure};
+use sc_network::{IfDisconnected, OutboundFailure, ProtocolName, RequestFailure};
 
 use polkadot_node_primitives::{BlockData, PoV, Proof};
 use polkadot_node_subsystem::messages::{
@@ -48,8 +49,18 @@ type VirtualOverseer = TestSubsystemContextHandle<AvailabilityRecoveryMessage>;
 // Deterministic genesis hash for protocol names
 const GENESIS_HASH: Hash = Hash::repeat_byte(0xff);
 
-fn test_harness_fast_path<T: Future<Output = (VirtualOverseer, RequestResponseConfig)>>(
-    test: impl FnOnce(VirtualOverseer, RequestResponseConfig) -> T,
+fn request_receiver(
+    req_protocol_names: &ReqProtocolNames,
+) -> IncomingRequestReceiver<AvailableDataFetchingRequest> {
+    let receiver = IncomingRequest::get_config_receiver(req_protocol_names);
+    // Don't close the sending end of the request protocol. Otherwise, the subsystem will terminate.
+    std::mem::forget(receiver.1.inbound_queue);
+    receiver.0
+}
+
+fn test_harness<T: Future<Output = VirtualOverseer>>(
+    subsystem: AvailabilityRecoverySubsystem,
+    test: impl FnOnce(VirtualOverseer) -> T,
 ) {
     let _ = env_logger::builder()
         .is_test(true)
@@ -60,101 +71,23 @@ fn test_harness_fast_path<T: Future<Output = (VirtualOverseer, RequestResponseConfig)>>(
-    test: impl FnOnce(VirtualOverseer, RequestResponseConfig) -> T,
-) {
-    let _ = env_logger::builder()
-        .is_test(true)
-        .filter(Some("polkadot_availability_recovery"), log::LevelFilter::Trace)
-        .try_init();
-
-    let pool = sp_core::testing::TaskExecutor::new();
-
-    let (context, virtual_overseer) = make_subsystem_context(pool.clone());
-
-    let (collation_req_receiver, req_cfg) =
-        IncomingRequest::get_config_receiver(&ReqProtocolNames::new(&GENESIS_HASH, None));
-    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
-        collation_req_receiver,
-        Metrics::new_dummy(),
-    );
-    let subsystem = subsystem.run(context);
-
-    let test_fut = test(virtual_overseer, req_cfg);
+    let test_fut = test(virtual_overseer);
 
     futures::pin_mut!(test_fut);
     futures::pin_mut!(subsystem);
 
     executor::block_on(future::join(
         async move {
-            let (mut overseer, _req_cfg) = test_fut.await;
+            let mut overseer = test_fut.await;
             overseer_signal(&mut overseer, OverseerSignal::Conclude).await;
         },
         subsystem,
     ))
     .1
-    .unwrap();
-}
-
-fn test_harness_chunks_if_pov_large<
-    T: Future<Output = (VirtualOverseer, RequestResponseConfig)>,
->(
-    test: impl FnOnce(VirtualOverseer, RequestResponseConfig) -> T,
-) {
-    let _ = env_logger::builder()
-        .is_test(true)
-        .filter(Some("polkadot_availability_recovery"), log::LevelFilter::Trace)
-        .try_init();
-
-    let pool = sp_core::testing::TaskExecutor::new();
-
-    let (context, virtual_overseer) = make_subsystem_context(pool.clone());
-
-    let (collation_req_receiver, req_cfg) =
-        IncomingRequest::get_config_receiver(&ReqProtocolNames::new(&GENESIS_HASH, None));
-    let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large(
-        collation_req_receiver,
-        Metrics::new_dummy(),
-    );
-    let subsystem = subsystem.run(context);
-
-    let test_fut = test(virtual_overseer, req_cfg);
-
-    futures::pin_mut!(test_fut);
-    futures::pin_mut!(subsystem);
-
-    executor::block_on(future::join(
-        async move {
-            let (mut overseer, _req_cfg) = test_fut.await;
-            overseer_signal(&mut overseer, OverseerSignal::Conclude).await;
-        },
-        subsystem,
-    ))
-    .1
-    .unwrap();
 }
 
 const TIMEOUT: Duration = Duration::from_millis(300);
@@ -342,11 +275,12 @@ impl TestState {
 
     async fn test_chunk_requests(
         &self,
+        req_protocol_names: &ReqProtocolNames,
         candidate_hash: CandidateHash,
        virtual_overseer: &mut VirtualOverseer,
         n: usize,
         who_has: impl Fn(usize) -> Has,
-    ) -> Vec<oneshot::Sender<std::result::Result<Vec<u8>, RequestFailure>>> {
+    ) -> Vec<oneshot::Sender<std::result::Result<(Vec<u8>, ProtocolName), RequestFailure>>> {
         // arbitrary order.
         let mut i = 0;
         let mut senders = Vec::new();
@@ -380,7 +314,7 @@ impl TestState {
 
             let _ = req.pending_response.send(
                 available_data.map(|r|
-                    req_res::v1::ChunkFetchingResponse::from(r).encode()
+                    (req_res::v1::ChunkFetchingResponse::from(r).encode(), req_protocol_names.get_name(Protocol::ChunkFetchingV1))
                 )
             );
         }
@@ -394,10 +328,11 @@ impl TestState {
 
     async fn test_full_data_requests(
         &self,
+        req_protocol_names: &ReqProtocolNames,
         candidate_hash: CandidateHash,
         virtual_overseer: &mut VirtualOverseer,
         who_has: impl Fn(usize) -> Has,
-    ) -> Vec<oneshot::Sender<std::result::Result<Vec<u8>, RequestFailure>>> {
+    ) -> Vec<oneshot::Sender<std::result::Result<(Vec<u8>, ProtocolName), RequestFailure>>> {
         let mut senders = Vec::new();
         for _ in 0..self.validators.len() {
             // Receive a request for a chunk.
@@ -433,9 +368,10 @@ impl TestState {
             let done = available_data.as_ref().ok().map_or(false, |x| x.is_some());
 
             let _ = req.pending_response.send(
-                available_data.map(|r|
-                    req_res::v1::AvailableDataFetchingResponse::from(r).encode()
-                )
+                available_data.map(|r|(
+                    req_res::v1::AvailableDataFetchingResponse::from(r).encode(),
+                    req_protocol_names.get_name(Protocol::AvailableDataFetchingV1)
+                ))
             );
             if done { break }
@@ -532,8 +468,13 @@ impl Default for TestState {
 #[test]
 fn availability_is_recovered_from_chunks_if_no_group_provided() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_fast_path(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_fast_path(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -565,6 +506,7 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold(),
@@ -600,6 +542,7 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 new_candidate.hash(),
                 &mut virtual_overseer,
                 test_state.impossibility_threshold(),
@@ -609,15 +552,20 @@ fn availability_is_recovered_from_chunks_if_no_group_provided() {
 
         // A request times out with `Unavailable` error.
         assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunks_only() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_only(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -649,6 +597,7 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold(),
@@ -684,6 +633,7 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 new_candidate.hash(),
                 &mut virtual_overseer,
                 test_state.impossibility_threshold(),
@@ -693,15 +643,20 @@ fn availability_is_recovered_from_chunks_even_if_backing_group_supplied_if_chunk
 
         // A request times out with `Unavailable` error.
         assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn bad_merkle_path_leads_to_recovery_error() {
     let mut test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_fast_path(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_fast_path(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -740,6 +695,7 @@ fn bad_merkle_path_leads_to_recovery_error() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.impossibility_threshold(),
@@ -749,15 +705,20 @@ fn bad_merkle_path_leads_to_recovery_error() {
 
         // A request times out with `Unavailable` error.
         assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn wrong_chunk_index_leads_to_recovery_error() {
     let mut test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_fast_path(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_fast_path(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -796,6 +757,7 @@ fn wrong_chunk_index_leads_to_recovery_error() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.impossibility_threshold(),
@@ -805,15 +767,20 @@ fn wrong_chunk_index_leads_to_recovery_error() {
 
         // A request times out with `Unavailable` error as there are no good peers.
         assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn invalid_erasure_coding_leads_to_invalid_error() {
     let mut test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_fast_path(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_fast_path(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         let pov = PoV { block_data: BlockData(vec![69; 64]) };
 
         let (bad_chunks, bad_erasure_root) = derive_erasure_chunks_with_proofs_and_root(
@@ -859,6 +826,7 @@ fn invalid_erasure_coding_leads_to_invalid_error() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold(),
@@ -868,15 +836,20 @@ fn invalid_erasure_coding_leads_to_invalid_error() {
 
         // f+1 'valid' chunks can't produce correct data.
         assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Invalid);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn fast_path_backing_group_recovers() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_fast_path(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_fast_path(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -911,20 +884,30 @@ fn fast_path_backing_group_recovers() {
         test_state.respond_to_available_data_query(&mut virtual_overseer, false).await;
 
         test_state
-            .test_full_data_requests(candidate_hash, &mut virtual_overseer, who_has)
+            .test_full_data_requests(
+                &req_protocol_names,
+                candidate_hash,
+                &mut virtual_overseer,
+                who_has,
+            )
             .await;
 
         // Recovered data should match the original one.
         assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn recovers_from_only_chunks_if_pov_large() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_if_pov_large(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -965,6 +948,7 @@ fn recovers_from_only_chunks_if_pov_large() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold(),
@@ -1009,6 +993,7 @@ fn recovers_from_only_chunks_if_pov_large() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 new_candidate.hash(),
                 &mut virtual_overseer,
                 test_state.impossibility_threshold(),
@@ -1018,15 +1003,20 @@ fn recovers_from_only_chunks_if_pov_large() {
 
         // A request times out with `Unavailable` error.
         assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn fast_path_backing_group_recovers_if_pov_small() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_if_pov_large(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_if_pov_large(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1070,20 +1060,30 @@ fn fast_path_backing_group_recovers_if_pov_small() {
 
         test_state.respond_to_available_data_query(&mut virtual_overseer, false).await;
 
         test_state
-            .test_full_data_requests(candidate_hash, &mut virtual_overseer, who_has)
+            .test_full_data_requests(
+                &req_protocol_names,
+                candidate_hash,
+                &mut virtual_overseer,
+                who_has,
+            )
             .await;
 
         // Recovered data should match the original one.
         assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn no_answers_in_fast_path_causes_chunk_requests() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_fast_path(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_fast_path(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1119,13 +1119,19 @@ fn no_answers_in_fast_path_causes_chunk_requests() {
         test_state.respond_to_available_data_query(&mut virtual_overseer, false).await;
 
         test_state
-            .test_full_data_requests(candidate_hash, &mut virtual_overseer, who_has)
+            .test_full_data_requests(
+                &req_protocol_names,
+                candidate_hash,
+                &mut virtual_overseer,
+                who_has,
+            )
             .await;
 
         test_state.respond_to_query_all_request(&mut virtual_overseer, |_| false).await;
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold(),
@@ -1135,15 +1141,20 @@ fn no_answers_in_fast_path_causes_chunk_requests() {
 
         // Recovered data should match the original one.
         assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn task_canceled_when_receivers_dropped() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_only(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1170,7 +1181,7 @@ fn task_canceled_when_receivers_dropped() {
 
         for _ in 0..test_state.validators.len() {
             match virtual_overseer.recv().timeout(TIMEOUT).await {
-                None => return (virtual_overseer, req_cfg),
+                None => return virtual_overseer,
                 Some(_) => continue,
             }
         }
@@ -1182,8 +1193,13 @@ fn task_canceled_when_receivers_dropped() {
 #[test]
 fn chunks_retry_until_all_nodes_respond() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_only(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1215,6 +1231,7 @@ fn chunks_retry_until_all_nodes_respond() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.validators.len() - test_state.threshold(),
@@ -1225,6 +1242,7 @@ fn chunks_retry_until_all_nodes_respond() {
         // we get to go another round!
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.impossibility_threshold(),
@@ -1234,15 +1252,20 @@ fn chunks_retry_until_all_nodes_respond() {
 
         // Recovered data should match the original one.
         assert_eq!(rx.await.unwrap().unwrap_err(), RecoveryError::Unavailable);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn not_returning_requests_wont_stall_retrieval() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_only(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1277,13 +1300,18 @@ fn not_returning_requests_wont_stall_retrieval() {
 
         // Not returning senders won't cause the retrieval to stall:
         let _senders = test_state
-            .test_chunk_requests(candidate_hash, &mut virtual_overseer, not_returning_count, |_| {
-                Has::DoesNotReturn
-            })
+            .test_chunk_requests(
+                &req_protocol_names,
+                candidate_hash,
+                &mut virtual_overseer,
+                not_returning_count,
+                |_| Has::DoesNotReturn,
+            )
             .await;
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 // Should start over:
@@ -1295,6 +1323,7 @@ fn not_returning_requests_wont_stall_retrieval() {
 
         // we get to go another round!
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold(),
@@ -1304,15 +1333,20 @@ fn not_returning_requests_wont_stall_retrieval() {
 
         // Recovered data should match the original one:
         assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn all_not_returning_requests_still_recovers_on_return() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_only(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1344,6 +1378,7 @@ fn all_not_returning_requests_still_recovers_on_return() {
 
         let senders = test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.validators.len(),
@@ -1358,6 +1393,7 @@ fn all_not_returning_requests_still_recovers_on_return() {
                 std::mem::drop(senders);
             },
             test_state.test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 // Should start over:
@@ -1370,6 +1406,7 @@ fn all_not_returning_requests_still_recovers_on_return() {
 
         // we get to go another round!
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold(),
@@ -1379,15 +1416,20 @@ fn all_not_returning_requests_still_recovers_on_return() {
 
         // Recovered data should match the original one:
         assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn returns_early_if_we_have_the_data() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_only(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1414,15 +1456,20 @@ fn returns_early_if_we_have_the_data() {
         test_state.respond_to_available_data_query(&mut virtual_overseer, true).await;
 
         assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn does_not_query_local_validator() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_only(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1453,6 +1500,7 @@ fn does_not_query_local_validator() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.validators.len(),
@@ -1463,6 +1511,7 @@ fn does_not_query_local_validator() {
 
         // second round, make sure it uses the local chunk.
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold() - 1,
@@ -1471,15 +1520,20 @@ fn does_not_query_local_validator() {
             .await;
 
         assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
 
 #[test]
 fn invalid_local_chunk_is_ignored() {
     let test_state = TestState::default();
+    let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+    let subsystem = AvailabilityRecoverySubsystem::with_chunks_only(
+        request_receiver(&req_protocol_names),
+        Metrics::new_dummy(),
+    );
 
-    test_harness_chunks_only(|mut virtual_overseer, req_cfg| async move {
+    test_harness(subsystem, |mut virtual_overseer| async move {
         overseer_signal(
             &mut virtual_overseer,
             OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
@@ -1512,6 +1566,7 @@ fn invalid_local_chunk_is_ignored() {
 
         test_state
             .test_chunk_requests(
+                &req_protocol_names,
                 candidate_hash,
                 &mut virtual_overseer,
                 test_state.threshold() - 1,
@@ -1520,6 +1575,6 @@ fn invalid_local_chunk_is_ignored() {
             .await;
 
         assert_eq!(rx.await.unwrap().unwrap(), test_state.available_data);
-        (virtual_overseer, req_cfg)
+        virtual_overseer
     });
 }
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
index 3a9740149948f..1ba6389212cc5 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/mod.rs
@@ -17,6 +17,7 @@ use super::*;
 
 use assert_matches::assert_matches;
 use futures::{executor, future, Future};
+use sc_network::ProtocolName;
 use sp_core::{crypto::Pair, Encode};
 use sp_keyring::Sr25519Keyring;
 use sp_keystore::Keystore;
@@ -559,11 +560,11 @@ fn act_on_advertisement_v2() {
         .await;
 
         response_channel
-            .send(Ok(request_v1::CollationFetchingResponse::Collation(
-                candidate_a.clone(),
-                pov.clone(),
-            )
-            .encode()))
+            .send(Ok((
+                request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone())
+                    .encode(),
+                ProtocolName::from(""),
+            )))
             .expect("Sending response should succeed");
 
         assert_candidate_backing_second(
@@ -761,11 +762,11 @@ fn fetch_one_collation_at_a_time() {
         candidate_a.descriptor.relay_parent = test_state.relay_parent;
         candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash();
         response_channel
-            .send(Ok(request_v1::CollationFetchingResponse::Collation(
-                candidate_a.clone(),
-                pov.clone(),
-            )
-            .encode()))
+            .send(Ok((
+                request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone())
+                    .encode(),
+                ProtocolName::from(""),
+            )))
             .expect("Sending response should succeed");
 
         assert_candidate_backing_second(
@@ -885,19 +886,19 @@ fn fetches_next_collation() {
 
         // First request finishes now:
         response_channel_non_exclusive
-            .send(Ok(request_v1::CollationFetchingResponse::Collation(
-                candidate_a.clone(),
-                pov.clone(),
-            )
-            .encode()))
+            .send(Ok((
+                request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone())
+                    .encode(),
+                ProtocolName::from(""),
+            )))
             .expect("Sending response should succeed");
 
         response_channel
-            .send(Ok(request_v1::CollationFetchingResponse::Collation(
-                candidate_a.clone(),
-                pov.clone(),
-            )
-            .encode()))
+            .send(Ok((
+                request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone())
+                    .encode(),
+                ProtocolName::from(""),
+            )))
             .expect("Sending response should succeed");
         assert_candidate_backing_second(
@@ -1023,11 +1024,11 @@ fn fetch_next_collation_on_invalid_collation() {
         candidate_a.descriptor.relay_parent = test_state.relay_parent;
         candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash();
         response_channel
-            .send(Ok(request_v1::CollationFetchingResponse::Collation(
-                candidate_a.clone(),
-                pov.clone(),
-            )
-            .encode()))
+            .send(Ok((
+                request_v1::CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone())
+                    .encode(),
+                ProtocolName::from(""),
+            )))
             .expect("Sending response should succeed");
 
         let receipt = assert_candidate_backing_second(
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
index c5236ef3eb211..23963e65554eb 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
@@ -314,11 +314,11 @@ fn v1_advertisement_accepted_and_seconded() {
         let pov = PoV { block_data: BlockData(vec![1]) };
 
         response_channel
-            .send(Ok(request_v2::CollationFetchingResponse::Collation(
-                candidate.clone(),
-                pov.clone(),
-            )
-            .encode()))
+            .send(Ok((
+                request_v2::CollationFetchingResponse::Collation(candidate.clone(), pov.clone())
+                    .encode(),
+                ProtocolName::from(""),
+            )))
             .expect("Sending response should succeed");
 
         assert_candidate_backing_second(
@@ -565,11 +565,14 @@ fn second_multiple_candidates_per_relay_parent() {
             let pov = PoV { block_data: BlockData(vec![1]) };
 
             response_channel
-                .send(Ok(request_v2::CollationFetchingResponse::Collation(
-                    candidate.clone(),
-                    pov.clone(),
-                )
-                .encode()))
+                .send(Ok((
+                    request_v2::CollationFetchingResponse::Collation(
+                        candidate.clone(),
+                        pov.clone(),
+                    )
+                    .encode(),
+                    ProtocolName::from(""),
+                )))
                 .expect("Sending response should succeed");
 
             assert_candidate_backing_second(
@@ -717,11 +720,11 @@ fn fetched_collation_sanity_check() {
         let pov = PoV { block_data: BlockData(vec![1]) };
 
         response_channel
-            .send(Ok(request_v2::CollationFetchingResponse::Collation(
-                candidate.clone(),
-                pov.clone(),
-            )
-            .encode()))
+            .send(Ok((
+                request_v2::CollationFetchingResponse::Collation(candidate.clone(), pov.clone())
+                    .encode(),
+                ProtocolName::from(""),
+            )))
             .expect("Sending response should succeed");
 
         // PVD request.
diff --git a/polkadot/node/network/dispute-distribution/src/tests/mod.rs b/polkadot/node/network/dispute-distribution/src/tests/mod.rs
index a3520bf35f802..880d1b18032cc 100644
--- a/polkadot/node/network/dispute-distribution/src/tests/mod.rs
+++ b/polkadot/node/network/dispute-distribution/src/tests/mod.rs
@@ -32,7 +32,7 @@ use futures::{
 
 use futures_timer::Delay;
 use parity_scale_codec::{Decode, Encode};
-use sc_network::config::RequestResponseConfig;
+use sc_network::{config::RequestResponseConfig, ProtocolName};
 
 use polkadot_node_network_protocol::{
     request_response::{v1::DisputeRequest, IncomingRequest, ReqProtocolNames},
@@ -832,7 +832,7 @@ async fn check_sent_requests(
     if confirm_receive {
         for req in reqs {
             req.pending_response.send(
-                Ok(DisputeResponse::Confirmed.encode())
+                Ok((DisputeResponse::Confirmed.encode(), ProtocolName::from("")))
             )
             .expect("Subsystem should be listening for a response.");
         }
diff --git a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
index 8ac9895ec5ad2..2766ec9815af1 100644
--- a/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
+++ b/polkadot/node/network/statement-distribution/src/legacy_v1/tests.rs
@@ -50,6 +50,7 @@ use polkadot_primitives_test_helpers::{
     dummy_committed_candidate_receipt, dummy_hash, AlwaysZeroRng,
 };
 use sc_keystore::LocalKeystore;
+use sc_network::ProtocolName;
 use sp_application_crypto::{sr25519::Pair, AppCrypto, Pair as TraitPair};
 use sp_authority_discovery::AuthorityPair;
 use sp_keyring::Sr25519Keyring;
@@ -1330,7 +1331,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing(
                 bad
             };
             let response = StatementFetchingResponse::Statement(bad_candidate);
-            outgoing.pending_response.send(Ok(response.encode())).unwrap();
+            outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap();
         }
     );
 
@@ -1382,7 +1383,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing(
             // On retry, we should have reverse order:
             assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
             let response = StatementFetchingResponse::Statement(candidate.clone());
-            outgoing.pending_response.send(Ok(response.encode())).unwrap();
+            outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap();
         }
     );
 
@@ -1869,7 +1870,7 @@ fn delay_reputation_changes() {
                 bad
             };
             let response = StatementFetchingResponse::Statement(bad_candidate);
-            outgoing.pending_response.send(Ok(response.encode())).unwrap();
+            outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap();
         }
     );
 
@@ -1913,7 +1914,7 @@ fn delay_reputation_changes() {
             // On retry, we should have reverse order:
             assert_eq!(outgoing.peer, Recipient::Peer(peer_c));
             let response = StatementFetchingResponse::Statement(candidate.clone());
-            outgoing.pending_response.send(Ok(response.encode())).unwrap();
+            outgoing.pending_response.send(Ok((response.encode(), ProtocolName::from("")))).unwrap();
         }
     );
 
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
index c34cf20d716ca..cd2751c80ef9f 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
@@ -38,6 +38,7 @@ use polkadot_primitives::{
     SessionIndex, SessionInfo, ValidatorPair,
 };
 use sc_keystore::LocalKeystore;
+use sc_network::ProtocolName;
 use sp_application_crypto::Pair as PairT;
 use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair;
 use sp_keyring::Sr25519Keyring;
@@ -604,7 +605,7 @@ async fn handle_sent_request(
                 persisted_validation_data,
                 statements,
             };
-            outgoing.pending_response.send(Ok(res.encode())).unwrap();
+            outgoing.pending_response.send(Ok((res.encode(), ProtocolName::from("")))).unwrap();
         }
     );
 }
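
Note (annotation, not part of the patch): every hunk above applies the same mechanical change. Request-response handlers in sc-network now answer through `pending_response` with `Result<(Vec<u8>, ProtocolName), RequestFailure>` instead of `Result<Vec<u8>, RequestFailure>`, so the responder also reports which protocol served the payload. Tests that exercise a single protocol pass `ProtocolName::from("")`; the availability-recovery tests pass the real name via `req_protocol_names.get_name(...)`. A minimal sketch of the pattern, assuming only `Encode` from parity-scale-codec and `ProtocolName` from sc-network; the helper name `encode_response` is illustrative and does not appear in the patch:

    use parity_scale_codec::Encode;
    use sc_network::ProtocolName;

    /// Pair a SCALE-encoded response with the protocol name it was served on.
    /// Single-protocol tests can pass `ProtocolName::from("")` (hypothetical helper).
    fn encode_response<R: Encode>(response: &R, protocol: ProtocolName) -> (Vec<u8>, ProtocolName) {
        (response.encode(), protocol)
    }

Used as, e.g., `req.pending_response.send(Ok(encode_response(&response, ProtocolName::from(""))))`, which matches the tuple shape the updated tests construct inline.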