chore(network): remove bad node checks at places as we will never connect to bad nodes
RolandSherwin authored and maqi committed Jul 18, 2024
1 parent c0516c9 commit 52a3903
Showing 4 changed files with 23 additions and 51 deletions.
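For context, the premise of this change is that a shunned peer never gets a connection established in the first place, so re-checking `bad_nodes` after a connection exists is dead code. A minimal illustrative sketch of that invariant, with stand-in types and names (not this crate's API):

    // If the dial path is the single gate that refuses peers already flagged
    // as bad, then every peer seen in on_connection_established is, by
    // construction, not flagged, and no further bad-node check is needed there.
    use std::collections::HashMap;

    type PeerId = u64; // stand-in for libp2p::PeerId
    type NodeIssue = String; // stand-in for the crate's issue type

    struct Swarm {
        // (recorded issues, is_bad) per peer, mirroring the BadNodes map in this diff
        bad_nodes: HashMap<PeerId, (Vec<NodeIssue>, bool)>,
    }

    impl Swarm {
        fn dial(&self, peer: PeerId) -> Result<(), String> {
            // The single gate: refuse to dial a peer already flagged as bad.
            if matches!(self.bad_nodes.get(&peer), Some((_, true))) {
                return Err(format!("refusing to dial bad peer {peer}"));
            }
            Ok(())
        }

        fn on_connection_established(&self, peer: PeerId) {
            // No bad-node check here: dial() already guaranteed it.
            println!("connected to {peer}");
        }
    }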
1 change: 0 additions & 1 deletion sn_networking/src/cmd.rs
@@ -734,7 +734,6 @@ impl SwarmDriver {
 
             SwarmCmd::RecordNodeIssue { peer_id, issue } => {
                 cmd_string = "RecordNodeIssues";
-                let _ = self.bad_nodes_ongoing_verifications.remove(&peer_id);
                 self.record_node_issue(peer_id, issue);
             }
             SwarmCmd::IsPeerShunned { target, sender } => {
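With the ongoing-verification set gone, `RecordNodeIssue` feeds straight into issue bookkeeping. A hypothetical sketch of that accumulate-then-flag pattern (the threshold and map shape are assumptions for illustration, not the crate's actual values):

    use std::collections::HashMap;

    const ISSUE_THRESHOLD: usize = 3; // assumed value, for illustration only

    #[derive(Default)]
    struct IssueTracker {
        // (issue log, is_bad) per peer, the same shape as BadNodes above
        bad_nodes: HashMap<u64, (Vec<String>, bool)>,
    }

    impl IssueTracker {
        fn record_node_issue(&mut self, peer: u64, issue: String) {
            let entry = self.bad_nodes.entry(peer).or_default();
            entry.0.push(issue);
            if entry.0.len() >= ISSUE_THRESHOLD {
                entry.1 = true; // enough issues: mark the peer as bad
            }
        }
    }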
4 changes: 1 addition & 3 deletions sn_networking/src/driver.rs
@@ -56,7 +56,7 @@ use sn_protocol::{
 };
 use sn_transfers::PaymentQuote;
 use std::{
-    collections::{btree_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
+    collections::{btree_map::Entry, BTreeMap, HashMap, HashSet},
     fmt::Debug,
     net::SocketAddr,
     num::NonZeroUsize,
@@ -615,7 +615,6 @@ impl NetworkBuilder {
             handled_times: 0,
             hard_disk_write_error: 0,
             bad_nodes: Default::default(),
-            bad_nodes_ongoing_verifications: Default::default(),
             quotes_history: Default::default(),
             replication_targets: Default::default(),
         };
@@ -665,7 +664,6 @@ pub struct SwarmDriver {
     handled_times: usize,
     pub(crate) hard_disk_write_error: usize,
     pub(crate) bad_nodes: BadNodes,
-    pub(crate) bad_nodes_ongoing_verifications: BTreeSet<PeerId>,
     pub(crate) quotes_history: BTreeMap<PeerId, PaymentQuote>,
     pub(crate) replication_targets: BTreeMap<PeerId, Instant>,
 }
44 changes: 18 additions & 26 deletions sn_networking/src/event/swarm.rs
@@ -187,7 +187,6 @@ impl SwarmDriver {
                             &peer_id,
                             &addrs,
                             &info.protocols,
-                            &self.bad_nodes,
                         );
                     }
 
@@ -254,32 +253,25 @@ impl SwarmDriver {
 
                     // If we are not local, we care only for peers that we dialed and thus are reachable.
                     if self.local || has_dialed {
-                        // To reduce the bad_node check resource usage,
-                        // during the connection establish process, only check cached black_list
-                        // The periodical check, which involves network queries shall filter
-                        // out bad_nodes eventually.
-                        if let Some((_issues, true)) = self.bad_nodes.get(&peer_id) {
-                            info!("Peer {peer_id:?} is considered as bad, blocking it.");
-                        } else {
-                            self.remove_bootstrap_from_full(peer_id);
-
-                            // Avoid have `direct link format` addrs co-exists with `relay` addr
-                            if has_relayed {
-                                addrs.retain(|multiaddr| {
-                                    multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit))
-                                });
-                            }
+                        // A bad node cannot establish a connection with us. So we can add it to the RT directly.
+                        self.remove_bootstrap_from_full(peer_id);
+
+                        // Avoid have `direct link format` addrs co-exists with `relay` addr
+                        if has_relayed {
+                            addrs.retain(|multiaddr| {
+                                multiaddr.iter().any(|p| matches!(p, Protocol::P2pCircuit))
+                            });
+                        }
 
-                            trace!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table");
+                        trace!(%peer_id, ?addrs, "identify: attempting to add addresses to routing table");
 
-                            // Attempt to add the addresses to the routing table.
-                            for multiaddr in addrs {
-                                let _routing_update = self
-                                    .swarm
-                                    .behaviour_mut()
-                                    .kademlia
-                                    .add_address(&peer_id, multiaddr);
-                            }
-                        }
+                        // Attempt to add the addresses to the routing table.
+                        for multiaddr in addrs {
+                            let _routing_update = self
+                                .swarm
+                                .behaviour_mut()
+                                .kademlia
+                                .add_address(&peer_id, multiaddr);
+                        }
                     }
                     trace!(
@@ -665,7 +657,7 @@ impl SwarmDriver {
                 }
 
                 // skip if the peer is a relay server that we're connected to
-                if self.relay_manager.keep_alive_peer(peer_id, &self.bad_nodes) {
+                if self.relay_manager.keep_alive_peer(peer_id) {
                     return true; // retain peer
                 }
 
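The retained logic above filters a peer's addresses down to relayed ones whenever a relayed connection exists. A small self-contained example of that filter using libp2p types (the function name is ours; assumes the `libp2p` crate):

    use libp2p::multiaddr::{Multiaddr, Protocol};

    // Keep only addresses that route through a relay (/p2p-circuit), so
    // direct addrs never co-exist with relay addrs for the same peer.
    fn keep_only_relay_addrs(addrs: &mut Vec<Multiaddr>) {
        addrs.retain(|a| a.iter().any(|p| matches!(p, Protocol::P2pCircuit)));
    }

    fn main() {
        let mut addrs: Vec<Multiaddr> = vec![
            "/ip4/10.0.0.1/tcp/4001".parse().unwrap(),
            "/ip4/10.0.0.2/tcp/4001/p2p-circuit".parse().unwrap(),
        ];
        keep_only_relay_addrs(&mut addrs);
        assert_eq!(addrs.len(), 1); // only the /p2p-circuit addr survives
    }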
25 changes: 4 additions & 21 deletions sn_networking/src/relay_manager.rs
@@ -57,19 +57,10 @@ impl RelayManager {
         self.enable_client = enable;
     }
 
-    /// Should we keep this peer alive?
-    /// If a peer is considered as a bad node, closing it's connection would remove that server from the listen addr.
-    #[allow(clippy::nonminimal_bool)]
-    pub(crate) fn keep_alive_peer(&self, peer_id: &PeerId, bad_nodes: &BadNodes) -> bool {
-        let is_not_bad = if let Some((_, is_bad)) = bad_nodes.get(peer_id) {
-            !*is_bad
-        } else {
-            true
-        };
-
-        // we disconnect from bad server
-        (self.connected_relays.contains_key(peer_id) && is_not_bad)
-            || (self.waiting_for_reservation.contains_key(peer_id) && is_not_bad)
+    /// Should we keep this peer alive? Closing a connection to that peer would remove that server from the listen addr.
+    pub(crate) fn keep_alive_peer(&self, peer_id: &PeerId) -> bool {
+        self.connected_relays.contains_key(peer_id)
+            || self.waiting_for_reservation.contains_key(peer_id)
             // but servers provide connections to bad nodes.
             || self.reserved_by.contains(peer_id)
     }
@@ -81,20 +72,12 @@ impl RelayManager {
         peer_id: &PeerId,
         addrs: &HashSet<Multiaddr>,
         stream_protocols: &Vec<StreamProtocol>,
-        bad_nodes: &BadNodes,
     ) {
         if self.candidates.len() >= MAX_POTENTIAL_CANDIDATES {
             trace!("Got max relay candidates");
             return;
         }
 
-        if let Some((_, is_bad)) = bad_nodes.get(peer_id) {
-            if *is_bad {
-                debug!("Not adding peer {peer_id:?} as relay candidate as it is a bad node.");
-                return;
-            }
-        }
-
         if Self::does_it_support_relay_server_protocol(stream_protocols) {
             // todo: collect and manage multiple addrs
             if let Some(addr) = addrs.iter().next() {
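After this change, `keep_alive_peer` is a pure membership test: keep the connection if the peer serves as our relay, has a reservation in flight, or relays through us. A hedged sketch with simplified field types (the real fields hold richer values):

    use std::collections::{HashMap, HashSet};

    type PeerId = u64; // stand-in for libp2p::PeerId

    struct RelayManager {
        connected_relays: HashMap<PeerId, ()>,        // relays we are using
        waiting_for_reservation: HashMap<PeerId, ()>, // reservations in flight
        reserved_by: HashSet<PeerId>,                 // peers relaying through us
    }

    impl RelayManager {
        fn keep_alive_peer(&self, peer_id: &PeerId) -> bool {
            self.connected_relays.contains_key(peer_id)
                || self.waiting_for_reservation.contains_key(peer_id)
                || self.reserved_by.contains(peer_id)
        }
    }

Dropping the bad-node terms also lets the diff delete the #[allow(clippy::nonminimal_bool)] attribute, since the `&& is_not_bad` factors that forced the non-minimal boolean form are gone.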
