From 9bd127f71feed5f104a7a1f4fe93aca019b98767 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 31 Oct 2023 08:09:02 +1100 Subject: [PATCH 01/33] refactor(swarm-test): don't implicitly add external addresses This is a safer default than always adding the memory address as external. Kademlia for example depends on whether we have an external address. Motivated-by: #4735. Pull-Request: #4743. --- misc/allow-block-list/src/lib.rs | 16 +- misc/connection-limits/src/lib.rs | 2 +- protocols/autonat/tests/test_client.rs | 2 +- protocols/dcutr/tests/lib.rs | 14 +- protocols/gossipsub/tests/smoke.rs | 2 +- protocols/identify/tests/smoke.rs | 7 +- protocols/kad/tests/client_mode.rs | 17 +- protocols/perf/tests/lib.rs | 2 +- protocols/ping/tests/ping.rs | 4 +- protocols/rendezvous/tests/rendezvous.rs | 8 +- .../request-response/tests/error_reporting.rs | 12 +- protocols/request-response/tests/ping.rs | 6 +- swarm-test/src/lib.rs | 148 ++++++++++++------ swarm/tests/connection_close.rs | 2 +- 14 files changed, 141 insertions(+), 101 deletions(-) diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index 42d3c0ef613..9f2524733e6 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -284,7 +284,7 @@ mod tests { async fn cannot_dial_blocked_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.behaviour_mut().block_peer(*listener.local_peer_id()); @@ -298,7 +298,7 @@ mod tests { async fn can_dial_unblocked_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.behaviour_mut().block_peer(*listener.local_peer_id()); dialer @@ -312,7 +312,7 @@ mod tests { async fn blocked_peer_cannot_dial_us() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; listener.behaviour_mut().block_peer(*dialer.local_peer_id()); dial(&mut dialer, &listener).unwrap(); @@ -334,7 +334,7 @@ mod tests { async fn connections_get_closed_upon_blocked() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.connect(&mut listener).await; dialer.behaviour_mut().block_peer(*listener.local_peer_id()); @@ -360,7 +360,7 @@ mod tests { async fn cannot_dial_peer_unless_allowed() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; let DialError::Denied { cause } = dial(&mut dialer, &listener).unwrap_err() else { panic!("unexpected dial error") @@ -375,7 +375,7 @@ mod tests { async fn cannot_dial_disallowed_peer() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); dialer @@ -392,7 +392,7 @@ mod tests { 
async fn not_allowed_peer_cannot_dial_us() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer .dial( @@ -429,7 +429,7 @@ mod tests { async fn connections_get_closed_upon_disallow() { let mut dialer = Swarm::new_ephemeral(|_| Behaviour::::default()); let mut listener = Swarm::new_ephemeral(|_| Behaviour::::default()); - listener.listen().await; + listener.listen().with_memory_addr_external().await; dialer.behaviour_mut().allow_peer(*listener.local_peer_id()); listener.behaviour_mut().allow_peer(*dialer.local_peer_id()); diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index b873da76be7..d0ea3436177 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -448,7 +448,7 @@ mod tests { }); async_std::task::block_on(async { - let (listen_addr, _) = swarm1.listen().await; + let (listen_addr, _) = swarm1.listen().with_memory_addr_external().await; for _ in 0..limit { swarm2.connect(&mut swarm1).await; diff --git a/protocols/autonat/tests/test_client.rs b/protocols/autonat/tests/test_client.rs index 743f4cc1b51..7509d3ef425 100644 --- a/protocols/autonat/tests/test_client.rs +++ b/protocols/autonat/tests/test_client.rs @@ -155,7 +155,7 @@ async fn test_confidence() { // Randomly test either for public or for private status the confidence. let test_public = rand::random::(); if test_public { - client.listen().await; + client.listen().with_memory_addr_external().await; } else { let unreachable_addr = "/ip4/127.0.0.1/tcp/42".parse().unwrap(); client.behaviour_mut().probe_address(unreachable_addr); diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 6078b101fa2..93661f1cba5 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -40,15 +40,9 @@ async fn connect() { let mut src = build_client(); // Have all swarms listen on a local TCP address. 
- let (memory_addr, relay_addr) = relay.listen().await; - relay.remove_external_address(&memory_addr); - relay.add_external_address(relay_addr.clone()); - - let (dst_mem_addr, dst_tcp_addr) = dst.listen().await; - let (src_mem_addr, _) = src.listen().await; - - dst.remove_external_address(&dst_mem_addr); - src.remove_external_address(&src_mem_addr); + let (_, relay_tcp_addr) = relay.listen().with_tcp_addr_external().await; + let (_, dst_tcp_addr) = dst.listen().await; + src.listen().await; assert!(src.external_addresses().next().is_none()); assert!(dst.external_addresses().next().is_none()); @@ -58,7 +52,7 @@ async fn connect() { async_std::task::spawn(relay.loop_on_next()); - let dst_relayed_addr = relay_addr + let dst_relayed_addr = relay_tcp_addr .with(Protocol::P2p(relay_peer_id)) .with(Protocol::P2pCircuit) .with(Protocol::P2p(dst_peer_id)); diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index e4e4c90d768..e8577bc78cf 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -122,7 +122,7 @@ async fn build_node() -> Swarm { .unwrap(); gossipsub::Behaviour::new(MessageAuthenticity::Author(peer_id), config).unwrap() }); - swarm.listen().await; + swarm.listen().with_memory_addr_external().await; swarm } diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index c1926b4125f..8d11ef96d50 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -24,7 +24,8 @@ async fn periodic_identify() { }); let swarm2_peer_id = *swarm2.local_peer_id(); - let (swarm1_memory_listen, swarm1_tcp_listen_addr) = swarm1.listen().await; + let (swarm1_memory_listen, swarm1_tcp_listen_addr) = + swarm1.listen().with_memory_addr_external().await; let (swarm2_memory_listen, swarm2_tcp_listen_addr) = swarm2.listen().await; swarm2.connect(&mut swarm1).await; @@ -92,7 +93,7 @@ async fn identify_push() { ) }); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; // First, let the periodic identify do its thing. 
@@ -142,7 +143,7 @@ async fn discover_peer_after_disconnect() { ) }); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let swarm1_peer_id = *swarm1.local_peer_id(); diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs index 13bf08bd288..5324e679ab9 100644 --- a/protocols/kad/tests/client_mode.rs +++ b/protocols/kad/tests/client_mode.rs @@ -14,7 +14,7 @@ async fn server_gets_added_to_routing_table_by_client() { let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); - server.listen().await; + server.listen().with_memory_addr_external().await; client.connect(&mut server).await; let server_peer_id = *server.local_peer_id(); @@ -37,7 +37,7 @@ async fn two_servers_add_each_other_to_routing_table() { let mut server1 = Swarm::new_ephemeral(MyBehaviour::new); let mut server2 = Swarm::new_ephemeral(MyBehaviour::new); - server2.listen().await; + server2.listen().with_memory_addr_external().await; server1.connect(&mut server2).await; let server1_peer_id = *server1.local_peer_id(); @@ -54,7 +54,7 @@ async fn two_servers_add_each_other_to_routing_table() { other => panic!("Unexpected events: {other:?}"), } - server1.listen().await; + server1.listen().with_memory_addr_external().await; server2.connect(&mut server1).await; async_std::task::spawn(server1.loop_on_next()); @@ -79,14 +79,11 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti let (memory_addr, _) = server.listen().await; - // Remove memory address to simulate a server that doesn't know its external address. - server.remove_external_address(&memory_addr); client.dial(memory_addr.clone()).unwrap(); - // Do the usual identify send/receive dance. This triggers a mode change to Mode::Client. + + // Do the usual identify send/receive dance. 
match libp2p_swarm_test::drive(&mut client, &mut server).await { - ([Identify(_), Identify(_)], [Kad(ModeChanged { new_mode }), Identify(_), Identify(_)]) => { - assert_eq!(new_mode, Mode::Client); - } + ([Identify(_), Identify(_)], [Identify(_), Identify(_)]) => {} other => panic!("Unexpected events: {other:?}"), } @@ -115,7 +112,7 @@ async fn set_client_to_server_mode() { let mut server = Swarm::new_ephemeral(MyBehaviour::new); - server.listen().await; + server.listen().with_memory_addr_external().await; client.connect(&mut server).await; let server_peer_id = *server.local_peer_id(); diff --git a/protocols/perf/tests/lib.rs b/protocols/perf/tests/lib.rs index a79e8dd36b3..447d8a06110 100644 --- a/protocols/perf/tests/lib.rs +++ b/protocols/perf/tests/lib.rs @@ -33,7 +33,7 @@ async fn perf() { let server_peer_id = *server.local_peer_id(); let mut client = Swarm::new_ephemeral(|_| client::Behaviour::new()); - server.listen().await; + server.listen().with_memory_addr_external().await; client.connect(&mut server).await; tokio::task::spawn(server.loop_on_next()); diff --git a/protocols/ping/tests/ping.rs b/protocols/ping/tests/ping.rs index 946a2daadb6..3ca469f16a8 100644 --- a/protocols/ping/tests/ping.rs +++ b/protocols/ping/tests/ping.rs @@ -36,7 +36,7 @@ fn ping_pong() { let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(cfg.clone())); async_std::task::block_on(async { - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; for _ in 0..count.get() { @@ -67,7 +67,7 @@ fn unsupported_doesnt_fail() { let mut swarm2 = Swarm::new_ephemeral(|_| ping::Behaviour::new(ping::Config::new())); let result = async_std::task::block_on(async { - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let swarm1_peer_id = *swarm1.local_peer_id(); async_std::task::spawn(swarm1.loop_on_next()); diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index 67b4bc6ad57..fec56365768 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -429,7 +429,7 @@ async fn new_server_with_connected_clients( async fn new_client() -> Swarm { let mut client = Swarm::new_ephemeral(rendezvous::client::Behaviour::new); - client.listen().await; // we need to listen otherwise we don't have addresses to register + client.listen().with_memory_addr_external().await; // we need to listen otherwise we don't have addresses to register client } @@ -437,7 +437,7 @@ async fn new_client() -> Swarm { async fn new_server(config: rendezvous::server::Config) -> Swarm { let mut server = Swarm::new_ephemeral(|_| rendezvous::server::Behaviour::new(config)); - server.listen().await; + server.listen().with_memory_addr_external().await; server } @@ -447,7 +447,7 @@ async fn new_combined_node() -> Swarm { client: rendezvous::client::Behaviour::new(identity), server: rendezvous::server::Behaviour::new(rendezvous::server::Config::default()), }); - node.listen().await; + node.listen().with_memory_addr_external().await; node } @@ -458,7 +458,7 @@ async fn new_impersonating_client() -> Swarm { // As such, the best we can do is hand eve a completely different keypair from what she is using to authenticate her connection. 
let someone_else = identity::Keypair::generate_ed25519(); let mut eve = Swarm::new_ephemeral(move |_| rendezvous::client::Behaviour::new(someone_else)); - eve.listen().await; + eve.listen().with_memory_addr_external().await; eve } diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs index cf651d395f5..2256403c0e4 100644 --- a/protocols/request-response/tests/error_reporting.rs +++ b/protocols/request-response/tests/error_reporting.rs @@ -21,7 +21,7 @@ async fn report_outbound_failure_on_read_response() { let (peer1_id, mut swarm1) = new_swarm(); let (peer2_id, mut swarm2) = new_swarm(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let server_task = async move { @@ -75,7 +75,7 @@ async fn report_outbound_failure_on_write_request() { let (peer1_id, mut swarm1) = new_swarm(); let (_peer2_id, mut swarm2) = new_swarm(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; // Expects no events because `Event::Request` is produced after `read_request`. @@ -117,7 +117,7 @@ async fn report_outbound_timeout_on_read_response() { let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let server_task = async move { @@ -161,7 +161,7 @@ async fn report_inbound_failure_on_read_request() { let (peer1_id, mut swarm1) = new_swarm(); let (_peer2_id, mut swarm2) = new_swarm(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; // Expects no events because `Event::Request` is produced after `read_request`. 
@@ -196,7 +196,7 @@ async fn report_inbound_failure_on_write_response() { let (peer1_id, mut swarm1) = new_swarm(); let (peer2_id, mut swarm2) = new_swarm(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; // Expects OutboundFailure::Io failure with `FailOnWriteResponse` error @@ -261,7 +261,7 @@ async fn report_inbound_timeout_on_write_response() { let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(100)); let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(200)); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; // Expects InboundFailure::Timeout diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index 37f21264d49..c751dc2b3dd 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -97,7 +97,7 @@ async fn ping_protocol() { }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; let expected_ping = ping.clone(); @@ -190,7 +190,7 @@ async fn emits_inbound_connection_closed_failure() { }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; swarm2.behaviour_mut().send_request(&peer1_id, ping.clone()); @@ -255,7 +255,7 @@ async fn emits_inbound_connection_closed_if_channel_is_dropped() { }); let peer2_id = *swarm2.local_peer_id(); - swarm1.listen().await; + swarm1.listen().with_memory_addr_external().await; swarm2.connect(&mut swarm1).await; swarm2.behaviour_mut().send_request(&peer1_id, ping.clone()); diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index 41a606b300c..85bd9c22e9a 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -19,8 +19,8 @@ // DEALINGS IN THE SOFTWARE. use async_trait::async_trait; -use futures::future::Either; -use futures::StreamExt; +use futures::future::{BoxFuture, Either}; +use futures::{FutureExt, StreamExt}; use libp2p_core::{ multiaddr::Protocol, transport::MemoryTransport, upgrade::Version, Multiaddr, Transport, }; @@ -32,6 +32,7 @@ use libp2p_swarm::{ }; use libp2p_yamux as yamux; use std::fmt::Debug; +use std::future::IntoFuture; use std::time::Duration; /// An extension trait for [`Swarm`] that makes it easier to set up a network of [`Swarm`]s for tests. @@ -49,6 +50,10 @@ pub trait SwarmExt { Self: Sized; /// Establishes a connection to the given [`Swarm`], polling both of them until the connection is established. + /// + /// This will take addresses from the `other` [`Swarm`] via [`Swarm::external_addresses`]. + /// By default, this iterator will not yield any addresses. + /// To add listen addresses as external addresses, use [`ListenFuture::with_memory_addr_external`] or [`ListenFuture::with_tcp_addr_external`]. async fn connect(&mut self, other: &mut Swarm) where T: NetworkBehaviour + Send, @@ -73,7 +78,7 @@ pub trait SwarmExt { /// Listens for incoming connections, polling the [`Swarm`] until the transport is ready to accept connections. /// /// The first address is for the memory transport, the second one for the TCP transport. - async fn listen(&mut self) -> (Multiaddr, Multiaddr); + fn listen(&mut self) -> ListenFuture<&mut Self>; /// Returns the next [`SwarmEvent`] or times out after 10 seconds. 
/// @@ -292,53 +297,12 @@ where } } - async fn listen(&mut self) -> (Multiaddr, Multiaddr) { - let memory_addr_listener_id = self.listen_on(Protocol::Memory(0).into()).unwrap(); - - // block until we are actually listening - let memory_multiaddr = self - .wait(|e| match e { - SwarmEvent::NewListenAddr { - address, - listener_id, - } => (listener_id == memory_addr_listener_id).then_some(address), - other => { - log::debug!( - "Ignoring {:?} while waiting for listening to succeed", - other - ); - None - } - }) - .await; - - // Memory addresses are externally reachable because they all share the same memory-space. - self.add_external_address(memory_multiaddr.clone()); - - let tcp_addr_listener_id = self - .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) - .unwrap(); - - let tcp_multiaddr = self - .wait(|e| match e { - SwarmEvent::NewListenAddr { - address, - listener_id, - } => (listener_id == tcp_addr_listener_id).then_some(address), - other => { - log::debug!( - "Ignoring {:?} while waiting for listening to succeed", - other - ); - None - } - }) - .await; - - // We purposely don't add the TCP addr as an external one because we want to only use the memory transport for making connections in here. - // The TCP transport is only supported for protocols that manage their own connections. - - (memory_multiaddr, tcp_multiaddr) + fn listen(&mut self) -> ListenFuture<&mut Self> { + ListenFuture { + add_memory_external: false, + add_tcp_external: false, + swarm: self, + } } async fn next_swarm_event( @@ -373,3 +337,87 @@ where } } } + +pub struct ListenFuture { + add_memory_external: bool, + add_tcp_external: bool, + swarm: S, +} + +impl ListenFuture { + /// Adds the memory address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// + /// This is typically "safe" for tests because within a process, memory addresses are "globally" reachable. + /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. + pub fn with_memory_addr_external(mut self) -> Self { + self.add_memory_external = true; + + self + } + + /// Adds the TCP address we are starting to listen on as an external address using [`Swarm::add_external_address`]. + /// + /// This is typically "safe" for tests because on the same machine, 127.0.0.1 is reachable for other [`Swarm`]s. + /// However, some tests depend on which addresses are external and need this to be configurable so it is not a good default. 
+ pub fn with_tcp_addr_external(mut self) -> Self { + self.add_tcp_external = true; + + self + } +} + +impl<'s, B> IntoFuture for ListenFuture<&'s mut Swarm> +where + B: NetworkBehaviour + Send, + ::ToSwarm: Debug, +{ + type Output = (Multiaddr, Multiaddr); + type IntoFuture = BoxFuture<'s, Self::Output>; + + fn into_future(self) -> Self::IntoFuture { + async move { + let swarm = self.swarm; + + let memory_addr_listener_id = swarm.listen_on(Protocol::Memory(0).into()).unwrap(); + + // block until we are actually listening + let memory_multiaddr = swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { + address, + listener_id, + } => (listener_id == memory_addr_listener_id).then_some(address), + other => { + panic!("Unexpected event while waiting for `NewListenAddr`: {other:?}") + } + }) + .await; + + let tcp_addr_listener_id = swarm + .listen_on("/ip4/127.0.0.1/tcp/0".parse().unwrap()) + .unwrap(); + + let tcp_multiaddr = swarm + .wait(|e| match e { + SwarmEvent::NewListenAddr { + address, + listener_id, + } => (listener_id == tcp_addr_listener_id).then_some(address), + other => { + panic!("Unexpected event while waiting for `NewListenAddr`: {other:?}") + } + }) + .await; + + if self.add_memory_external { + swarm.add_external_address(memory_multiaddr.clone()); + } + if self.add_tcp_external { + swarm.add_external_address(tcp_multiaddr.clone()); + } + + (memory_multiaddr, tcp_multiaddr) + } + .boxed() + } +} diff --git a/swarm/tests/connection_close.rs b/swarm/tests/connection_close.rs index a44518fa4ad..305e33c1804 100644 --- a/swarm/tests/connection_close.rs +++ b/swarm/tests/connection_close.rs @@ -16,7 +16,7 @@ async fn sends_remaining_events_to_behaviour_on_connection_close() { let mut swarm1 = Swarm::new_ephemeral(|_| Behaviour::new(3)); let mut swarm2 = Swarm::new_ephemeral(|_| Behaviour::new(3)); - swarm2.listen().await; + swarm2.listen().with_memory_addr_external().await; swarm1.connect(&mut swarm2).await; swarm1.disconnect_peer_id(*swarm2.local_peer_id()).unwrap(); From 3ea0f75c57b428a84cec82d9c817bba6e0d95306 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 31 Oct 2023 10:42:16 +1100 Subject: [PATCH 02/33] ci: only lint for changes in `src/` & `Cargo.toml` Sometimes, we need to adjust the tests of crates together with a feature change or fix in other crates. Changes to tests don't need to go into the changelog. Hence, we change the changelog-lint to only look at the diff in the `src/` directory and the `Cargo.toml` file. Pull-Request: #4748. --- scripts/ensure-version-bump-and-changelog.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/ensure-version-bump-and-changelog.sh b/scripts/ensure-version-bump-and-changelog.sh index 26a6668b3a1..164af5126eb 100755 --- a/scripts/ensure-version-bump-and-changelog.sh +++ b/scripts/ensure-version-bump-and-changelog.sh @@ -7,7 +7,7 @@ DIR_TO_CRATE=$(dirname "$MANIFEST_PATH") MERGE_BASE=$(git merge-base "$HEAD_SHA" master) # Find the merge base. This ensures we only diff what was actually added in the PR. 
-DIFF_TO_MASTER=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-status -- "$DIR_TO_CRATE") +SRC_DIFF_TO_MASTER=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-status -- "$DIR_TO_CRATE/src" "$DIR_TO_CRATE/Cargo.toml") CHANGELOG_DIFF=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-only -- "$DIR_TO_CRATE/CHANGELOG.md") VERSION_IN_CHANGELOG=$(awk -F' ' '/^## [0-9]+\.[0-9]+\.[0-9]+/{print $2; exit}' "$DIR_TO_CRATE/CHANGELOG.md") @@ -19,8 +19,8 @@ if [[ "$VERSION_IN_CHANGELOG" != "$VERSION_IN_MANIFEST" ]]; then exit 1 fi -# If the crate wasn't touched in this PR, exit early. -if [ -z "$DIFF_TO_MASTER" ]; then +# If the source files of this crate weren't touched in this PR, exit early. +if [ -z "$SRC_DIFF_TO_MASTER" ]; then exit 0; fi From 3820a98780d55c63f235d12971c182d636c9a196 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 31 Oct 2023 10:59:14 +1100 Subject: [PATCH 03/33] chore(identify): remove deprecated symbols Pull-Request: #4735. --- protocols/identify/CHANGELOG.md | 3 +++ protocols/identify/src/behaviour.rs | 25 ------------------ protocols/identify/src/handler.rs | 4 +-- protocols/identify/tests/smoke.rs | 39 +++++++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 28 deletions(-) diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 0b47163b51a..e5d7dc98ece 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -2,6 +2,9 @@ - Add `Info` to the `libp2p-identify::Event::Pushed` to report pushed info. See [PR 4527](https://github.com/libp2p/rust-libp2p/pull/4527) +- Remove deprecated `initial_delay`. + Identify requests are always sent instantly after the connection has been established. + See [PR 4735](https://github.com/libp2p/rust-libp2p/pull/4735) ## 0.43.1 diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index d58bcb4f5eb..631a68d77a2 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -71,14 +71,6 @@ pub struct Config { /// /// Defaults to `rust-libp2p/`. pub agent_version: String, - /// The initial delay before the first identification request - /// is sent to a remote on a newly established connection. - /// - /// Defaults to 0ms. - #[deprecated(note = "The `initial_delay` is no longer necessary and will be - completely removed since a remote should be able to instantly - answer to an identify request")] - pub initial_delay: Duration, /// The interval at which identification requests are sent to /// the remote on established connections after the first request, /// i.e. the delay between identification requests. @@ -106,13 +98,11 @@ pub struct Config { impl Config { /// Creates a new configuration for the identify [`Behaviour`] that /// advertises the given protocol version and public key. - #[allow(deprecated)] pub fn new(protocol_version: String, local_public_key: PublicKey) -> Self { Self { protocol_version, agent_version: format!("rust-libp2p/{}", env!("CARGO_PKG_VERSION")), local_public_key, - initial_delay: Duration::from_millis(0), interval: Duration::from_secs(5 * 60), push_listen_addr_updates: false, cache_size: 100, @@ -125,17 +115,6 @@ impl Config { self } - /// Configures the initial delay before the first identification - /// request is sent on a newly established connection to a peer. 
- #[deprecated(note = "The `initial_delay` is no longer necessary and will be - completely removed since a remote should be able to instantly - answer to an identify request thus also this setter will be removed")] - #[allow(deprecated)] - pub fn with_initial_delay(mut self, d: Duration) -> Self { - self.initial_delay = d; - self - } - /// Configures the interval at which identification requests are /// sent to peers after the initial request. pub fn with_interval(mut self, d: Duration) -> Self { @@ -235,7 +214,6 @@ impl NetworkBehaviour for Behaviour { type ConnectionHandler = Handler; type ToSwarm = Event; - #[allow(deprecated)] fn handle_established_inbound_connection( &mut self, _: ConnectionId, @@ -244,7 +222,6 @@ impl NetworkBehaviour for Behaviour { remote_addr: &Multiaddr, ) -> Result, ConnectionDenied> { Ok(Handler::new( - self.config.initial_delay, self.config.interval, peer, self.config.local_public_key.clone(), @@ -255,7 +232,6 @@ impl NetworkBehaviour for Behaviour { )) } - #[allow(deprecated)] fn handle_established_outbound_connection( &mut self, _: ConnectionId, @@ -264,7 +240,6 @@ impl NetworkBehaviour for Behaviour { _: Endpoint, ) -> Result, ConnectionDenied> { Ok(Handler::new( - self.config.initial_delay, self.config.interval, peer, self.config.local_public_key.clone(), diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index 51501d79f9c..966c7b378e0 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -117,9 +117,7 @@ pub enum Event { impl Handler { /// Creates a new `Handler`. - #[allow(clippy::too_many_arguments)] pub fn new( - initial_delay: Duration, interval: Duration, remote_peer_id: PeerId, public_key: PublicKey, @@ -135,7 +133,7 @@ impl Handler { STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, ), - trigger_next_identify: Delay::new(initial_delay), + trigger_next_identify: Delay::new(Duration::ZERO), exchanged_one_periodic_identify: false, interval, public_key, diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index 8d11ef96d50..2dc4ca9d9fd 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -3,6 +3,7 @@ use libp2p_identify as identify; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use std::iter; +use std::time::{Duration, Instant}; #[async_std::test] async fn periodic_identify() { @@ -179,3 +180,41 @@ async fn discover_peer_after_disconnect() { assert_eq!(connected_peer, swarm1_peer_id); } + +#[async_std::test] +async fn configured_interval_starts_after_first_identify() { + let _ = env_logger::try_init(); + + let identify_interval = Duration::from_secs(5); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_interval(identify_interval), + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()), + ) + }); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + async_std::task::spawn(swarm2.loop_on_next()); + + let start = Instant::now(); + + // Wait until we identified. + swarm1 + .wait(|event| { + matches!(event, SwarmEvent::Behaviour(identify::Event::Sent { .. 
})).then_some(()) + }) + .await; + + let time_to_first_identify = Instant::now().duration_since(start); + + assert!(time_to_first_identify < identify_interval) +} From 5d4883f8bf81b5f5ecacce72828de996e3496f65 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 Oct 2023 00:42:28 +0000 Subject: [PATCH 04/33] deps: bump futures-util from 0.3.28 to 0.3.29 Pull-Request: #4766. --- misc/futures-bounded/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/futures-bounded/Cargo.toml b/misc/futures-bounded/Cargo.toml index 9332667e476..7689e9bdcbc 100644 --- a/misc/futures-bounded/Cargo.toml +++ b/misc/futures-bounded/Cargo.toml @@ -13,7 +13,7 @@ publish = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -futures-util = { version = "0.3.28" } +futures-util = { version = "0.3.29" } futures-timer = "3.0.2" [dev-dependencies] From 8600e68d11aff8797b03f023e19798fc0f4ab362 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 Oct 2023 00:52:43 +0000 Subject: [PATCH 05/33] deps: bump tempfile from 3.8.0 to 3.8.1 Pull-Request: #4768. --- Cargo.lock | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0172af429ee..441e30b138d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -618,9 +618,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "blake2" @@ -2286,7 +2286,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.4", + "rustix 0.38.21", "windows-sys", ] @@ -3376,9 +3376,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.3" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" @@ -4432,6 +4432,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -4752,14 +4761,14 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.4" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.3", + "linux-raw-sys 0.4.10", "windows-sys", ] @@ -5377,14 +5386,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand 2.0.0", - "redox_syscall 0.3.5", - "rustix 0.38.4", + "redox_syscall 0.4.1", + "rustix 0.38.21", "windows-sys", ] @@ -5615,7 +5624,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ - "bitflags 2.3.3", + "bitflags 2.4.1", "bytes", "futures-core", "futures-util", From ec157171a7c1d733dcaff80b00ad9596c15701a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 Oct 2023 01:02:33 +0000 Subject: [PATCH 06/33] deps: bump base64 from 0.21.4 to 0.21.5 Pull-Request: #4767. --- Cargo.lock | 26 +++++++++++++------------- identity/Cargo.toml | 2 +- misc/keygen/Cargo.toml | 2 +- protocols/gossipsub/Cargo.toml | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 441e30b138d..5d9d761316a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -576,9 +576,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.4" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "base64ct" @@ -2327,7 +2327,7 @@ dependencies = [ name = "keygen" version = "0.1.0" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "clap", "libp2p-core", "libp2p-identity", @@ -2568,7 +2568,7 @@ version = "0.46.0" dependencies = [ "async-std", "asynchronous-codec 0.6.2", - "base64 0.21.4", + "base64 0.21.5", "byteorder", "bytes", "either", @@ -2629,7 +2629,7 @@ name = "libp2p-identity" version = "0.2.7" dependencies = [ "asn1_der", - "base64 0.21.4", + "base64 0.21.5", "bs58", "criterion", "ed25519-dalek", @@ -3020,7 +3020,7 @@ dependencies = [ name = "libp2p-server" version = "0.12.3" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "clap", "env_logger 0.10.0", "futures", @@ -3942,7 +3942,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "serde", ] @@ -3952,7 +3952,7 @@ version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3163d2912b7c3b52d651a055f2c7eec9ba5cd22d26ef75b8dd3a59980b185923" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "serde", ] @@ -4527,7 +4527,7 @@ version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bytes", "encoding_rs", "futures-core", @@ -4814,7 +4814,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", ] [[package]] @@ -5280,7 +5280,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7beb1624a3ea34778d58d30e2b8606b4d29fe65e87c4d50b87ed30afd5c3830c" dependencies = [ - 
"base64 0.21.4", + "base64 0.21.5", "crc", "lazy_static", "md-5", @@ -5413,7 +5413,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf0fe180d5f1f7dd32bb5f1a8d19231bb63dc9bbb1985e1dbb6f07163b6a8578" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.21.5", "cookie", "fantoccini", "futures", @@ -5803,7 +5803,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "58f4fcb97da0426e8146fe0e9b78cc13120161087256198701d12d9df77f7701" dependencies = [ "async-trait", - "base64 0.21.4", + "base64 0.21.5", "futures", "log", "md-5", diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 0b9db73983d..6e25699d9ed 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -42,7 +42,7 @@ rand = ["dep:rand", "ed25519-dalek?/rand_core"] [dev-dependencies] quickcheck = { workspace = true } -base64 = "0.21.4" +base64 = "0.21.5" serde_json = "1.0" rmp-serde = "1.1" criterion = "0.5" diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index 2c7cd68658b..20b94569f12 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -18,7 +18,7 @@ zeroize = "1" serde = { version = "1.0.190", features = ["derive"] } serde_json = "1.0.107" libp2p-core = { workspace = true } -base64 = "0.21.4" +base64 = "0.21.5" libp2p-identity = { workspace = true } [lints] diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index 77559fcb2fe..d76f9a3e364 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -15,7 +15,7 @@ wasm-bindgen = ["getrandom/js", "instant/wasm-bindgen"] [dependencies] asynchronous-codec = "0.6" -base64 = "0.21.4" +base64 = "0.21.5" byteorder = "1.5.0" bytes = "1.5" either = "1.9" From 57c2100db42288818395ec89f7564c50bb48ebe3 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 1 Nov 2023 00:19:02 +1100 Subject: [PATCH 07/33] fix(identify): only report observed address once per connection At the moment, `libp2p-identify` reports an observed address repeatedly as a **new** external address candidate even if it is the same address _from the same connection_. Unless the underlying transport supports roaming, a connection does not change its observed address. We change the behaviour of `libp2p-identify` to remember the observed address for a particular connection and not re-emit the `NewExternalAddrCandidate` event for it. This allows users to probabilistically promote a candidate to an external address based on its report frequency. If an address is reported twice, it means we have two connections where the remote observed this address. Chances are, we have port-reuse enabled for this connection and it might thus be dialable or at least a good candidate for hole-punching. Related: #4688. Pull-Request: #4721. --- misc/metrics/CHANGELOG.md | 2 + misc/metrics/src/swarm.rs | 51 ++++++- protocols/dcutr/tests/lib.rs | 10 +- protocols/identify/CHANGELOG.md | 4 + protocols/identify/src/behaviour.rs | 33 ++++- protocols/identify/tests/smoke.rs | 71 +++++++++ swarm/CHANGELOG.md | 2 + swarm/src/behaviour.rs | 9 +- swarm/src/lib.rs | 214 ++++++++++++++++------------ 9 files changed, 289 insertions(+), 107 deletions(-) diff --git a/misc/metrics/CHANGELOG.md b/misc/metrics/CHANGELOG.md index 7b8c01ae2f7..acad2043fc8 100644 --- a/misc/metrics/CHANGELOG.md +++ b/misc/metrics/CHANGELOG.md @@ -1,5 +1,7 @@ ## 0.14.0 - unreleased +- Add metrics for `SwarmEvent::{NewExternalAddrCandidate,ExternalAddrConfirmed,ExternalAddrExpired}`. 
+ See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). ## 0.13.1 diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index 8837457d36a..20d3ce2eff3 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -23,7 +23,7 @@ use std::sync::{Arc, Mutex}; use crate::protocol_stack; use instant::Instant; -use libp2p_swarm::ConnectionId; +use libp2p_swarm::{ConnectionId, SwarmEvent}; use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; @@ -41,6 +41,10 @@ pub(crate) struct Metrics { new_listen_addr: Family, expired_listen_addr: Family, + external_addr_candidates: Family, + external_addr_confirmed: Family, + external_addr_expired: Family, + listener_closed: Family, listener_error: Counter, @@ -82,6 +86,27 @@ impl Metrics { expired_listen_addr.clone(), ); + let external_addr_candidates = Family::default(); + sub_registry.register( + "external_addr_candidates", + "Number of new external address candidates", + external_addr_candidates.clone(), + ); + + let external_addr_confirmed = Family::default(); + sub_registry.register( + "external_addr_confirmed", + "Number of confirmed external addresses", + external_addr_confirmed.clone(), + ); + + let external_addr_expired = Family::default(); + sub_registry.register( + "external_addr_expired", + "Number of expired external addresses", + external_addr_expired.clone(), + ); + let listener_closed = Family::default(); sub_registry.register( "listener_closed", @@ -146,6 +171,9 @@ impl Metrics { connections_established, new_listen_addr, expired_listen_addr, + external_addr_candidates, + external_addr_confirmed, + external_addr_expired, listener_closed, listener_error, dial_attempt, @@ -296,6 +324,27 @@ impl super::Recorder { self.dial_attempt.inc(); } + SwarmEvent::NewExternalAddrCandidate { address } => { + self.external_addr_candidates + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } + SwarmEvent::ExternalAddrConfirmed { address } => { + self.external_addr_confirmed + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } + SwarmEvent::ExternalAddrExpired { address } => { + self.external_addr_expired + .get_or_create(&AddressLabels { + protocols: protocol_stack::as_string(address), + }) + .inc(); + } } } } diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 93661f1cba5..f43144154a7 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -69,13 +69,8 @@ async fn connect() { src.dial_and_wait(dst_relayed_addr.clone()).await; - loop { - match src - .next_swarm_event() - .await - .try_into_behaviour_event() - .unwrap() - { + while let Ok(event) = src.next_swarm_event().await.try_into_behaviour_event() { + match event { ClientEvent::Dcutr(dcutr::Event::RemoteInitiatedDirectConnectionUpgrade { remote_peer_id, remote_relayed_addr, @@ -215,6 +210,7 @@ async fn wait_for_reservation( addr_observed = true; } SwarmEvent::Behaviour(ClientEvent::Identify(_)) => {} + SwarmEvent::NewExternalAddrCandidate { .. } => {} e => panic!("{e:?}"), } } diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index e5d7dc98ece..960ed530682 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -5,6 +5,10 @@ - Remove deprecated `initial_delay`. Identify requests are always sent instantly after the connection has been established. 
See [PR 4735](https://github.com/libp2p/rust-libp2p/pull/4735) +- Don't repeatedly report the same observed address as a `NewExternalAddrCandidate`. + Instead, only report each observed address once per connection. + This allows users to probabilistically deem an address as external if it gets reported as a candidate repeatedly. + See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). ## 0.43.1 diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index 631a68d77a2..4f017dd1a9e 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -30,6 +30,7 @@ use libp2p_swarm::{ }; use libp2p_swarm::{ConnectionId, THandler, THandlerOutEvent}; use lru::LruCache; +use std::collections::hash_map::Entry; use std::num::NonZeroUsize; use std::{ collections::{HashMap, HashSet, VecDeque}, @@ -48,6 +49,10 @@ pub struct Behaviour { config: Config, /// For each peer we're connected to, the observed address to send back to it. connected: HashMap>, + + /// The address a remote observed for us. + our_observed_addresses: HashMap, + /// Pending events to be emitted when polled. events: VecDeque>, /// The addresses of all peers that we have discovered. @@ -148,6 +153,7 @@ impl Behaviour { Self { config, connected: HashMap::new(), + our_observed_addresses: Default::default(), events: VecDeque::new(), discovered_peers, listen_addresses: Default::default(), @@ -253,7 +259,7 @@ impl NetworkBehaviour for Behaviour { fn on_connection_handler_event( &mut self, peer_id: PeerId, - _: ConnectionId, + id: ConnectionId, event: THandlerOutEvent, ) { match event { @@ -269,8 +275,27 @@ impl NetworkBehaviour for Behaviour { let observed = info.observed_addr.clone(); self.events .push_back(ToSwarm::GenerateEvent(Event::Received { peer_id, info })); - self.events - .push_back(ToSwarm::NewExternalAddrCandidate(observed)); + + match self.our_observed_addresses.entry(id) { + Entry::Vacant(not_yet_observed) => { + not_yet_observed.insert(observed.clone()); + self.events + .push_back(ToSwarm::NewExternalAddrCandidate(observed)); + } + Entry::Occupied(already_observed) if already_observed.get() == &observed => { + // No-op, we already observed this address. + } + Entry::Occupied(mut already_observed) => { + log::info!( + "Our observed address on connection {id} changed from {} to {observed}", + already_observed.get() + ); + + *already_observed.get_mut() = observed.clone(); + self.events + .push_back(ToSwarm::NewExternalAddrCandidate(observed)); + } + } } handler::Event::Identification => { self.events @@ -356,6 +381,8 @@ impl NetworkBehaviour for Behaviour { } else if let Some(addrs) = self.connected.get_mut(&peer_id) { addrs.remove(&connection_id); } + + self.our_observed_addresses.remove(&connection_id); } FromSwarm::DialFailure(DialFailure { peer_id, error, .. 
}) => { if let Some(entry) = peer_id.and_then(|id| self.discovered_peers.get_mut(&id)) { diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index 2dc4ca9d9fd..9a61ccccdd4 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -1,7 +1,9 @@ +use futures::StreamExt; use libp2p_core::multiaddr::Protocol; use libp2p_identify as identify; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use std::collections::HashSet; use std::iter; use std::time::{Duration, Instant}; @@ -79,6 +81,75 @@ async fn periodic_identify() { other => panic!("Unexpected events: {other:?}"), } } +#[async_std::test] +async fn only_emits_address_candidate_once_per_connection() { + let _ = env_logger::try_init(); + + let mut swarm1 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("a".to_string(), identity.public()) + .with_agent_version("b".to_string()) + .with_interval(Duration::from_secs(1)), + ) + }); + let mut swarm2 = Swarm::new_ephemeral(|identity| { + identify::Behaviour::new( + identify::Config::new("c".to_string(), identity.public()) + .with_agent_version("d".to_string()), + ) + }); + + swarm2.listen().with_memory_addr_external().await; + swarm1.connect(&mut swarm2).await; + + async_std::task::spawn(swarm2.loop_on_next()); + + let swarm_events = futures::stream::poll_fn(|cx| swarm1.poll_next_unpin(cx)) + .take(5) + .collect::>() + .await; + + let infos = swarm_events + .iter() + .filter_map(|e| match e { + SwarmEvent::Behaviour(identify::Event::Received { info, .. }) => Some(info.clone()), + _ => None, + }) + .collect::>(); + + assert!( + infos.len() > 1, + "should exchange identify payload more than once" + ); + + let varying_observed_addresses = infos + .iter() + .map(|i| i.observed_addr.clone()) + .collect::>(); + assert_eq!( + varying_observed_addresses.len(), + 1, + "Observed address should not vary on persistent connection" + ); + + let external_address_candidates = swarm_events + .iter() + .filter_map(|e| match e { + SwarmEvent::NewExternalAddrCandidate { address } => Some(address.clone()), + _ => None, + }) + .collect::>(); + + assert_eq!( + external_address_candidates.len(), + 1, + "To only have one external address candidate" + ); + assert_eq!( + &external_address_candidates[0], + varying_observed_addresses.iter().next().unwrap() + ); +} #[async_std::test] async fn identify_push() { diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index df85fbdcd49..6e2e9bb1c0c 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -16,6 +16,8 @@ See [PR 4715](https://github.com/libp2p/rust-libp2p/pull/4715). - Log `PeerId` of `Swarm` even when constructed with new `SwarmBuilder`. See [PR 4671](https://github.com/libp2p/rust-libp2p/pull/4671). +- Add `SwarmEvent::{NewExternalAddrCandidate,ExternalAddrConfirmed,ExternalAddrExpired}` variants. + See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). - Remove deprecated symbols. See [PR 4737](https://github.com/libp2p/rust-libp2p/pull/4737). diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index c89796f8e25..27e62f71831 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -261,15 +261,20 @@ pub enum ToSwarm { event: TInEvent, }, - /// Reports a new candidate for an external address to the [`Swarm`](crate::Swarm). + /// Reports a **new** candidate for an external address to the [`Swarm`](crate::Swarm). /// + /// The emphasis on a **new** candidate is important. 
+ /// Protocols MUST take care to only emit a candidate once per "source". + /// For example, the observed address of a TCP connection does not change throughout its lifetime. + /// Thus, only one candidate should be emitted per connection. + /// + /// This makes the report frequency of an address a meaningful data-point for consumers of this event. /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::NewExternalAddrCandidate`]. /// /// This address could come from a variety of sources: /// - A protocol such as identify obtained it from a remote. /// - The user provided it based on configuration. /// - We made an educated guess based on one of our listen addresses. - /// - We established a new relay connection. NewExternalAddrCandidate(Multiaddr), /// Indicates to the [`Swarm`](crate::Swarm) that the provided address is confirmed to be externally reachable. diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 908936069e0..228c8281a70 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -142,7 +142,7 @@ use libp2p_core::{ }; use libp2p_identity::PeerId; use smallvec::SmallVec; -use std::collections::{HashMap, HashSet}; +use std::collections::{HashMap, HashSet, VecDeque}; use std::num::{NonZeroU32, NonZeroU8, NonZeroUsize}; use std::time::Duration; use std::{ @@ -294,6 +294,12 @@ pub enum SwarmEvent { /// Identifier of the connection. connection_id: ConnectionId, }, + /// We have discovered a new candidate for an external address for us. + NewExternalAddrCandidate { address: Multiaddr }, + /// An external address of the local node was confirmed. + ExternalAddrConfirmed { address: Multiaddr }, + /// An external address of the local node expired, i.e. is no-longer confirmed. + ExternalAddrExpired { address: Multiaddr }, } impl SwarmEvent { @@ -339,7 +345,9 @@ where /// Pending event to be delivered to connection handlers /// (or dropped if the peer disconnected) before the `behaviour` /// can be polled again. 
- pending_event: Option<(PeerId, PendingNotifyHandler, THandlerInEvent)>, + pending_handler_event: Option<(PeerId, PendingNotifyHandler, THandlerInEvent)>, + + pending_swarm_events: VecDeque>>, } impl Unpin for Swarm where TBehaviour: NetworkBehaviour {} @@ -366,7 +374,8 @@ where supported_protocols: Default::default(), confirmed_external_addr: Default::default(), listened_addrs: HashMap::new(), - pending_event: None, + pending_handler_event: None, + pending_swarm_events: VecDeque::default(), } } @@ -663,10 +672,7 @@ where &mut self.behaviour } - fn handle_pool_event( - &mut self, - event: PoolEvent>, - ) -> Option>> { + fn handle_pool_event(&mut self, event: PoolEvent>) { match event { PoolEvent::ConnectionEstablished { peer_id, @@ -698,11 +704,14 @@ where }, )); - return Some(SwarmEvent::OutgoingConnectionError { - peer_id: Some(peer_id), - connection_id: id, - error: dial_error, - }); + self.pending_swarm_events.push_back( + SwarmEvent::OutgoingConnectionError { + peer_id: Some(peer_id), + connection_id: id, + error: dial_error, + }, + ); + return; } } } @@ -728,12 +737,15 @@ where }, )); - return Some(SwarmEvent::IncomingConnectionError { - connection_id: id, - send_back_addr, - local_addr, - error: listen_error, - }); + self.pending_swarm_events.push_back( + SwarmEvent::IncomingConnectionError { + connection_id: id, + send_back_addr, + local_addr, + error: listen_error, + }, + ); + return; } } } @@ -783,14 +795,15 @@ where }, )); self.supported_protocols = supported_protocols; - return Some(SwarmEvent::ConnectionEstablished { - peer_id, - connection_id: id, - num_established, - endpoint, - concurrent_dial_errors, - established_in, - }); + self.pending_swarm_events + .push_back(SwarmEvent::ConnectionEstablished { + peer_id, + connection_id: id, + num_established, + endpoint, + concurrent_dial_errors, + established_in, + }); } PoolEvent::PendingOutboundConnectionError { id: connection_id, @@ -812,11 +825,12 @@ where log::debug!("Connection attempt to unknown peer failed with {:?}", error); } - return Some(SwarmEvent::OutgoingConnectionError { - peer_id: peer, - connection_id, - error, - }); + self.pending_swarm_events + .push_back(SwarmEvent::OutgoingConnectionError { + peer_id: peer, + connection_id, + error, + }); } PoolEvent::PendingInboundConnectionError { id, @@ -834,12 +848,13 @@ where error: &error, connection_id: id, })); - return Some(SwarmEvent::IncomingConnectionError { - connection_id: id, - local_addr, - send_back_addr, - error, - }); + self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnectionError { + connection_id: id, + local_addr, + send_back_addr, + error, + }); } PoolEvent::ConnectionClosed { id, @@ -874,13 +889,14 @@ where endpoint: &endpoint, remaining_established: num_established as usize, })); - return Some(SwarmEvent::ConnectionClosed { - peer_id, - connection_id: id, - endpoint, - cause: error, - num_established, - }); + self.pending_swarm_events + .push_back(SwarmEvent::ConnectionClosed { + peer_id, + connection_id: id, + endpoint, + cause: error, + num_established, + }); } PoolEvent::ConnectionEvent { peer_id, id, event } => { self.behaviour @@ -901,8 +917,6 @@ where })); } } - - None } fn handle_transport_event( @@ -911,7 +925,7 @@ where as Transport>::ListenerUpgrade, io::Error, >, - ) -> Option>> { + ) { match event { TransportEvent::Incoming { listener_id: _, @@ -938,12 +952,14 @@ where connection_id, })); - return Some(SwarmEvent::IncomingConnectionError { - connection_id, - local_addr, - send_back_addr, - error: listen_error, - }); + 
self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnectionError { + connection_id, + local_addr, + send_back_addr, + error: listen_error, + }); + return; } } @@ -956,11 +972,12 @@ where connection_id, ); - Some(SwarmEvent::IncomingConnection { - connection_id, - local_addr, - send_back_addr, - }) + self.pending_swarm_events + .push_back(SwarmEvent::IncomingConnection { + connection_id, + local_addr, + send_back_addr, + }) } TransportEvent::NewAddress { listener_id, @@ -976,10 +993,11 @@ where listener_id, addr: &listen_addr, })); - Some(SwarmEvent::NewListenAddr { - listener_id, - address: listen_addr, - }) + self.pending_swarm_events + .push_back(SwarmEvent::NewListenAddr { + listener_id, + address: listen_addr, + }) } TransportEvent::AddressExpired { listener_id, @@ -998,10 +1016,11 @@ where listener_id, addr: &listen_addr, })); - Some(SwarmEvent::ExpiredListenAddr { - listener_id, - address: listen_addr, - }) + self.pending_swarm_events + .push_back(SwarmEvent::ExpiredListenAddr { + listener_id, + address: listen_addr, + }) } TransportEvent::ListenerClosed { listener_id, @@ -1019,11 +1038,12 @@ where listener_id, reason: reason.as_ref().copied(), })); - Some(SwarmEvent::ListenerClosed { - listener_id, - addresses: addrs.to_vec(), - reason, - }) + self.pending_swarm_events + .push_back(SwarmEvent::ListenerClosed { + listener_id, + addresses: addrs.to_vec(), + reason, + }) } TransportEvent::ListenerError { listener_id, error } => { self.behaviour @@ -1031,7 +1051,8 @@ where listener_id, err: &error, })); - Some(SwarmEvent::ListenerError { listener_id, error }) + self.pending_swarm_events + .push_back(SwarmEvent::ListenerError { listener_id, error }) } } } @@ -1039,14 +1060,17 @@ where fn handle_behaviour_event( &mut self, event: ToSwarm>, - ) -> Option>> { + ) { match event { - ToSwarm::GenerateEvent(event) => return Some(SwarmEvent::Behaviour(event)), + ToSwarm::GenerateEvent(event) => { + self.pending_swarm_events + .push_back(SwarmEvent::Behaviour(event)); + } ToSwarm::Dial { opts } => { let peer_id = opts.get_peer_id(); let connection_id = opts.connection_id(); if let Ok(()) = self.dial(opts) { - return Some(SwarmEvent::Dialing { + self.pending_swarm_events.push_back(SwarmEvent::Dialing { peer_id, connection_id, }); @@ -1064,7 +1088,7 @@ where handler, event, } => { - assert!(self.pending_event.is_none()); + assert!(self.pending_handler_event.is_none()); let handler = match handler { NotifyHandler::One(connection) => PendingNotifyHandler::One(connection), NotifyHandler::Any => { @@ -1076,7 +1100,7 @@ where } }; - self.pending_event = Some((peer_id, handler, event)); + self.pending_handler_event = Some((peer_id, handler, event)); } ToSwarm::NewExternalAddrCandidate(addr) => { // Apply address translation to the candidate address. 
@@ -1101,20 +1125,28 @@ where .on_swarm_event(FromSwarm::NewExternalAddrCandidate( NewExternalAddrCandidate { addr: &addr }, )); + self.pending_swarm_events + .push_back(SwarmEvent::NewExternalAddrCandidate { address: addr }); } else { for addr in translated_addresses { self.behaviour .on_swarm_event(FromSwarm::NewExternalAddrCandidate( NewExternalAddrCandidate { addr: &addr }, )); + self.pending_swarm_events + .push_back(SwarmEvent::NewExternalAddrCandidate { address: addr }); } } } ToSwarm::ExternalAddrConfirmed(addr) => { - self.add_external_address(addr); + self.add_external_address(addr.clone()); + self.pending_swarm_events + .push_back(SwarmEvent::ExternalAddrConfirmed { address: addr }); } ToSwarm::ExternalAddrExpired(addr) => { self.remove_external_address(&addr); + self.pending_swarm_events + .push_back(SwarmEvent::ExternalAddrExpired { address: addr }); } ToSwarm::CloseConnection { peer_id, @@ -1130,8 +1162,6 @@ where } }, } - - None } /// Internal function used by everything event-related. @@ -1155,7 +1185,11 @@ where // // (2) is polled before (3) to prioritize existing connections over upgrading new incoming connections. loop { - match this.pending_event.take() { + if let Some(swarm_event) = this.pending_swarm_events.pop_front() { + return Poll::Ready(swarm_event); + } + + match this.pending_handler_event.take() { // Try to deliver the pending event emitted by the [`NetworkBehaviour`] in the previous // iteration to the connection handler(s). Some((peer_id, handler, event)) => match handler { @@ -1164,7 +1198,7 @@ where Some(conn) => match notify_one(conn, event, cx) { None => continue, Some(event) => { - this.pending_event = Some((peer_id, handler, event)); + this.pending_handler_event = Some((peer_id, handler, event)); } }, None => continue, @@ -1175,7 +1209,7 @@ where None => continue, Some((event, ids)) => { let handler = PendingNotifyHandler::Any(ids); - this.pending_event = Some((peer_id, handler, event)); + this.pending_handler_event = Some((peer_id, handler, event)); } } } @@ -1184,9 +1218,7 @@ where None => match this.behaviour.poll(cx) { Poll::Pending => {} Poll::Ready(behaviour_event) => { - if let Some(swarm_event) = this.handle_behaviour_event(behaviour_event) { - return Poll::Ready(swarm_event); - } + this.handle_behaviour_event(behaviour_event); continue; } @@ -1197,10 +1229,7 @@ where match this.pool.poll(cx) { Poll::Pending => {} Poll::Ready(pool_event) => { - if let Some(swarm_event) = this.handle_pool_event(pool_event) { - return Poll::Ready(swarm_event); - } - + this.handle_pool_event(pool_event); continue; } }; @@ -1209,10 +1238,7 @@ where match Pin::new(&mut this.transport).poll(cx) { Poll::Pending => {} Poll::Ready(transport_event) => { - if let Some(swarm_event) = this.handle_transport_event(transport_event) { - return Poll::Ready(swarm_event); - } - + this.handle_transport_event(transport_event); continue; } } From d0f62e9e23b883b31c4720e509ad37e9c1709bab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 Oct 2023 13:40:50 +0000 Subject: [PATCH 08/33] deps: bump rcgen from 0.11.1 to 0.11.3 Pull-Request: #4765. 
--- Cargo.lock | 20 +++++--------------- transports/tls/Cargo.toml | 2 +- transports/webrtc/Cargo.toml | 2 +- transports/websocket/Cargo.toml | 2 +- 4 files changed, 8 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d9d761316a..79856edf267 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3936,16 +3936,6 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" -[[package]] -name = "pem" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" -dependencies = [ - "base64 0.21.5", - "serde", -] - [[package]] name = "pem" version = "3.0.2" @@ -4384,11 +4374,11 @@ dependencies = [ [[package]] name = "rcgen" -version = "0.11.1" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4954fbc00dcd4d8282c987710e50ba513d351400dbdd00e803a05172a90d8976" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ - "pem 2.0.1", + "pem", "ring 0.16.20", "time", "x509-parser", @@ -6188,7 +6178,7 @@ dependencies = [ "interceptor", "lazy_static", "log", - "pem 3.0.2", + "pem", "rand 0.8.5", "rcgen", "regex", @@ -6251,7 +6241,7 @@ dependencies = [ "log", "p256", "p384", - "pem 3.0.2", + "pem", "rand 0.8.5", "rand_core 0.6.4", "rcgen", diff --git a/transports/tls/Cargo.toml b/transports/tls/Cargo.toml index 0e7e1660c60..3df1674c4b3 100644 --- a/transports/tls/Cargo.toml +++ b/transports/tls/Cargo.toml @@ -13,7 +13,7 @@ futures = { version = "0.3.29", default-features = false } futures-rustls = "0.24.0" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -rcgen = "0.11.0" +rcgen = "0.11.3" ring = "0.16.20" thiserror = "1.0.50" webpki = { version = "0.101.4", package = "rustls-webpki", features = ["std"] } diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index d562826c637..2379c299527 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -24,7 +24,7 @@ libp2p-webrtc-utils = { workspace = true } log = "0.4" multihash = { workspace = true } rand = "0.8" -rcgen = "0.11.1" +rcgen = "0.11.3" serde = { version = "1.0", features = ["derive"] } stun = "0.5" thiserror = "1" diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index ea725e07807..77616e1cefd 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -29,7 +29,7 @@ libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identity = { workspace = true, features = ["rand"] } async-std = { version = "1.6.5", features = ["attributes"] } -rcgen = "0.11.0" +rcgen = "0.11.3" # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling From 6a8cef5cdbce4af379edb0b52ffa9deb03223839 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 1 Nov 2023 07:53:49 +1100 Subject: [PATCH 09/33] feat(relay): propagate errors to `Transport::{listen_on,dial}` To make a reservation with a relay, a user calls `Swarm::listen_on` with an address of the relay, suffixed with a `/p2pcircuit` protocol. Similarly, to establish a circuit to another peer, a user needs to call `Swarm::dial` with such an address. 
Upon success, the `Swarm` then issues a `SwarmEvent::NewListenAddr` event in case of a successful reservation or a `SwarmEvent::ConnectionEstablished` in case of a successful connect. The story is different for errors. Somewhat counterintuitively, the actual reason of an error during these operations are only reported as `relay::Event`s without a direct correlation to the user's `Swarm::listen_on` or `Swarm::dial` calls. With this PR, we send these errors back "into" the `Transport` and report them as `SwarmEvent::ListenerClosed` or `SwarmEvent::OutgoingConnectionError`. This is conceptually more correct. Additionally, by sending these errors back to the transport, we no longer use `ConnectionHandlerEvent::Close` which entirely closes the underlying relay connection. In case the connection is not used for something else, it will be closed by the keep-alive algorithm. Resolves: #4717. Related: #3591. Related: #4718. Pull-Request: #4745. --- Cargo.lock | 4 +- Cargo.toml | 2 +- hole-punching-tests/Cargo.toml | 1 + hole-punching-tests/src/main.rs | 58 ++- misc/futures-bounded/CHANGELOG.md | 5 + misc/futures-bounded/Cargo.toml | 2 +- misc/futures-bounded/src/futures_map.rs | 4 + misc/futures-bounded/src/futures_set.rs | 4 + misc/futures-bounded/src/stream_map.rs | 6 +- misc/futures-bounded/src/stream_set.rs | 4 + protocols/relay/CHANGELOG.md | 11 + protocols/relay/Cargo.toml | 1 + protocols/relay/src/lib.rs | 5 +- protocols/relay/src/priv_client.rs | 43 +- protocols/relay/src/priv_client/handler.rs | 483 ++++++++++--------- protocols/relay/src/priv_client/transport.rs | 17 +- protocols/relay/src/protocol/inbound_stop.rs | 33 +- protocols/relay/src/protocol/outbound_hop.rs | 176 +++---- protocols/relay/tests/lib.rs | 121 ++++- 19 files changed, 573 insertions(+), 407 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 79856edf267..7d3f2e5b07a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1581,7 +1581,7 @@ dependencies = [ [[package]] name = "futures-bounded" -version = "0.2.0" +version = "0.2.1" dependencies = [ "futures-timer", "futures-util", @@ -1939,6 +1939,7 @@ name = "hole-punching-tests" version = "0.1.0" dependencies = [ "anyhow", + "either", "env_logger 0.10.0", "futures", "libp2p", @@ -2946,6 +2947,7 @@ dependencies = [ "libp2p-ping", "libp2p-plaintext", "libp2p-swarm", + "libp2p-swarm-test", "libp2p-yamux", "log", "quick-protobuf", diff --git a/Cargo.toml b/Cargo.toml index 2c823756bbe..e7044a185bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,7 +70,7 @@ resolver = "2" rust-version = "1.73.0" [workspace.dependencies] -futures-bounded = { version = "0.2.0", path = "misc/futures-bounded" } +futures-bounded = { version = "0.2.1", path = "misc/futures-bounded" } libp2p = { version = "0.53.0", path = "libp2p" } libp2p-allow-block-list = { version = "0.3.0", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.12.0", path = "protocols/autonat" } diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml index d8cf5c1ef71..4d067117260 100644 --- a/hole-punching-tests/Cargo.toml +++ b/hole-punching-tests/Cargo.toml @@ -15,3 +15,4 @@ redis = { version = "0.23.0", default-features = false, features = ["tokio-comp" tokio = { version = "1.29.1", features = ["full"] } serde = { version = "1.0.190", features = ["derive"] } serde_json = "1.0.107" +either = "1.9.0" diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs index fd4616e1629..f7373aa4f94 100644 --- a/hole-punching-tests/src/main.rs +++ b/hole-punching-tests/src/main.rs @@ -19,7 
+19,11 @@ // DEALINGS IN THE SOFTWARE. use anyhow::{Context, Result}; +use either::Either; use futures::stream::StreamExt; +use libp2p::core::transport::ListenerId; +use libp2p::swarm::dial_opts::DialOpts; +use libp2p::swarm::ConnectionId; use libp2p::{ core::multiaddr::{Multiaddr, Protocol}, dcutr, identify, noise, ping, relay, @@ -83,17 +87,22 @@ async fn main() -> Result<()> { .build(); client_listen_on_transport(&mut swarm, transport).await?; - client_setup(&mut swarm, &mut redis, relay_addr.clone(), mode).await?; + let id = client_setup(&mut swarm, &mut redis, relay_addr.clone(), mode).await?; let mut hole_punched_peer_connection = None; loop { - match (swarm.next().await.unwrap(), hole_punched_peer_connection) { + match ( + swarm.next().await.unwrap(), + hole_punched_peer_connection, + id, + ) { ( SwarmEvent::Behaviour(BehaviourEvent::RelayClient( relay::client::Event::ReservationReqAccepted { .. }, )), _, + _, ) => { log::info!("Relay accepted our reservation request."); @@ -109,6 +118,7 @@ async fn main() -> Result<()> { }, )), _, + _, ) => { log::info!("Successfully hole-punched to {remote_peer_id}"); @@ -121,6 +131,7 @@ async fn main() -> Result<()> { .. })), Some(hole_punched_connection), + _, ) if mode == Mode::Dial && connection == hole_punched_connection => { println!("{}", serde_json::to_string(&Report::new(rtt))?); @@ -135,12 +146,32 @@ async fn main() -> Result<()> { }, )), _, + _, ) => { log::info!("Failed to hole-punched to {remote_peer_id}"); return Err(anyhow::Error::new(error)); } - (SwarmEvent::OutgoingConnectionError { error, .. }, _) => { - anyhow::bail!(error) + ( + SwarmEvent::ListenerClosed { + listener_id, + reason: Err(e), + .. + }, + _, + Either::Left(reservation), + ) if listener_id == reservation => { + anyhow::bail!("Reservation on relay failed: {e}"); + } + ( + SwarmEvent::OutgoingConnectionError { + connection_id, + error, + .. + }, + _, + Either::Right(circuit), + ) if connection_id == circuit => { + anyhow::bail!("Circuit request relay failed: {error}"); } _ => {} } @@ -209,23 +240,30 @@ async fn client_setup( redis: &mut RedisClient, relay_addr: Multiaddr, mode: Mode, -) -> Result<()> { - match mode { +) -> Result> { + let either = match mode { Mode::Listen => { - swarm.listen_on(relay_addr.with(Protocol::P2pCircuit))?; + let id = swarm.listen_on(relay_addr.with(Protocol::P2pCircuit))?; + + Either::Left(id) } Mode::Dial => { let remote_peer_id = redis.pop(LISTEN_CLIENT_PEER_ID).await?; - swarm.dial( + let opts = DialOpts::from( relay_addr .with(Protocol::P2pCircuit) .with(Protocol::P2p(remote_peer_id)), - )?; + ); + let id = opts.connection_id(); + + swarm.dial(opts)?; + + Either::Right(id) } }; - Ok(()) + Ok(either) } fn tcp_addr(addr: IpAddr) -> Multiaddr { diff --git a/misc/futures-bounded/CHANGELOG.md b/misc/futures-bounded/CHANGELOG.md index 90bd47f2f61..9801c9c1498 100644 --- a/misc/futures-bounded/CHANGELOG.md +++ b/misc/futures-bounded/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.2.1 - unreleased + +- Add `.len()` getter to `FuturesMap`, `FuturesSet`, `StreamMap` and `StreamSet`. + See [PR 4745](https://github.com/libp2p/rust-lib2pp/pulls/4745). + ## 0.2.0 - Add `StreamMap` type and remove `Future`-suffix from `PushError::ReplacedFuture` to reuse it for `StreamMap`. 
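The `.len()` getter mentioned in the changelog above complements the existing `is_empty()`. The following is a small usage sketch, assuming the constructor and `try_push` signatures used elsewhere in this patch; the timeout and capacity values are arbitrary.

```rust
use std::time::Duration;

fn main() {
    // A set that times each future out after 10 seconds and holds at most 5 of them.
    let mut set: futures_bounded::FuturesSet<u32> =
        futures_bounded::FuturesSet::new(Duration::from_secs(10), 5);

    assert!(set.is_empty());
    assert!(set.try_push(async { 42 }).is_ok());

    // The new getter reports how many futures are currently in flight.
    assert_eq!(set.len(), 1);
}
```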
diff --git a/misc/futures-bounded/Cargo.toml b/misc/futures-bounded/Cargo.toml index 7689e9bdcbc..7b622374b43 100644 --- a/misc/futures-bounded/Cargo.toml +++ b/misc/futures-bounded/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "futures-bounded" -version = "0.2.0" +version = "0.2.1" edition = "2021" rust-version.workspace = true license = "MIT" diff --git a/misc/futures-bounded/src/futures_map.rs b/misc/futures-bounded/src/futures_map.rs index 5fd06037608..8e8802254bc 100644 --- a/misc/futures-bounded/src/futures_map.rs +++ b/misc/futures-bounded/src/futures_map.rs @@ -84,6 +84,10 @@ where } } + pub fn len(&self) -> usize { + self.inner.len() + } + pub fn is_empty(&self) -> bool { self.inner.is_empty() } diff --git a/misc/futures-bounded/src/futures_set.rs b/misc/futures-bounded/src/futures_set.rs index 79a82fde110..ea8f700991d 100644 --- a/misc/futures-bounded/src/futures_set.rs +++ b/misc/futures-bounded/src/futures_set.rs @@ -42,6 +42,10 @@ impl FuturesSet { } } + pub fn len(&self) -> usize { + self.inner.len() + } + pub fn is_empty(&self) -> bool { self.inner.is_empty() } diff --git a/misc/futures-bounded/src/stream_map.rs b/misc/futures-bounded/src/stream_map.rs index 7fcdd15e132..40294ce0fba 100644 --- a/misc/futures-bounded/src/stream_map.rs +++ b/misc/futures-bounded/src/stream_map.rs @@ -88,6 +88,10 @@ where Some(inner) } + pub fn len(&self) -> usize { + self.inner.len() + } + pub fn is_empty(&self) -> bool { self.inner.is_empty() } @@ -256,7 +260,7 @@ mod tests { assert!(poll.is_pending()); assert_eq!( - streams.inner.len(), + streams.len(), 0, "resources of cancelled streams are cleaned up properly" ); diff --git a/misc/futures-bounded/src/stream_set.rs b/misc/futures-bounded/src/stream_set.rs index 4fcb649fd49..bb32835065f 100644 --- a/misc/futures-bounded/src/stream_set.rs +++ b/misc/futures-bounded/src/stream_set.rs @@ -44,6 +44,10 @@ where } } + pub fn len(&self) -> usize { + self.inner.len() + } + pub fn is_empty(&self) -> bool { self.inner.is_empty() } diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index d26660f14b4..20d8370cf6d 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -2,6 +2,17 @@ - Fix a rare race condition when making a reservation on a relay that could lead to a failed reservation. See [PR 4747](https://github.com/libp2p/rust-lib2pp/pulls/4747). +- Propagate errors of relay client to the listener / dialer. + A failed reservation will now appear as `SwarmEvent::ListenerClosed` with the `ListenerId` of the corresponding `Swarm::listen_on` call. + A failed circuit request will now appear as `SwarmEvent::OutgoingConnectionError` with the `ConnectionId` of the corresponding `Swarm::dial` call. + Lastly, a failed reservation or circuit request will **no longer** close the underlying relay connection. + As a result, we remove the following enum variants: + - `relay::client::Event::ReservationReqFailed` + - `relay::client::Event::OutboundCircuitReqFailed` + - `relay::client::Event::InboundCircuitReqDenied` + - `relay::client::Event::InboundCircuitReqDenyFailed` + + See [PR 4745](https://github.com/libp2p/rust-lib2pp/pulls/4745). 
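The following sketch shows what handling these errors can look like on the client side, mirroring the hole-punching test changes in this patch: the `ListenerId` returned by `Swarm::listen_on` and the `ConnectionId` taken from `DialOpts` are kept so that later failure events can be attributed to the right request. The generic behaviour parameter and the use of `anyhow` are assumptions made for the example.

```rust
use futures::StreamExt;
use libp2p::swarm::{dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent};
use libp2p::Multiaddr;

// `relay_circuit_addr` ends in `/p2p-circuit`; `dst_addr` additionally ends in `/p2p/<dst>`.
async fn relay_errors_example<B: NetworkBehaviour>(
    swarm: &mut Swarm<B>,
    relay_circuit_addr: Multiaddr,
    dst_addr: Multiaddr,
) -> anyhow::Result<()> {
    // Keep the IDs of our own requests so failures can be attributed to them.
    let reservation = swarm.listen_on(relay_circuit_addr)?;

    let opts = DialOpts::from(dst_addr);
    let circuit = opts.connection_id();
    swarm.dial(opts)?;

    loop {
        match swarm.next().await.expect("infinite stream") {
            // A failed reservation closes the listener created by `listen_on`.
            SwarmEvent::ListenerClosed { listener_id, reason: Err(e), .. }
                if listener_id == reservation =>
            {
                anyhow::bail!("reservation on relay failed: {e}")
            }
            // A failed circuit request fails the dial created by `dial`.
            SwarmEvent::OutgoingConnectionError { connection_id, error, .. }
                if connection_id == circuit =>
            {
                anyhow::bail!("circuit request failed: {error}")
            }
            _ => {}
        }
    }
}
```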
## 0.16.2 diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index f83cb9c4a80..bca55217a2a 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -37,6 +37,7 @@ libp2p-plaintext = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros", "async-std"] } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } +libp2p-swarm-test = { workspace = true } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/relay/src/lib.rs b/protocols/relay/src/lib.rs index 39ccd539838..09d326be9fb 100644 --- a/protocols/relay/src/lib.rs +++ b/protocols/relay/src/lib.rs @@ -47,15 +47,12 @@ pub mod inbound { pub mod hop { pub use crate::protocol::inbound_hop::FatalUpgradeError; } - pub mod stop { - pub use crate::protocol::inbound_stop::FatalUpgradeError; - } } /// Types related to the relay protocol outbound. pub mod outbound { pub mod hop { - pub use crate::protocol::outbound_hop::FatalUpgradeError; + pub use crate::protocol::outbound_hop::{ConnectError, ProtocolViolation, ReserveError}; } pub mod stop { pub use crate::protocol::outbound_stop::FatalUpgradeError; diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index b15b3d68ae1..ae2ceb2e97d 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -25,7 +25,7 @@ pub(crate) mod transport; use crate::multiaddr_ext::MultiaddrExt; use crate::priv_client::handler::Handler; -use crate::protocol::{self, inbound_stop, outbound_hop}; +use crate::protocol::{self, inbound_stop}; use bytes::Bytes; use either::Either; use futures::channel::mpsc::Receiver; @@ -39,8 +39,7 @@ use libp2p_swarm::behaviour::{ConnectionClosed, ConnectionEstablished, FromSwarm use libp2p_swarm::dial_opts::DialOpts; use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionHandler, ConnectionId, DialFailure, NetworkBehaviour, - NotifyHandler, Stream, StreamUpgradeError, THandler, THandlerInEvent, THandlerOutEvent, - ToSwarm, + NotifyHandler, Stream, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{hash_map, HashMap, VecDeque}; use std::io::{Error, ErrorKind, IoSlice}; @@ -59,32 +58,15 @@ pub enum Event { renewal: bool, limit: Option, }, - ReservationReqFailed { - relay_peer_id: PeerId, - /// Indicates whether the request replaces an existing reservation. - renewal: bool, - error: StreamUpgradeError, - }, OutboundCircuitEstablished { relay_peer_id: PeerId, limit: Option, }, - OutboundCircuitReqFailed { - relay_peer_id: PeerId, - error: StreamUpgradeError, - }, /// An inbound circuit has been established. InboundCircuitEstablished { src_peer_id: PeerId, limit: Option, }, - /// An inbound circuit request has been denied. - InboundCircuitReqDenied { src_peer_id: PeerId }, - /// Denying an inbound circuit request failed. 
- InboundCircuitReqDenyFailed { - src_peer_id: PeerId, - error: inbound_stop::UpgradeError, - }, } /// [`NetworkBehaviour`] implementation of the relay client @@ -252,32 +234,15 @@ impl NetworkBehaviour for Behaviour { limit, } } - handler::Event::ReservationReqFailed { renewal, error } => { - Event::ReservationReqFailed { - relay_peer_id: event_source, - renewal, - error, - } - } handler::Event::OutboundCircuitEstablished { limit } => { Event::OutboundCircuitEstablished { relay_peer_id: event_source, limit, } } - handler::Event::OutboundCircuitReqFailed { error } => Event::OutboundCircuitReqFailed { - relay_peer_id: event_source, - error, - }, handler::Event::InboundCircuitEstablished { src_peer_id, limit } => { Event::InboundCircuitEstablished { src_peer_id, limit } } - handler::Event::InboundCircuitReqDenied { src_peer_id } => { - Event::InboundCircuitReqDenied { src_peer_id } - } - handler::Event::InboundCircuitReqDenyFailed { src_peer_id, error } => { - Event::InboundCircuitReqDenyFailed { src_peer_id, error } - } }; self.queued_actions.push_back(ToSwarm::GenerateEvent(event)) @@ -336,7 +301,7 @@ impl NetworkBehaviour for Behaviour { peer_id: relay_peer_id, handler: NotifyHandler::One(*connection_id), event: Either::Left(handler::In::EstablishCircuit { - send_back, + to_dial: send_back, dst_peer_id, }), }, @@ -350,7 +315,7 @@ impl NetworkBehaviour for Behaviour { self.pending_handler_commands.insert( connection_id, handler::In::EstablishCircuit { - send_back, + to_dial: send_back, dst_peer_id, }, ); diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 66bbc5896b1..b3fb345e215 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -20,14 +20,10 @@ use crate::priv_client::transport; use crate::protocol::{self, inbound_stop, outbound_hop}; -use crate::{proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; +use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; use either::Either; use futures::channel::{mpsc, oneshot}; -use futures::future::{BoxFuture, FutureExt}; -use futures::sink::SinkExt; -use futures::stream::{FuturesUnordered, StreamExt}; -use futures::TryFutureExt; -use futures_bounded::{PushError, Timeout}; +use futures::future::FutureExt; use futures_timer::Delay; use libp2p_core::multiaddr::Protocol; use libp2p_core::upgrade::ReadyUpgrade; @@ -42,9 +38,9 @@ use libp2p_swarm::{ }; use log::debug; use std::collections::VecDeque; -use std::fmt; use std::task::{Context, Poll}; use std::time::Duration; +use std::{fmt, io}; /// The maximum number of circuits being denied concurrently. /// @@ -61,7 +57,7 @@ pub enum In { }, EstablishCircuit { dst_peer_id: PeerId, - send_back: oneshot::Sender>, + to_dial: oneshot::Sender>, }, } @@ -71,7 +67,7 @@ impl fmt::Debug for In { In::Reserve { to_listener: _ } => f.debug_struct("In::Reserve").finish(), In::EstablishCircuit { dst_peer_id, - send_back: _, + to_dial: _, } => f .debug_struct("In::EstablishCircuit") .field("dst_peer_id", dst_peer_id) @@ -87,40 +83,19 @@ pub enum Event { renewal: bool, limit: Option, }, - ReservationReqFailed { - /// Indicates whether the request replaces an existing reservation. - renewal: bool, - error: StreamUpgradeError, - }, /// An outbound circuit has been established. OutboundCircuitEstablished { limit: Option }, - OutboundCircuitReqFailed { - error: StreamUpgradeError, - }, /// An inbound circuit has been established. 
InboundCircuitEstablished { src_peer_id: PeerId, limit: Option, }, - /// An inbound circuit request has been denied. - InboundCircuitReqDenied { src_peer_id: PeerId }, - /// Denying an inbound circuit request failed. - InboundCircuitReqDenyFailed { - src_peer_id: PeerId, - error: inbound_stop::UpgradeError, - }, } pub struct Handler { local_peer_id: PeerId, remote_peer_id: PeerId, remote_addr: Multiaddr, - /// A pending fatal error that results in the connection being closed. - pending_error: Option< - StreamUpgradeError< - Either, - >, - >, /// Queue of events to return when polled. queued_events: VecDeque< @@ -132,29 +107,29 @@ pub struct Handler { >, >, - wait_for_outbound_stream: VecDeque, - outbound_circuits: futures_bounded::FuturesSet< - Result< - Either< - Result, - Result, outbound_hop::CircuitFailedReason>, - >, - outbound_hop::FatalUpgradeError, - >, - >, + /// We issue a stream upgrade for each pending request. + pending_requests: VecDeque, - reservation: Reservation, + /// A `RESERVE` request is in-flight for each item in this queue. + active_reserve_requests: VecDeque>, - open_circuit_futs: - futures_bounded::FuturesSet>, + inflight_reserve_requests: + futures_bounded::FuturesSet>, - circuit_deny_futs: futures_bounded::FuturesMap>, + /// A `CONNECT` request is in-flight for each item in this queue. + active_connect_requests: + VecDeque>>, - /// Futures that try to send errors to the transport. - /// - /// We may drop errors if this handler ends up in a terminal state (by returning - /// [`ConnectionHandlerEvent::Close`]). - send_error_futs: FuturesUnordered>, + inflight_outbound_connect_requests: + futures_bounded::FuturesSet>, + + inflight_inbound_circuit_requests: + futures_bounded::FuturesSet>, + + inflight_outbound_circuit_deny_requests: + futures_bounded::FuturesSet>, + + reservation: Reservation, } impl Handler { @@ -164,22 +139,26 @@ impl Handler { remote_peer_id, remote_addr, queued_events: Default::default(), - pending_error: Default::default(), - wait_for_outbound_stream: Default::default(), - outbound_circuits: futures_bounded::FuturesSet::new( + pending_requests: Default::default(), + active_reserve_requests: Default::default(), + inflight_reserve_requests: futures_bounded::FuturesSet::new( STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, ), - reservation: Reservation::None, - open_circuit_futs: futures_bounded::FuturesSet::new( + inflight_inbound_circuit_requests: futures_bounded::FuturesSet::new( STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, ), - circuit_deny_futs: futures_bounded::FuturesMap::new( + inflight_outbound_connect_requests: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + inflight_outbound_circuit_deny_requests: futures_bounded::FuturesSet::new( DENYING_CIRCUIT_TIMEOUT, MAX_NUMBER_DENYING_CIRCUIT, ), - send_error_futs: Default::default(), + active_connect_requests: Default::default(), + reservation: Reservation::None, } } @@ -190,64 +169,46 @@ impl Handler { ::OutboundProtocol, >, ) { - let outbound_info = self.wait_for_outbound_stream.pop_front().expect( - "got a stream error without a pending connection command or a reserve listener", - ); - match outbound_info { - outbound_hop::OutboundStreamInfo::Reserve(mut to_listener) => { - let non_fatal_error = match error { - StreamUpgradeError::Timeout => StreamUpgradeError::Timeout, - StreamUpgradeError::NegotiationFailed => StreamUpgradeError::NegotiationFailed, - StreamUpgradeError::Io(e) => { - self.pending_error = 
Some(StreamUpgradeError::Io(e)); - return; + let pending_request = self + .pending_requests + .pop_front() + .expect("got a stream error without a pending request"); + + match pending_request { + PendingRequest::Reserve { mut to_listener } => { + let error = match error { + StreamUpgradeError::Timeout => { + outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()) } - StreamUpgradeError::Apply(v) => void::unreachable(v), + StreamUpgradeError::Apply(never) => void::unreachable(never), + StreamUpgradeError::NegotiationFailed => { + outbound_hop::ReserveError::Unsupported + } + StreamUpgradeError::Io(e) => outbound_hop::ReserveError::Io(e), }; - if self.pending_error.is_none() { - self.send_error_futs.push( - async move { - let _ = to_listener - .send(transport::ToListenerMsg::Reservation(Err(()))) - .await; - } - .boxed(), - ); - } else { - // Fatal error occurred, thus handler is closing as quickly as possible. - // Transport is notified through dropping `to_listener`. + if let Err(e) = + to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) + { + log::debug!("Unable to send error to listener: {}", e.into_send_error()) } - - let renewal = self.reservation.failed(); - - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::ReservationReqFailed { - renewal, - error: non_fatal_error, - }, - )); + self.reservation.failed(); } - outbound_hop::OutboundStreamInfo::CircuitConnection(cmd) => { - let non_fatal_error = match error { - StreamUpgradeError::Timeout => StreamUpgradeError::Timeout, - StreamUpgradeError::NegotiationFailed => StreamUpgradeError::NegotiationFailed, - StreamUpgradeError::Io(e) => { - self.pending_error = Some(StreamUpgradeError::Io(e)); - return; + PendingRequest::Connect { + to_dial: send_back, .. + } => { + let error = match error { + StreamUpgradeError::Timeout => { + outbound_hop::ConnectError::Io(io::ErrorKind::TimedOut.into()) + } + StreamUpgradeError::NegotiationFailed => { + outbound_hop::ConnectError::Unsupported } + StreamUpgradeError::Io(e) => outbound_hop::ConnectError::Io(e), StreamUpgradeError::Apply(v) => void::unreachable(v), }; - let _ = cmd.send_back.send(Err(())); - - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundCircuitReqFailed { - error: non_fatal_error, - }, - )); + let _ = send_back.send(Err(error)); } } } @@ -255,17 +216,14 @@ impl Handler { fn insert_to_deny_futs(&mut self, circuit: inbound_stop::Circuit) { let src_peer_id = circuit.src_peer_id(); - match self.circuit_deny_futs.try_push( - src_peer_id, - circuit.deny(proto::Status::NO_RESERVATION), - ) { - Err(PushError::BeyondCapacity(_)) => log::warn!( - "Dropping inbound circuit request to be denied from {src_peer_id} due to exceeding limit." - ), - Err(PushError::Replaced(_)) => log::warn!( + if self + .inflight_outbound_circuit_deny_requests + .try_push(circuit.deny(proto::Status::NO_RESERVATION)) + .is_err() + { + log::warn!( "Dropping existing inbound circuit request to be denied from {src_peer_id} in favor of new one." 
- ), - Ok(()) => {} + ) } } } @@ -274,7 +232,7 @@ impl ConnectionHandler for Handler { type FromBehaviour = In; type ToBehaviour = Event; type Error = StreamUpgradeError< - Either, + Either, >; type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); @@ -288,22 +246,21 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match event { In::Reserve { to_listener } => { - self.wait_for_outbound_stream - .push_back(outbound_hop::OutboundStreamInfo::Reserve(to_listener)); + self.pending_requests + .push_back(PendingRequest::Reserve { to_listener }); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), }); } In::EstablishCircuit { - send_back, + to_dial: send_back, dst_peer_id, } => { - self.wait_for_outbound_stream.push_back( - outbound_hop::OutboundStreamInfo::CircuitConnection( - outbound_hop::Command::new(dst_peer_id, send_back), - ), - ); + self.pending_requests.push_back(PendingRequest::Connect { + dst_peer_id, + to_dial: send_back, + }); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), @@ -327,21 +284,25 @@ impl ConnectionHandler for Handler { Self::Error, >, > { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - - // Inbound circuits loop { - match self.outbound_circuits.poll_unpin(cx) { - Poll::Ready(Ok(Ok(Either::Left(Ok(outbound_hop::Reservation { + debug_assert_eq!( + self.inflight_reserve_requests.len(), + self.active_reserve_requests.len(), + "expect to have one active request per inflight stream" + ); + + // Reservations + match self.inflight_reserve_requests.poll_unpin(cx) { + Poll::Ready(Ok(Ok(outbound_hop::Reservation { renewal_timeout, addrs, limit, - to_listener, - }))))) => { + }))) => { + let to_listener = self + .active_reserve_requests + .pop_front() + .expect("must have active request for stream"); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( self.reservation.accepted( renewal_timeout, @@ -350,57 +311,110 @@ impl ConnectionHandler for Handler { self.local_peer_id, limit, ), - )) - } - Poll::Ready(Ok(Ok(Either::Right(Ok(Some(outbound_hop::Circuit { limit })))))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundCircuitEstablished { limit }, )); } - Poll::Ready(Ok(Ok(Either::Right(Ok(None))))) => continue, - Poll::Ready(Ok(Ok(Either::Right(Err(e))))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundCircuitReqFailed { - error: StreamUpgradeError::Apply(e), - }, - )); + Poll::Ready(Ok(Err(error))) => { + let mut to_listener = self + .active_reserve_requests + .pop_front() + .expect("must have active request for stream"); + + if let Err(e) = + to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) + { + log::debug!("Unable to send error to listener: {}", e.into_send_error()) + } + self.reservation.failed(); + continue; } - Poll::Ready(Ok(Ok(Either::Left(Err(e))))) => { - let renewal = self.reservation.failed(); + Poll::Ready(Err(futures_bounded::Timeout { .. 
})) => { + let mut to_listener = self + .active_reserve_requests + .pop_front() + .expect("must have active request for stream"); + + if let Err(e) = + to_listener.try_send(transport::ToListenerMsg::Reservation(Err( + outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()), + ))) + { + log::debug!("Unable to send error to listener: {}", e.into_send_error()) + } + self.reservation.failed(); + continue; + } + Poll::Pending => {} + } + + debug_assert_eq!( + self.inflight_outbound_connect_requests.len(), + self.active_connect_requests.len(), + "expect to have one active request per inflight stream" + ); + + // Circuits + match self.inflight_outbound_connect_requests.poll_unpin(cx) { + Poll::Ready(Ok(Ok(outbound_hop::Circuit { + limit, + read_buffer, + stream, + }))) => { + let to_listener = self + .active_connect_requests + .pop_front() + .expect("must have active request for stream"); + + if to_listener + .send(Ok(priv_client::Connection { + state: priv_client::ConnectionState::new_outbound(stream, read_buffer), + })) + .is_err() + { + log::debug!( + "Dropping newly established circuit because the listener is gone" + ); + continue; + } + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::ReservationReqFailed { - renewal, - error: StreamUpgradeError::Apply(e), - }, + Event::OutboundCircuitEstablished { limit }, )); } - Poll::Ready(Ok(Err(e))) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Right(e), - ))) + Poll::Ready(Ok(Err(error))) => { + let to_dialer = self + .active_connect_requests + .pop_front() + .expect("must have active request for stream"); + + let _ = to_dialer.send(Err(error)); + continue; } - Poll::Ready(Err(Timeout { .. })) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); + Poll::Ready(Err(futures_bounded::Timeout { .. })) => { + let mut to_listener = self + .active_reserve_requests + .pop_front() + .expect("must have active request for stream"); + + if let Err(e) = + to_listener.try_send(transport::ToListenerMsg::Reservation(Err( + outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()), + ))) + { + log::debug!("Unable to send error to listener: {}", e.into_send_error()) + } + self.reservation.failed(); + continue; } - Poll::Pending => break, + Poll::Pending => {} } - } - // Return queued events. - if let Some(event) = self.queued_events.pop_front() { - return Poll::Ready(event); - } - - if let Poll::Ready(worker_res) = self.open_circuit_futs.poll_unpin(cx) { - let res = match worker_res { - Ok(r) => r, - Err(Timeout { .. }) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); - } - }; + // Return queued events. + if let Some(event) = self.queued_events.pop_front() { + return Poll::Ready(event); + } - match res { - Ok(circuit) => match &mut self.reservation { + match self.inflight_inbound_circuit_requests.poll_unpin(cx) { + Poll::Ready(Ok(Ok(circuit))) => match &mut self.reservation { Reservation::Accepted { pending_msgs, .. } | Reservation::Renewing { pending_msgs, .. 
} => { let src_peer_id = circuit.src_peer_id(); @@ -422,47 +436,45 @@ impl ConnectionHandler for Handler { } Reservation::None => { self.insert_to_deny_futs(circuit); + continue; } }, - Err(e) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Left(e), - ))); + Poll::Ready(Ok(Err(e))) => { + log::debug!("An inbound circuit request failed: {e}"); + continue; } + Poll::Ready(Err(e)) => { + log::debug!("An inbound circuit request timed out: {e}"); + continue; + } + Poll::Pending => {} } - } - - if let Poll::Ready(Some(to_listener)) = self.reservation.poll(cx) { - self.wait_for_outbound_stream - .push_back(outbound_hop::OutboundStreamInfo::Reserve(to_listener)); - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), - }); - } + if let Poll::Ready(Some(to_listener)) = self.reservation.poll(cx) { + self.pending_requests + .push_back(PendingRequest::Reserve { to_listener }); - // Deny incoming circuit requests. - match self.circuit_deny_futs.poll_unpin(cx) { - Poll::Ready((src_peer_id, Ok(Ok(())))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundCircuitReqDenied { src_peer_id }, - )); - } - Poll::Ready((src_peer_id, Ok(Err(error)))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundCircuitReqDenyFailed { src_peer_id, error }, - )); - } - Poll::Ready((src_peer_id, Err(Timeout { .. }))) => { - log::warn!("Dropping inbound circuit request to be denied from {:?} due to exceeding limit.", src_peer_id); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), + }); } - Poll::Pending => {} - } - // Send errors to transport. - while let Poll::Ready(Some(())) = self.send_error_futs.poll_next_unpin(cx) {} + // Deny incoming circuit requests. + match self.inflight_outbound_circuit_deny_requests.poll_unpin(cx) { + Poll::Ready(Ok(Ok(()))) => continue, + Poll::Ready(Ok(Err(error))) => { + log::debug!("Denying inbound circuit failed: {error}"); + continue; + } + Poll::Ready(Err(futures_bounded::Timeout { .. })) => { + log::debug!("Denying inbound circuit timed out"); + continue; + } + Poll::Pending => {} + } - Poll::Pending + return Poll::Pending; + } } fn on_connection_event( @@ -480,7 +492,7 @@ impl ConnectionHandler for Handler { .. }) => { if self - .open_circuit_futs + .inflight_inbound_circuit_requests .try_push(inbound_stop::handle_open_circuit(stream)) .is_err() { @@ -491,33 +503,29 @@ impl ConnectionHandler for Handler { protocol: stream, .. 
}) => { - let outbound_info = self.wait_for_outbound_stream.pop_front().expect( + let pending_request = self.pending_requests.pop_front().expect( "opened a stream without a pending connection command or a reserve listener", ); - match outbound_info { - outbound_hop::OutboundStreamInfo::Reserve(to_listener) => { + match pending_request { + PendingRequest::Reserve { to_listener } => { + self.active_reserve_requests.push_back(to_listener); if self - .outbound_circuits - .try_push( - outbound_hop::handle_reserve_message_response(stream, to_listener) - .map_ok(Either::Left), - ) + .inflight_reserve_requests + .try_push(outbound_hop::make_reservation(stream)) .is_err() { log::warn!("Dropping outbound stream because we are at capacity") } } - outbound_hop::OutboundStreamInfo::CircuitConnection(cmd) => { + PendingRequest::Connect { + dst_peer_id, + to_dial: send_back, + } => { + self.active_connect_requests.push_back(send_back); + if self - .outbound_circuits - .try_push( - outbound_hop::handle_connection_message_response( - stream, - self.remote_peer_id, - cmd, - ) - .map_ok(Either::Right), - ) + .inflight_outbound_connect_requests + .try_push(outbound_hop::open_circuit(stream, dst_peer_id)) .is_err() { log::warn!("Dropping outbound stream because we are at capacity") @@ -595,17 +603,8 @@ impl Reservation { } /// Marks the current reservation as failed. - /// - /// Returns whether the reservation request was a renewal. - fn failed(&mut self) -> bool { - let renewal = matches!( - self, - Reservation::Accepted { .. } | Reservation::Renewing { .. } - ); - + fn failed(&mut self) { *self = Reservation::None; - - renewal } fn forward_messages_to_transport_listener(&mut self, cx: &mut Context<'_>) { @@ -668,3 +667,15 @@ impl Reservation { poll_val } } + +pub(crate) enum PendingRequest { + Reserve { + /// A channel into the [`Transport`](priv_client::Transport). + to_listener: mpsc::Sender, + }, + Connect { + dst_peer_id: PeerId, + /// A channel into the future returned by [`Transport::dial`](libp2p_core::Transport::dial). 
+ to_dial: oneshot::Sender>, + }, +} diff --git a/protocols/relay/src/priv_client/transport.rs b/protocols/relay/src/priv_client/transport.rs index 41114d0cdd5..c463de9cc66 100644 --- a/protocols/relay/src/priv_client/transport.rs +++ b/protocols/relay/src/priv_client/transport.rs @@ -21,6 +21,8 @@ use crate::multiaddr_ext::MultiaddrExt; use crate::priv_client::Connection; +use crate::protocol::outbound_hop; +use crate::protocol::outbound_hop::{ConnectError, ReserveError}; use crate::RequestId; use futures::channel::mpsc; use futures::channel::oneshot; @@ -97,7 +99,7 @@ pub struct Transport { impl Transport { pub(crate) fn new() -> (Self, mpsc::Receiver) { - let (to_behaviour, from_transport) = mpsc::channel(0); + let (to_behaviour, from_transport) = mpsc::channel(1000); let transport = Transport { to_behaviour, pending_to_behaviour: VecDeque::new(), @@ -189,7 +191,8 @@ impl libp2p_core::Transport for Transport { send_back: tx, }) .await?; - let stream = rx.await?.map_err(|()| Error::Connect)?; + let stream = rx.await??; + Ok(stream) } .boxed()) @@ -381,7 +384,7 @@ impl Stream for Listener { send_back_addr: Protocol::P2p(src_peer_id).into(), }) } - ToListenerMsg::Reservation(Err(())) => self.close(Err(Error::Reservation)), + ToListenerMsg::Reservation(Err(e)) => self.close(Err(Error::Reservation(e))), }; } } @@ -409,9 +412,9 @@ pub enum Error { #[error("One of the provided multiaddresses is malformed.")] MalformedMultiaddr, #[error("Failed to get Reservation.")] - Reservation, + Reservation(#[from] ReserveError), #[error("Failed to connect to destination.")] - Connect, + Connect(#[from] ConnectError), } impl From for TransportError { @@ -431,7 +434,7 @@ pub(crate) enum TransportToBehaviourMsg { relay_peer_id: PeerId, dst_addr: Option, dst_peer_id: PeerId, - send_back: oneshot::Sender>, + send_back: oneshot::Sender>, }, /// Listen for incoming relayed connections via relay node. 
ListenReq { @@ -443,7 +446,7 @@ pub(crate) enum TransportToBehaviourMsg { #[allow(clippy::large_enum_variant)] pub enum ToListenerMsg { - Reservation(Result), + Reservation(Result), IncomingRelayedConnection { stream: Connection, src_peer_id: PeerId, diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs index caaeee9cc53..22b8244080f 100644 --- a/protocols/relay/src/protocol/inbound_stop.rs +++ b/protocols/relay/src/protocol/inbound_stop.rs @@ -25,9 +25,10 @@ use bytes::Bytes; use futures::prelude::*; use libp2p_identity::PeerId; use libp2p_swarm::Stream; +use std::io; use thiserror::Error; -pub(crate) async fn handle_open_circuit(io: Stream) -> Result { +pub(crate) async fn handle_open_circuit(io: Stream) -> Result { let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); let proto::StopMessage { @@ -38,40 +39,42 @@ pub(crate) async fn handle_open_circuit(io: Stream) -> Result { - let src_peer_id = PeerId::from_bytes(&peer.ok_or(FatalUpgradeError::MissingPeer)?.id) - .map_err(|_| FatalUpgradeError::ParsePeerId)?; + let src_peer_id = PeerId::from_bytes(&peer.ok_or(ProtocolViolation::MissingPeer)?.id) + .map_err(|_| ProtocolViolation::ParsePeerId)?; Ok(Circuit { substream, src_peer_id, limit: limit.map(Into::into), }) } - proto::StopMessageType::STATUS => Err(FatalUpgradeError::UnexpectedTypeStatus), + proto::StopMessageType::STATUS => { + Err(Error::Protocol(ProtocolViolation::UnexpectedTypeStatus)) + } } } #[derive(Debug, Error)] -pub enum UpgradeError { - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), +pub(crate) enum Error { + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), + #[error("IO error")] + Io(#[from] io::Error), } -impl From for UpgradeError { +impl From for Error { fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) + Self::Protocol(ProtocolViolation::Codec(error)) } } #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Failed to parse response type field.")] ParseTypeField, #[error("Failed to parse peer id.")] @@ -97,7 +100,7 @@ impl Circuit { self.limit } - pub(crate) async fn accept(mut self) -> Result<(Stream, Bytes), UpgradeError> { + pub(crate) async fn accept(mut self) -> Result<(Stream, Bytes), Error> { let msg = proto::StopMessage { type_pb: proto::StopMessageType::STATUS, peer: None, @@ -121,7 +124,7 @@ impl Circuit { Ok((io, read_buffer.freeze())) } - pub(crate) async fn deny(mut self, status: proto::Status) -> Result<(), UpgradeError> { + pub(crate) async fn deny(mut self, status: proto::Status) -> Result<(), Error> { let msg = proto::StopMessage { type_pb: proto::StopMessageType::STATUS, peer: None, diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index 6a222db55c1..4e9b512c3e7 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -18,25 +18,24 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
+use std::io; use std::time::{Duration, SystemTime}; use asynchronous_codec::{Framed, FramedParts}; -use futures::channel::{mpsc, oneshot}; +use bytes::Bytes; use futures::prelude::*; use futures_timer::Delay; -use log::debug; use thiserror::Error; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_swarm::Stream; -use crate::priv_client::transport; use crate::protocol::{Limit, MAX_MESSAGE_SIZE}; -use crate::{priv_client, proto}; +use crate::{proto, HOP_PROTOCOL_NAME}; #[derive(Debug, Error)] -pub enum CircuitFailedReason { +pub enum ConnectError { #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, #[error("Relay failed to connect to destination.")] @@ -45,22 +44,32 @@ pub enum CircuitFailedReason { NoReservation, #[error("Remote denied permission.")] PermissionDenied, + #[error("Remote does not support the `{HOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[source] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), } #[derive(Debug, Error)] -pub enum ReservationFailedReason { +pub enum ReserveError { #[error("Reservation refused.")] Refused, #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, + #[error("Remote does not support the `{HOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[source] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), } #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be set.")] MissingStatusField, #[error("Expected 'reservation' field to be set.")] @@ -83,21 +92,31 @@ pub enum FatalUpgradeError { UnexpectedStatus(proto::Status), } +impl From for ConnectError { + fn from(e: quick_protobuf_codec::Error) -> Self { + ConnectError::Protocol(ProtocolViolation::Codec(e)) + } +} + +impl From for ReserveError { + fn from(e: quick_protobuf_codec::Error) -> Self { + ReserveError::Protocol(ProtocolViolation::Codec(e)) + } +} + pub(crate) struct Reservation { pub(crate) renewal_timeout: Delay, pub(crate) addrs: Vec, pub(crate) limit: Option, - pub(crate) to_listener: mpsc::Sender, } pub(crate) struct Circuit { + pub(crate) stream: Stream, + pub(crate) read_buffer: Bytes, pub(crate) limit: Option, } -pub(crate) async fn handle_reserve_message_response( - protocol: Stream, - to_listener: mpsc::Sender, -) -> Result, FatalUpgradeError> { +pub(crate) async fn make_reservation(stream: Stream) -> Result { let msg = proto::HopMessage { type_pb: proto::HopMessageType::RESERVE, peer: None, @@ -105,7 +124,7 @@ pub(crate) async fn handle_reserve_message_response( limit: None, status: None, }; - let mut substream = Framed::new(protocol, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); + let mut substream = Framed::new(stream, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); substream.send(msg).await?; @@ -118,35 +137,47 @@ pub(crate) async fn handle_reserve_message_response( } = substream .next() .await - .ok_or(FatalUpgradeError::StreamClosed)??; + .ok_or(ReserveError::Io(io::ErrorKind::UnexpectedEof.into()))??; match type_pb { proto::HopMessageType::CONNECT => { - return Err(FatalUpgradeError::UnexpectedTypeConnect); + return Err(ReserveError::Protocol( + ProtocolViolation::UnexpectedTypeConnect, + )); } proto::HopMessageType::RESERVE => { - return Err(FatalUpgradeError::UnexpectedTypeReserve); + return Err(ReserveError::Protocol( 
+ ProtocolViolation::UnexpectedTypeReserve, + )); } proto::HopMessageType::STATUS => {} } let limit = limit.map(Into::into); - match status.ok_or(FatalUpgradeError::MissingStatusField)? { + match status.ok_or(ProtocolViolation::MissingStatusField)? { proto::Status::OK => {} proto::Status::RESERVATION_REFUSED => { - return Ok(Err(ReservationFailedReason::Refused)); + return Err(ReserveError::Refused); } proto::Status::RESOURCE_LIMIT_EXCEEDED => { - return Ok(Err(ReservationFailedReason::ResourceLimitExceeded)); + return Err(ReserveError::ResourceLimitExceeded); + } + s => { + return Err(ReserveError::Protocol(ProtocolViolation::UnexpectedStatus( + s, + ))) } - s => return Err(FatalUpgradeError::UnexpectedStatus(s)), } - let reservation = reservation.ok_or(FatalUpgradeError::MissingReservationField)?; + let reservation = reservation.ok_or(ReserveError::Protocol( + ProtocolViolation::MissingReservationField, + ))?; if reservation.addrs.is_empty() { - return Err(FatalUpgradeError::NoAddressesInReservation); + return Err(ReserveError::Protocol( + ProtocolViolation::NoAddressesInReservation, + )); } let addrs = reservation @@ -154,7 +185,7 @@ pub(crate) async fn handle_reserve_message_response( .into_iter() .map(|b| Multiaddr::try_from(b.to_vec())) .collect::, _>>() - .map_err(|_| FatalUpgradeError::InvalidReservationAddrs)?; + .map_err(|_| ReserveError::Protocol(ProtocolViolation::InvalidReservationAddrs))?; let renewal_timeout = reservation .expire @@ -168,25 +199,25 @@ pub(crate) async fn handle_reserve_message_response( .and_then(|duration| duration.checked_sub(duration / 4)) .map(Duration::from_secs) .map(Delay::new) - .ok_or(FatalUpgradeError::InvalidReservationExpiration)?; + .ok_or(ReserveError::Protocol( + ProtocolViolation::InvalidReservationExpiration, + ))?; - Ok(Ok(Reservation { + Ok(Reservation { renewal_timeout, addrs, limit, - to_listener, - })) + }) } -pub(crate) async fn handle_connection_message_response( +pub(crate) async fn open_circuit( protocol: Stream, - remote_peer_id: PeerId, - con_command: Command, -) -> Result, CircuitFailedReason>, FatalUpgradeError> { + dst_peer_id: PeerId, +) -> Result { let msg = proto::HopMessage { type_pb: proto::HopMessageType::CONNECT, peer: Some(proto::Peer { - id: con_command.dst_peer_id.to_bytes(), + id: dst_peer_id.to_bytes(), addrs: vec![], }), reservation: None, @@ -196,9 +227,7 @@ pub(crate) async fn handle_connection_message_response( let mut substream = Framed::new(protocol, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); - if substream.send(msg).await.is_err() { - return Err(FatalUpgradeError::StreamClosed); - } + substream.send(msg).await?; let proto::HopMessage { type_pb, @@ -206,17 +235,21 @@ pub(crate) async fn handle_connection_message_response( reservation: _, limit, status, - } = match substream.next().await { - Some(Ok(r)) => r, - _ => return Err(FatalUpgradeError::StreamClosed), - }; + } = substream + .next() + .await + .ok_or(ConnectError::Io(io::ErrorKind::UnexpectedEof.into()))??; match type_pb { proto::HopMessageType::CONNECT => { - return Err(FatalUpgradeError::UnexpectedTypeConnect); + return Err(ConnectError::Protocol( + ProtocolViolation::UnexpectedTypeConnect, + )); } proto::HopMessageType::RESERVE => { - return Err(FatalUpgradeError::UnexpectedTypeReserve); + return Err(ConnectError::Protocol( + ProtocolViolation::UnexpectedTypeReserve, + )); } proto::HopMessageType::STATUS => {} } @@ -224,22 +257,26 @@ pub(crate) async fn handle_connection_message_response( match status { Some(proto::Status::OK) => {} 
Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => { - return Ok(Err(CircuitFailedReason::ResourceLimitExceeded)); + return Err(ConnectError::ResourceLimitExceeded); } Some(proto::Status::CONNECTION_FAILED) => { - return Ok(Err(CircuitFailedReason::ConnectionFailed)); + return Err(ConnectError::ConnectionFailed); } Some(proto::Status::NO_RESERVATION) => { - return Ok(Err(CircuitFailedReason::NoReservation)); + return Err(ConnectError::NoReservation); } Some(proto::Status::PERMISSION_DENIED) => { - return Ok(Err(CircuitFailedReason::PermissionDenied)); + return Err(ConnectError::PermissionDenied); } Some(s) => { - return Err(FatalUpgradeError::UnexpectedStatus(s)); + return Err(ConnectError::Protocol(ProtocolViolation::UnexpectedStatus( + s, + ))); } None => { - return Err(FatalUpgradeError::MissingStatusField); + return Err(ConnectError::Protocol( + ProtocolViolation::MissingStatusField, + )); } } @@ -256,40 +293,11 @@ pub(crate) async fn handle_connection_message_response( "Expect a flushed Framed to have empty write buffer." ); - match con_command.send_back.send(Ok(priv_client::Connection { - state: priv_client::ConnectionState::new_outbound(io, read_buffer.freeze()), - })) { - Ok(()) => Ok(Ok(Some(Circuit { limit }))), - Err(_) => { - debug!( - "Oneshot to `client::transport::Dial` future dropped. \ - Dropping established relayed connection to {:?}.", - remote_peer_id, - ); - - Ok(Ok(None)) - } - } -} - -pub(crate) enum OutboundStreamInfo { - Reserve(mpsc::Sender), - CircuitConnection(Command), -} - -pub(crate) struct Command { - dst_peer_id: PeerId, - pub(crate) send_back: oneshot::Sender>, -} + let circuit = Circuit { + stream: io, + read_buffer: read_buffer.freeze(), + limit, + }; -impl Command { - pub(crate) fn new( - dst_peer_id: PeerId, - send_back: oneshot::Sender>, - ) -> Self { - Self { - dst_peer_id, - send_back, - } - } + Ok(circuit) } diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs index 28273c1088b..39fc2b1f6dc 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -33,7 +33,10 @@ use libp2p_identity::PeerId; use libp2p_ping as ping; use libp2p_plaintext as plaintext; use libp2p_relay as relay; -use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm::dial_opts::DialOpts; +use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; +use libp2p_swarm_test::SwarmExt; +use std::error::Error; use std::time::Duration; #[test] @@ -271,6 +274,107 @@ fn handle_dial_failure() { assert!(!pool.run_until(wait_for_dial(&mut client, relay_peer_id))); } +#[test] +fn propagate_reservation_error_to_listener() { + let _ = env_logger::try_init(); + let mut pool = LocalPool::new(); + + let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); + let mut relay = build_relay_with_config(relay::Config { + max_reservations: 0, // Will make us fail to make the reservation + ..relay::Config::default() + }); + let relay_peer_id = *relay.local_peer_id(); + + relay.listen_on(relay_addr.clone()).unwrap(); + relay.add_external_address(relay_addr.clone()); + spawn_swarm_on_pool(&pool, relay); + + let client_addr = relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit); + let mut client = build_client(); + + let reservation_listener = client.listen_on(client_addr.clone()).unwrap(); + + // Wait for connection to relay. 
+ assert!(pool.run_until(wait_for_dial(&mut client, relay_peer_id))); + + let error = pool.run_until(client.wait(|e| match e { + SwarmEvent::ListenerClosed { + listener_id, + reason: Err(e), + .. + } if listener_id == reservation_listener => Some(e), + _ => None, + })); + + let error = error + .source() + .unwrap() + .downcast_ref::() + .unwrap(); + + assert!(matches!( + error, + relay::outbound::hop::ReserveError::ResourceLimitExceeded + )); +} + +#[test] +fn propagate_connect_error_to_unknown_peer_to_dialer() { + let _ = env_logger::try_init(); + let mut pool = LocalPool::new(); + + let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); + let mut relay = build_relay(); + let relay_peer_id = *relay.local_peer_id(); + + relay.listen_on(relay_addr.clone()).unwrap(); + relay.add_external_address(relay_addr.clone()); + spawn_swarm_on_pool(&pool, relay); + + let mut src = build_client(); + + let dst_peer_id = PeerId::random(); // We don't have a destination peer in this test, so the CONNECT request will fail. + let dst_addr = relay_addr + .with(Protocol::P2p(relay_peer_id)) + .with(Protocol::P2pCircuit) + .with(Protocol::P2p(dst_peer_id)); + + let opts = DialOpts::from(dst_addr.clone()); + let circuit_connection_id = opts.connection_id(); + + src.dial(opts).unwrap(); + + let (failed_address, error) = pool.run_until(src.wait(|e| match e { + SwarmEvent::OutgoingConnectionError { + connection_id, + error: DialError::Transport(mut errors), + .. + } if connection_id == circuit_connection_id => { + assert_eq!(errors.len(), 1); + Some(errors.remove(0)) + } + _ => None, + })); + + // This is a bit wonky but we need to get the _actual_ source error :) + let error = error + .source() + .unwrap() + .source() + .unwrap() + .downcast_ref::() + .unwrap(); + + assert_eq!(failed_address, dst_addr); + assert!(matches!( + error, + relay::outbound::hop::ConnectError::NoReservation + )); +} + #[test] fn reuse_connection() { let _ = env_logger::try_init(); @@ -309,6 +413,13 @@ fn reuse_connection() { } fn build_relay() -> Swarm { + build_relay_with_config(relay::Config { + reservation_duration: Duration::from_secs(2), + ..Default::default() + }) +} + +fn build_relay_with_config(config: relay::Config) -> Swarm { let local_key = identity::Keypair::generate_ed25519(); let local_peer_id = local_key.public().to_peer_id(); @@ -318,13 +429,7 @@ fn build_relay() -> Swarm { transport, Relay { ping: ping::Behaviour::new(ping::Config::new()), - relay: relay::Behaviour::new( - local_peer_id, - relay::Config { - reservation_duration: Duration::from_secs(2), - ..Default::default() - }, - ), + relay: relay::Behaviour::new(local_peer_id, config), }, local_peer_id, Config::with_async_std_executor(), From f303b3f2e25d6b8c8d1cae529e47f51353f0e75d Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 1 Nov 2023 12:31:52 +1100 Subject: [PATCH 10/33] refactor(dcutr): simplify public API We refactor the `libp2p-dcutr` API to only emit a single event: whether the hole-punch was successful or not. All other intermediate events are removed. Hole-punching is something that we try to do automatically as soon as we are connected to a peer over a relayed connection. The lack of explicit user intent means any event we emit is at best informational and not a "response" that the user would wait for. Thus, I chose to not expose the details of why the hole-punch failed but return an opaque error. Lastly, this PR also removes the usage of `ConnectionHandlerEvent::Close`. 
Just because something went wrong during the DCUtR handshake, doesn't mean we should close the relayed connection. Related: #3591. Pull-Request: #4749. --- Cargo.lock | 1 + hole-punching-tests/src/main.rs | 22 +-- libp2p/src/tutorials/hole_punching.rs | 15 +- misc/metrics/src/dcutr.rs | 19 +- protocols/dcutr/CHANGELOG.md | 8 +- protocols/dcutr/Cargo.toml | 1 + protocols/dcutr/src/behaviour.rs | 108 +++++------- protocols/dcutr/src/handler/relayed.rs | 211 +++++++++++------------ protocols/dcutr/src/lib.rs | 4 +- protocols/dcutr/src/protocol/inbound.rs | 155 +++++++---------- protocols/dcutr/src/protocol/outbound.rs | 155 ++++++++--------- protocols/dcutr/tests/lib.rs | 22 +-- 12 files changed, 312 insertions(+), 409 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7d3f2e5b07a..ef942f48c08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2502,6 +2502,7 @@ dependencies = [ "either", "env_logger 0.10.0", "futures", + "futures-bounded", "futures-timer", "instant", "libp2p-core", diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs index f7373aa4f94..72b81f776ad 100644 --- a/hole-punching-tests/src/main.rs +++ b/hole-punching-tests/src/main.rs @@ -111,12 +111,10 @@ async fn main() -> Result<()> { .await?; } ( - SwarmEvent::Behaviour(BehaviourEvent::Dcutr( - dcutr::Event::DirectConnectionUpgradeSucceeded { - remote_peer_id, - connection_id, - }, - )), + SwarmEvent::Behaviour(BehaviourEvent::Dcutr(dcutr::Event { + remote_peer_id, + result: Ok(connection_id), + })), _, _, ) => { @@ -138,13 +136,11 @@ async fn main() -> Result<()> { return Ok(()); } ( - SwarmEvent::Behaviour(BehaviourEvent::Dcutr( - dcutr::Event::DirectConnectionUpgradeFailed { - remote_peer_id, - error, - .. - }, - )), + SwarmEvent::Behaviour(BehaviourEvent::Dcutr(dcutr::Event { + remote_peer_id, + result: Err(error), + .. + })), _, _, ) => { diff --git a/libp2p/src/tutorials/hole_punching.rs b/libp2p/src/tutorials/hole_punching.rs index 5fd74fe754e..f9f42432ba4 100644 --- a/libp2p/src/tutorials/hole_punching.rs +++ b/libp2p/src/tutorials/hole_punching.rs @@ -166,18 +166,9 @@ //! [2022-01-30T12:54:10Z INFO client] Established connection to PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") via Dialer { address: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X", role_override: Dialer } //! ``` //! -//! 2. The listening client initiating a direct connection upgrade for the new relayed connection. -//! Reported by [`dcutr`](crate::dcutr) through -//! [`Event::RemoteInitiatedDirectConnectionUpgrade`](crate::dcutr::Event::RemoteInitiatedDirectConnectionUpgrade). +//! 2. The direct connection upgrade, also known as hole punch, succeeding. +//! Reported by [`dcutr`](crate::dcutr) through [`Event`](crate::dcutr::Event) containing [`Result::Ok`] with the [`ConnectionId`](libp2p_swarm::ConnectionId) of the new direct connection. //! //! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] RemoteInitiatedDirectConnectionUpgrade { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), remote_relayed_addr: "/ip4/$RELAY_PEER_ID/tcp/4001/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN/p2p-circuit/p2p/12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X" } -//! ``` -//! -//! 3. The direct connection upgrade, also known as hole punch, succeeding. Reported by -//! [`dcutr`](crate::dcutr) through -//! 
[`Event::RemoteInitiatedDirectConnectionUpgrade`](crate::dcutr::Event::DirectConnectionUpgradeSucceeded). -//! -//! ``` ignore -//! [2022-01-30T12:54:11Z INFO client] DirectConnectionUpgradeSucceeded { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X") } +//! [2022-01-30T12:54:11Z INFO client] Event { remote_peer_id: PeerId("12D3KooWPjceQrSwdWXPyLLeABRXmuqt69Rg3sBYbU1Nft9HyQ6X"), result: Ok(2) } //! ``` diff --git a/misc/metrics/src/dcutr.rs b/misc/metrics/src/dcutr.rs index dc15e1f838d..3e60dca2cab 100644 --- a/misc/metrics/src/dcutr.rs +++ b/misc/metrics/src/dcutr.rs @@ -49,8 +49,6 @@ struct EventLabels { #[derive(Debug, Clone, Hash, PartialEq, Eq, EncodeLabelValue)] enum EventType { - InitiateDirectConnectionUpgrade, - RemoteInitiatedDirectConnectionUpgrade, DirectConnectionUpgradeSucceeded, DirectConnectionUpgradeFailed, } @@ -58,22 +56,13 @@ enum EventType { impl From<&libp2p_dcutr::Event> for EventType { fn from(event: &libp2p_dcutr::Event) -> Self { match event { - libp2p_dcutr::Event::InitiatedDirectConnectionUpgrade { + libp2p_dcutr::Event { remote_peer_id: _, - local_relayed_addr: _, - } => EventType::InitiateDirectConnectionUpgrade, - libp2p_dcutr::Event::RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id: _, - remote_relayed_addr: _, - } => EventType::RemoteInitiatedDirectConnectionUpgrade, - libp2p_dcutr::Event::DirectConnectionUpgradeSucceeded { - remote_peer_id: _, - connection_id: _, + result: Ok(_), } => EventType::DirectConnectionUpgradeSucceeded, - libp2p_dcutr::Event::DirectConnectionUpgradeFailed { + libp2p_dcutr::Event { remote_peer_id: _, - connection_id: _, - error: _, + result: Err(_), } => EventType::DirectConnectionUpgradeFailed, } } diff --git a/protocols/dcutr/CHANGELOG.md b/protocols/dcutr/CHANGELOG.md index 179db86dff2..cb84020ec5f 100644 --- a/protocols/dcutr/CHANGELOG.md +++ b/protocols/dcutr/CHANGELOG.md @@ -1,13 +1,13 @@ ## 0.11.0 - unreleased - Add `ConnectionId` to `Event::DirectConnectionUpgradeSucceeded` and `Event::DirectConnectionUpgradeFailed`. - See [PR 4558]. - -[PR 4558]: https://github.com/libp2p/rust-libp2p/pull/4558 - + See [PR 4558](https://github.com/libp2p/rust-libp2p/pull/4558). - Exchange address _candidates_ instead of external addresses in `CONNECT`. If hole-punching wasn't working properly for you until now, this might be the reason why. See [PR 4624](https://github.com/libp2p/rust-libp2p/pull/4624). +- Simplify public API. + We now only emit a single event: whether the hole-punch was successful or not. + See [PR XXXX](https://github.com/libp2p/rust-libp2p/pull/XXXX). ## 0.10.0 diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 33dc570d112..0e59585a416 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -25,6 +25,7 @@ quick-protobuf-codec = { workspace = true } thiserror = "1.0" void = "1" lru = "0.11.1" +futures-bounded = { workspace = true } [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index 72b30421346..6aecc596c71 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -20,7 +20,7 @@ //! [`NetworkBehaviour`] to act as a direct connection upgrade through relay node. 
-use crate::handler; +use crate::{handler, protocol}; use either::Either; use libp2p_core::connection::ConnectedPoint; use libp2p_core::multiaddr::Protocol; @@ -32,7 +32,7 @@ use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionHandler, ConnectionId, NewExternalAddrCandidate, THandler, THandlerOutEvent, }; -use libp2p_swarm::{NetworkBehaviour, NotifyHandler, StreamUpgradeError, THandlerInEvent, ToSwarm}; +use libp2p_swarm::{NetworkBehaviour, NotifyHandler, THandlerInEvent, ToSwarm}; use lru::LruCache; use std::collections::{HashMap, HashSet, VecDeque}; use std::num::NonZeroUsize; @@ -44,32 +44,25 @@ pub(crate) const MAX_NUMBER_OF_UPGRADE_ATTEMPTS: u8 = 3; /// The events produced by the [`Behaviour`]. #[derive(Debug)] -pub enum Event { - InitiatedDirectConnectionUpgrade { - remote_peer_id: PeerId, - local_relayed_addr: Multiaddr, - }, - RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id: PeerId, - remote_relayed_addr: Multiaddr, - }, - DirectConnectionUpgradeSucceeded { - remote_peer_id: PeerId, - connection_id: ConnectionId, - }, - DirectConnectionUpgradeFailed { - remote_peer_id: PeerId, - connection_id: ConnectionId, - error: Error, - }, +pub struct Event { + pub remote_peer_id: PeerId, + pub result: Result, +} + +#[derive(Debug, Error)] +#[error("Failed to hole-punch connection: {inner}")] +pub struct Error { + inner: InnerError, } #[derive(Debug, Error)] -pub enum Error { - #[error("Failed to dial peer.")] - Dial, - #[error("Failed to establish substream: {0}.")] - Handler(StreamUpgradeError), +enum InnerError { + #[error("Giving up after {0} dial attempts")] + AttemptsExceeded(u8), + #[error("Inbound stream error: {0}")] + InboundError(protocol::inbound::Error), + #[error("Outbound stream error: {0}")] + OutboundError(protocol::outbound::Error), } pub struct Behaviour { @@ -142,13 +135,12 @@ impl Behaviour { event: Either::Left(handler::relayed::Command::Connect), }) } else { - self.queued_events.extend([ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeFailed { - remote_peer_id: peer_id, - connection_id: failed_direct_connection, - error: Error::Dial, - }, - )]); + self.queued_events.extend([ToSwarm::GenerateEvent(Event { + remote_peer_id: peer_id, + result: Err(Error { + inner: InnerError::AttemptsExceeded(MAX_NUMBER_OF_UPGRADE_ATTEMPTS), + }), + })]); } } @@ -197,13 +189,6 @@ impl NetworkBehaviour for Behaviour { handler::relayed::Handler::new(connected_point, self.observed_addresses()); handler.on_behaviour_event(handler::relayed::Command::Connect); - self.queued_events.extend([ToSwarm::GenerateEvent( - Event::InitiatedDirectConnectionUpgrade { - remote_peer_id: peer, - local_relayed_addr: local_addr.clone(), - }, - )]); - return Ok(Either::Left(handler)); // TODO: We could make two `handler::relayed::Handler` here, one inbound one outbound. 
} self.direct_connections @@ -255,12 +240,10 @@ impl NetworkBehaviour for Behaviour { ); } - self.queued_events.extend([ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeSucceeded { - remote_peer_id: peer, - connection_id, - }, - )]); + self.queued_events.extend([ToSwarm::GenerateEvent(Event { + remote_peer_id: peer, + result: Ok(connection_id), + })]); } Ok(Either::Right(dummy::ConnectionHandler)) @@ -284,15 +267,7 @@ impl NetworkBehaviour for Behaviour { }; match handler_event { - Either::Left(handler::relayed::Event::InboundConnectRequest { remote_addr }) => { - self.queued_events.extend([ToSwarm::GenerateEvent( - Event::RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id: event_source, - remote_relayed_addr: remote_addr, - }, - )]); - } - Either::Left(handler::relayed::Event::InboundConnectNegotiated(remote_addrs)) => { + Either::Left(handler::relayed::Event::InboundConnectNegotiated { remote_addrs }) => { log::debug!( "Attempting to hole-punch as dialer to {event_source} using {remote_addrs:?}" ); @@ -308,14 +283,23 @@ impl NetworkBehaviour for Behaviour { .insert(maybe_direct_connection_id, relayed_connection_id); self.queued_events.push_back(ToSwarm::Dial { opts }); } - Either::Left(handler::relayed::Event::OutboundNegotiationFailed { error }) => { - self.queued_events.push_back(ToSwarm::GenerateEvent( - Event::DirectConnectionUpgradeFailed { - remote_peer_id: event_source, - connection_id: relayed_connection_id, - error: Error::Handler(error), - }, - )); + Either::Left(handler::relayed::Event::InboundConnectFailed { error }) => { + self.queued_events.push_back(ToSwarm::GenerateEvent(Event { + remote_peer_id: event_source, + result: Err(Error { + inner: InnerError::InboundError(error), + }), + })); + } + Either::Left(handler::relayed::Event::OutboundConnectFailed { error }) => { + self.queued_events.push_back(ToSwarm::GenerateEvent(Event { + remote_peer_id: event_source, + result: Err(Error { + inner: InnerError::OutboundError(error), + }), + })); + + // Maybe treat these as transient and retry? } Either::Left(handler::relayed::Event::OutboundConnectNegotiated { remote_addrs }) => { log::debug!( diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index 23ab9f4ae5a..9d600d234e5 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -21,22 +21,26 @@ //! [`ConnectionHandler`] handling relayed connection potentially upgraded to a direct connection. 
use crate::behaviour::MAX_NUMBER_OF_UPGRADE_ATTEMPTS; -use crate::protocol; +use crate::{protocol, PROTOCOL_NAME}; use either::Either; use futures::future; -use futures::future::{BoxFuture, FutureExt}; use libp2p_core::multiaddr::Multiaddr; -use libp2p_core::upgrade::DeniedUpgrade; +use libp2p_core::upgrade::{DeniedUpgrade, ReadyUpgrade}; use libp2p_core::ConnectedPoint; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, ListenUpgradeError, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, StreamUpgradeError, SubstreamProtocol, + ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, + SubstreamProtocol, }; +use protocol::{inbound, outbound}; use std::collections::VecDeque; +use std::io; use std::task::{Context, Poll}; +use std::time::Duration; +use void::Void; #[derive(Debug)] pub enum Command { @@ -45,26 +49,14 @@ pub enum Command { #[derive(Debug)] pub enum Event { - InboundConnectRequest { - remote_addr: Multiaddr, - }, - InboundConnectNegotiated(Vec), - OutboundNegotiationFailed { - error: StreamUpgradeError, - }, - OutboundConnectNegotiated { - remote_addrs: Vec, - }, + InboundConnectNegotiated { remote_addrs: Vec }, + OutboundConnectNegotiated { remote_addrs: Vec }, + InboundConnectFailed { error: inbound::Error }, + OutboundConnectFailed { error: outbound::Error }, } pub struct Handler { endpoint: ConnectedPoint, - /// A pending fatal error that results in the connection being closed. - pending_error: Option< - StreamUpgradeError< - Either, - >, - >, /// Queue of events to return when polled. queued_events: VecDeque< ConnectionHandlerEvent< @@ -74,9 +66,12 @@ pub struct Handler { ::Error, >, >, - /// Inbound connect, accepted by the behaviour, pending completion. - inbound_connect: - Option, protocol::inbound::UpgradeError>>>, + + // Inbound DCUtR handshakes + inbound_stream: futures_bounded::FuturesSet, inbound::Error>>, + + // Outbound DCUtR handshake. + outbound_stream: futures_bounded::FuturesSet, outbound::Error>>, /// The addresses we will send to the other party for hole-punching attempts. holepunch_candidates: Vec, @@ -88,9 +83,9 @@ impl Handler { pub fn new(endpoint: ConnectedPoint, holepunch_candidates: Vec) -> Self { Self { endpoint, - pending_error: Default::default(), queued_events: Default::default(), - inbound_connect: Default::default(), + inbound_stream: futures_bounded::FuturesSet::new(Duration::from_secs(10), 1), + outbound_stream: futures_bounded::FuturesSet::new(Duration::from_secs(10), 1), holepunch_candidates, attempts: 0, } @@ -106,29 +101,19 @@ impl Handler { >, ) { match output { - future::Either::Left(inbound_connect) => { + future::Either::Left(stream) => { if self - .inbound_connect - .replace( - inbound_connect - .accept(self.holepunch_candidates.clone()) - .boxed(), - ) - .is_some() + .inbound_stream + .try_push(inbound::handshake( + stream, + self.holepunch_candidates.clone(), + )) + .is_err() { log::warn!( - "New inbound connect stream while still upgrading previous one. \ - Replacing previous with new.", + "New inbound connect stream while still upgrading previous one. 
Replacing previous with new.", ); } - let remote_addr = match &self.endpoint { - ConnectedPoint::Dialer { address, role_override: _ } => address.clone(), - ConnectedPoint::Listener { ..} => unreachable!("`::listen_protocol` denies all incoming substreams as a listener."), - }; - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundConnectRequest { remote_addr }, - )); self.attempts += 1; } // A connection listener denies all incoming substreams, thus none can ever be fully negotiated. @@ -139,8 +124,7 @@ impl Handler { fn on_fully_negotiated_outbound( &mut self, FullyNegotiatedOutbound { - protocol: protocol::outbound::Connect { obs_addrs }, - .. + protocol: stream, .. }: FullyNegotiatedOutbound< ::OutboundProtocol, ::OutboundOpenInfo, @@ -150,12 +134,18 @@ impl Handler { self.endpoint.is_listener(), "A connection dialer never initiates a connection upgrade." ); - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundConnectNegotiated { - remote_addrs: obs_addrs, - }, - )); + if self + .outbound_stream + .try_push(outbound::handshake( + stream, + self.holepunch_candidates.clone(), + )) + .is_err() + { + log::warn!( + "New outbound connect stream while still upgrading previous one. Replacing previous with new.", + ); + } } fn on_listen_upgrade_error( @@ -165,10 +155,7 @@ impl Handler { ::InboundProtocol, >, ) { - self.pending_error = Some(StreamUpgradeError::Apply(match error { - Either::Left(e) => Either::Left(e), - Either::Right(v) => void::unreachable(v), - })); + void::unreachable(error.into_inner()); } fn on_dial_upgrade_error( @@ -178,50 +165,33 @@ impl Handler { ::OutboundProtocol, >, ) { - match error { - StreamUpgradeError::Timeout => { - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundNegotiationFailed { - error: StreamUpgradeError::Timeout, - }, - )); - } - StreamUpgradeError::NegotiationFailed => { - // The remote merely doesn't support the DCUtR protocol. - // This is no reason to close the connection, which may - // successfully communicate with other protocols already. - self.queued_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour( - Event::OutboundNegotiationFailed { - error: StreamUpgradeError::NegotiationFailed, - }, - )); - } - _ => { - // Anything else is considered a fatal error or misbehaviour of - // the remote peer and results in closing the connection. - self.pending_error = Some(error.map_upgrade_err(Either::Right)); - } - } + let error = match error { + StreamUpgradeError::Apply(v) => void::unreachable(v), + StreamUpgradeError::NegotiationFailed => outbound::Error::Unsupported, + StreamUpgradeError::Io(e) => outbound::Error::Io(e), + StreamUpgradeError::Timeout => outbound::Error::Io(io::ErrorKind::TimedOut.into()), + }; + + self.queued_events + .push_back(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { error }, + )) } } impl ConnectionHandler for Handler { type FromBehaviour = Command; type ToBehaviour = Event; - type Error = StreamUpgradeError< - Either, - >; - type InboundProtocol = Either; - type OutboundProtocol = protocol::outbound::Upgrade; + type Error = Void; + type InboundProtocol = Either, DeniedUpgrade>; + type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); type InboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol { match self.endpoint { ConnectedPoint::Dialer { .. 
} => { - SubstreamProtocol::new(Either::Left(protocol::inbound::Upgrade {}), ()) + SubstreamProtocol::new(Either::Left(ReadyUpgrade::new(PROTOCOL_NAME)), ()) } ConnectedPoint::Listener { .. } => { // By the protocol specification the listening side of a relayed connection @@ -239,10 +209,7 @@ impl ConnectionHandler for Handler { Command::Connect => { self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - protocol::outbound::Upgrade::new(self.holepunch_candidates.clone()), - (), - ), + protocol: SubstreamProtocol::new(ReadyUpgrade::new(PROTOCOL_NAME), ()), }); self.attempts += 1; } @@ -268,31 +235,55 @@ impl ConnectionHandler for Handler { Self::Error, >, > { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - // Return queued events. if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); } - if let Some(Poll::Ready(result)) = self.inbound_connect.as_mut().map(|f| f.poll_unpin(cx)) { - self.inbound_connect = None; - match result { - Ok(addresses) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::InboundConnectNegotiated(addresses), - )); - } - Err(e) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Left(e), - ))) - } + match self.inbound_stream.poll_unpin(cx) { + Poll::Ready(Ok(Ok(addresses))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectNegotiated { + remote_addrs: addresses, + }, + )) + } + Poll::Ready(Ok(Err(error))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectFailed { error }, + )) + } + Poll::Ready(Err(futures_bounded::Timeout { .. })) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::InboundConnectFailed { + error: inbound::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )) + } + Poll::Pending => {} + } + + match self.outbound_stream.poll_unpin(cx) { + Poll::Ready(Ok(Ok(addresses))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiated { + remote_addrs: addresses, + }, + )) + } + Poll::Ready(Ok(Err(error))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { error }, + )) + } + Poll::Ready(Err(futures_bounded::Timeout { .. 
})) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectFailed { + error: outbound::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )) } + Poll::Pending => {} } Poll::Pending diff --git a/protocols/dcutr/src/lib.rs b/protocols/dcutr/src/lib.rs index 389365f94c5..7c5d28aba19 100644 --- a/protocols/dcutr/src/lib.rs +++ b/protocols/dcutr/src/lib.rs @@ -36,8 +36,8 @@ mod proto { pub use behaviour::{Behaviour, Error, Event}; pub use protocol::PROTOCOL_NAME; pub mod inbound { - pub use crate::protocol::inbound::UpgradeError; + pub use crate::protocol::inbound::ProtocolViolation; } pub mod outbound { - pub use crate::protocol::outbound::UpgradeError; + pub use crate::protocol::outbound::ProtocolViolation; } diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs index d38b6f4559a..95665843724 100644 --- a/protocols/dcutr/src/protocol/inbound.rs +++ b/protocols/dcutr/src/protocol/inbound.rs @@ -20,114 +20,91 @@ use crate::proto; use asynchronous_codec::Framed; -use futures::{future::BoxFuture, prelude::*}; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p_swarm::{Stream, StreamProtocol}; +use futures::prelude::*; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_swarm::Stream; use std::convert::TryFrom; -use std::iter; +use std::io; use thiserror::Error; -pub struct Upgrade {} - -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; +pub(crate) async fn handshake( + stream: Stream, + candidates: Vec, +) -> Result, Error> { + let mut stream = Framed::new( + stream, + quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), + ); + + let proto::HolePunch { type_pb, ObsAddrs } = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; + + if ObsAddrs.is_empty() { + return Err(Error::Protocol(ProtocolViolation::NoAddresses)); + }; + + let obs_addrs = ObsAddrs + .into_iter() + .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { + Ok(a) => Some(a), + Err(e) => { + log::debug!("Unable to parse multiaddr: {e}"); + None + } + }) + // Filter out relayed addresses. + .filter(|a| { + if a.iter().any(|p| p == Protocol::P2pCircuit) { + log::debug!("Dropping relayed address {a}"); + false + } else { + true + } + }) + .collect(); - fn protocol_info(&self) -> Self::InfoIter { - iter::once(super::PROTOCOL_NAME) + if !matches!(type_pb, proto::Type::CONNECT) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeSync)); } -} - -impl upgrade::InboundUpgrade for Upgrade { - type Output = PendingConnect; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; - fn upgrade_inbound(self, substream: Stream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), - ); + let msg = proto::HolePunch { + type_pb: proto::Type::CONNECT, + ObsAddrs: candidates.into_iter().map(|a| a.to_vec()).collect(), + }; - async move { - let proto::HolePunch { type_pb, ObsAddrs } = - substream.next().await.ok_or(UpgradeError::StreamClosed)??; - - let obs_addrs = if ObsAddrs.is_empty() { - return Err(UpgradeError::NoAddresses); - } else { - ObsAddrs - .into_iter() - .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { - Ok(a) => Some(a), - Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); - None - } - }) - // Filter out relayed addresses. 
- .filter(|a| { - if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); - false - } else { - true - } - }) - .collect::>() - }; + stream.send(msg).await?; + let proto::HolePunch { type_pb, .. } = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; - match type_pb { - proto::Type::CONNECT => {} - proto::Type::SYNC => return Err(UpgradeError::UnexpectedTypeSync), - } - - Ok(PendingConnect { - substream, - remote_obs_addrs: obs_addrs, - }) - } - .boxed() + if !matches!(type_pb, proto::Type::SYNC) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeConnect)); } -} -pub struct PendingConnect { - substream: Framed>, - remote_obs_addrs: Vec, + Ok(obs_addrs) } -impl PendingConnect { - pub async fn accept( - mut self, - local_obs_addrs: Vec, - ) -> Result, UpgradeError> { - let msg = proto::HolePunch { - type_pb: proto::Type::CONNECT, - ObsAddrs: local_obs_addrs.into_iter().map(|a| a.to_vec()).collect(), - }; - - self.substream.send(msg).await?; - let proto::HolePunch { type_pb, .. } = self - .substream - .next() - .await - .ok_or(UpgradeError::StreamClosed)??; - - match type_pb { - proto::Type::CONNECT => return Err(UpgradeError::UnexpectedTypeConnect), - proto::Type::SYNC => {} - } +#[derive(Debug, Error)] +pub enum Error { + #[error("IO error")] + Io(#[from] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} - Ok(self.remote_obs_addrs) +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) } } #[derive(Debug, Error)] -pub enum UpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected at least one address in reservation.")] NoAddresses, #[error("Failed to parse response type field.")] diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs index 960d98cbe66..67c7116d706 100644 --- a/protocols/dcutr/src/protocol/outbound.rs +++ b/protocols/dcutr/src/protocol/outbound.rs @@ -19,115 +19,102 @@ // DEALINGS IN THE SOFTWARE. 
use crate::proto; +use crate::PROTOCOL_NAME; use asynchronous_codec::Framed; -use futures::{future::BoxFuture, prelude::*}; +use futures::prelude::*; use futures_timer::Delay; use instant::Instant; -use libp2p_core::{multiaddr::Protocol, upgrade, Multiaddr}; -use libp2p_swarm::{Stream, StreamProtocol}; +use libp2p_core::{multiaddr::Protocol, Multiaddr}; +use libp2p_swarm::Stream; use std::convert::TryFrom; -use std::iter; +use std::io; use thiserror::Error; -pub struct Upgrade { - obs_addrs: Vec, -} +pub(crate) async fn handshake( + stream: Stream, + candidates: Vec, +) -> Result, Error> { + let mut stream = Framed::new( + stream, + quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), + ); -impl upgrade::UpgradeInfo for Upgrade { - type Info = StreamProtocol; - type InfoIter = iter::Once; + let msg = proto::HolePunch { + type_pb: proto::Type::CONNECT, + ObsAddrs: candidates.into_iter().map(|a| a.to_vec()).collect(), + }; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(super::PROTOCOL_NAME) - } -} + stream.send(msg).await?; -impl Upgrade { - pub fn new(obs_addrs: Vec) -> Self { - Self { obs_addrs } - } -} + let sent_time = Instant::now(); -impl upgrade::OutboundUpgrade for Upgrade { - type Output = Connect; - type Error = UpgradeError; - type Future = BoxFuture<'static, Result>; + let proto::HolePunch { type_pb, ObsAddrs } = stream + .next() + .await + .ok_or(io::Error::from(io::ErrorKind::UnexpectedEof))??; - fn upgrade_outbound(self, substream: Stream, _: Self::Info) -> Self::Future { - let mut substream = Framed::new( - substream, - quick_protobuf_codec::Codec::new(super::MAX_MESSAGE_SIZE_BYTES), - ); + let rtt = sent_time.elapsed(); - let msg = proto::HolePunch { - type_pb: proto::Type::CONNECT, - ObsAddrs: self.obs_addrs.into_iter().map(|a| a.to_vec()).collect(), - }; + if !matches!(type_pb, proto::Type::CONNECT) { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeSync)); + } - async move { - substream.send(msg).await?; + if ObsAddrs.is_empty() { + return Err(Error::Protocol(ProtocolViolation::NoAddresses)); + } - let sent_time = Instant::now(); + let obs_addrs = ObsAddrs + .into_iter() + .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { + Ok(a) => Some(a), + Err(e) => { + log::debug!("Unable to parse multiaddr: {e}"); + None + } + }) + // Filter out relayed addresses. + .filter(|a| { + if a.iter().any(|p| p == Protocol::P2pCircuit) { + log::debug!("Dropping relayed address {a}"); + false + } else { + true + } + }) + .collect(); - let proto::HolePunch { type_pb, ObsAddrs } = - substream.next().await.ok_or(UpgradeError::StreamClosed)??; + let msg = proto::HolePunch { + type_pb: proto::Type::SYNC, + ObsAddrs: vec![], + }; - let rtt = sent_time.elapsed(); + stream.send(msg).await?; - match type_pb { - proto::Type::CONNECT => {} - proto::Type::SYNC => return Err(UpgradeError::UnexpectedTypeSync), - } + Delay::new(rtt / 2).await; - let obs_addrs = if ObsAddrs.is_empty() { - return Err(UpgradeError::NoAddresses); - } else { - ObsAddrs - .into_iter() - .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { - Ok(a) => Some(a), - Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); - None - } - }) - // Filter out relayed addresses. 
- .filter(|a| { - if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); - false - } else { - true - } - }) - .collect::>() - }; - - let msg = proto::HolePunch { - type_pb: proto::Type::SYNC, - ObsAddrs: vec![], - }; - - substream.send(msg).await?; - - Delay::new(rtt / 2).await; - - Ok(Connect { obs_addrs }) - } - .boxed() - } + Ok(obs_addrs) } -pub struct Connect { - pub obs_addrs: Vec, +#[derive(Debug, Error)] +pub enum Error { + #[error("IO error")] + Io(#[from] io::Error), + #[error("Remote does not support the `{PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} + +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) + } } #[derive(Debug, Error)] -pub enum UpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be set.")] MissingStatusField, #[error("Expected 'reservation' field to be set.")] diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index f43144154a7..1c5ddb5a972 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -69,21 +69,6 @@ async fn connect() { src.dial_and_wait(dst_relayed_addr.clone()).await; - while let Ok(event) = src.next_swarm_event().await.try_into_behaviour_event() { - match event { - ClientEvent::Dcutr(dcutr::Event::RemoteInitiatedDirectConnectionUpgrade { - remote_peer_id, - remote_relayed_addr, - }) => { - if remote_peer_id == dst_peer_id && remote_relayed_addr == dst_relayed_addr { - break; - } - } - ClientEvent::Identify(_) => {} - other => panic!("Unexpected event: {other:?}."), - } - } - let dst_addr = dst_tcp_addr.with(Protocol::P2p(dst_peer_id)); let established_conn_id = src @@ -99,9 +84,10 @@ async fn connect() { let reported_conn_id = src .wait(move |e| match e { - SwarmEvent::Behaviour(ClientEvent::Dcutr( - dcutr::Event::DirectConnectionUpgradeSucceeded { connection_id, .. }, - )) => Some(connection_id), + SwarmEvent::Behaviour(ClientEvent::Dcutr(dcutr::Event { + result: Ok(connection_id), + .. + })) => Some(connection_id), _ => None, }) .await; From 823d0b2b7551fb69bb0a62cec11a51ebd3613928 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 1 Nov 2023 12:51:44 +1100 Subject: [PATCH 11/33] feat(relay): don't close connections upon errors in relay server To remove the usages of `ConnectionHandlerEvent::Close` from the relay-server, we unify what used to be called `CircuitFailedReason` and `FatalUpgradeError`. Whilst the errors may be fatal for the particular circuit, they are not necessarily fatal for the entire connection. Related: #3591. Resolves: #4716. Pull-Request: #4718. 
--- misc/metrics/src/relay.rs | 5 + protocols/relay/CHANGELOG.md | 7 + protocols/relay/src/behaviour.rs | 32 +- protocols/relay/src/behaviour/handler.rs | 293 ++++++++++-------- protocols/relay/src/lib.rs | 7 +- protocols/relay/src/priv_client/handler.rs | 5 +- protocols/relay/src/protocol/inbound_hop.rs | 36 +-- protocols/relay/src/protocol/inbound_stop.rs | 10 +- protocols/relay/src/protocol/outbound_hop.rs | 4 - protocols/relay/src/protocol/outbound_stop.rs | 167 ++++------ 10 files changed, 281 insertions(+), 285 deletions(-) diff --git a/misc/metrics/src/relay.rs b/misc/metrics/src/relay.rs index 9ba692721e9..607daf3f1e1 100644 --- a/misc/metrics/src/relay.rs +++ b/misc/metrics/src/relay.rs @@ -66,20 +66,25 @@ impl From<&libp2p_relay::Event> for EventType { fn from(event: &libp2p_relay::Event) -> Self { match event { libp2p_relay::Event::ReservationReqAccepted { .. } => EventType::ReservationReqAccepted, + #[allow(deprecated)] libp2p_relay::Event::ReservationReqAcceptFailed { .. } => { EventType::ReservationReqAcceptFailed } libp2p_relay::Event::ReservationReqDenied { .. } => EventType::ReservationReqDenied, + #[allow(deprecated)] libp2p_relay::Event::ReservationReqDenyFailed { .. } => { EventType::ReservationReqDenyFailed } libp2p_relay::Event::ReservationTimedOut { .. } => EventType::ReservationTimedOut, libp2p_relay::Event::CircuitReqDenied { .. } => EventType::CircuitReqDenied, + #[allow(deprecated)] libp2p_relay::Event::CircuitReqOutboundConnectFailed { .. } => { EventType::CircuitReqOutboundConnectFailed } + #[allow(deprecated)] libp2p_relay::Event::CircuitReqDenyFailed { .. } => EventType::CircuitReqDenyFailed, libp2p_relay::Event::CircuitReqAccepted { .. } => EventType::CircuitReqAccepted, + #[allow(deprecated)] libp2p_relay::Event::CircuitReqAcceptFailed { .. } => EventType::CircuitReqAcceptFailed, libp2p_relay::Event::CircuitClosed { .. } => EventType::CircuitClosed, } diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index 20d8370cf6d..200cc4bc18d 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -1,5 +1,12 @@ ## 0.17.0 - unreleased +- Don't close connections on protocol failures within the relay-server. + To achieve this, error handling was restructured: + - `libp2p::relay::outbound::stop::FatalUpgradeError` has been removed. + - `libp2p::relay::outbound::stop::{Error, ProtocolViolation}` have been introduced. + - Several variants of `libp2p::relay::Event` have been deprecated. + + See [PR 4718](https://github.com/libp2p/rust-libp2p/pull/4718). - Fix a rare race condition when making a reservation on a relay that could lead to a failed reservation. See [PR 4747](https://github.com/libp2p/rust-lib2pp/pulls/4747). - Propagate errors of relay client to the listener / dialer. diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 256bb463b5a..5b9f1fe5843 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -34,7 +34,7 @@ use libp2p_identity::PeerId; use libp2p_swarm::behaviour::{ConnectionClosed, FromSwarm}; use libp2p_swarm::{ dummy, ConnectionDenied, ConnectionId, ExternalAddresses, NetworkBehaviour, NotifyHandler, - StreamUpgradeError, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, + THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; use std::collections::{hash_map, HashMap, HashSet, VecDeque}; use std::num::NonZeroU32; @@ -169,16 +169,22 @@ pub enum Event { renewed: bool, }, /// Accepting an inbound reservation request failed. 
+ #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] ReservationReqAcceptFailed { src_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound reservation request has been denied. ReservationReqDenied { src_peer_id: PeerId }, /// Denying an inbound reservation request has failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] ReservationReqDenyFailed { src_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound reservation has timed out. ReservationTimedOut { src_peer_id: PeerId }, @@ -188,10 +194,13 @@ pub enum Event { dst_peer_id: PeerId, }, /// Denying an inbound circuit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] CircuitReqDenyFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound cirucit request has been accepted. CircuitReqAccepted { @@ -199,16 +208,22 @@ pub enum Event { dst_peer_id: PeerId, }, /// An outbound connect for an inbound cirucit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] CircuitReqOutboundConnectFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: StreamUpgradeError, + error: outbound_stop::Error, }, /// Accepting an inbound circuit request failed. + #[deprecated( + note = "Will be removed in favor of logging them internally, see for details." + )] CircuitReqAcceptFailed { src_peer_id: PeerId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound circuit has closed. 
CircuitClosed { @@ -455,6 +470,7 @@ impl NetworkBehaviour for Behaviour { )); } handler::Event::ReservationReqAcceptFailed { error } => { + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::ReservationReqAcceptFailed { src_peer_id: event_source, @@ -470,6 +486,7 @@ impl NetworkBehaviour for Behaviour { )); } handler::Event::ReservationReqDenyFailed { error } => { + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::ReservationReqDenyFailed { src_peer_id: event_source, @@ -592,6 +609,7 @@ impl NetworkBehaviour for Behaviour { self.circuits.remove(circuit_id); } + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::CircuitReqDenyFailed { src_peer_id: event_source, @@ -637,6 +655,7 @@ impl NetworkBehaviour for Behaviour { status, }), }); + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::CircuitReqOutboundConnectFailed { src_peer_id, @@ -662,6 +681,7 @@ impl NetworkBehaviour for Behaviour { error, } => { self.circuits.remove(circuit_id); + #[allow(deprecated)] self.queued_actions.push_back(ToSwarm::GenerateEvent( Event::CircuitReqAcceptFailed { src_peer_id: event_source, diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 60997a107e6..a2ba268392f 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -39,10 +39,10 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use std::collections::VecDeque; -use std::fmt; +use std::collections::{HashMap, VecDeque}; use std::task::{Context, Poll}; use std::time::Duration; +use std::{fmt, io}; const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); @@ -151,11 +151,11 @@ pub enum Event { renewed: bool, }, /// Accepting an inbound reservation request failed. - ReservationReqAcceptFailed { error: inbound_hop::UpgradeError }, + ReservationReqAcceptFailed { error: inbound_hop::Error }, /// An inbound reservation request has been denied. ReservationReqDenied {}, /// Denying an inbound reservation request has failed. - ReservationReqDenyFailed { error: inbound_hop::UpgradeError }, + ReservationReqDenyFailed { error: inbound_hop::Error }, /// An inbound reservation has timed out. ReservationTimedOut {}, /// An inbound circuit request has been received. @@ -172,7 +172,7 @@ pub enum Event { CircuitReqDenyFailed { circuit_id: Option, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An inbound circuit request has been accepted. CircuitReqAccepted { @@ -183,7 +183,7 @@ pub enum Event { CircuitReqAcceptFailed { circuit_id: CircuitId, dst_peer_id: PeerId, - error: inbound_hop::UpgradeError, + error: inbound_hop::Error, }, /// An outbound substream for an inbound circuit request has been /// negotiated. @@ -202,7 +202,7 @@ pub enum Event { src_connection_id: ConnectionId, inbound_circuit_req: inbound_hop::CircuitReq, status: proto::Status, - error: StreamUpgradeError, + error: outbound_stop::Error, }, /// An inbound circuit has closed. CircuitClosed { @@ -343,13 +343,6 @@ pub struct Handler { >, >, - /// A pending fatal error that results in the connection being closed. - pending_error: Option< - StreamUpgradeError< - Either, - >, - >, - /// The point in time when this connection started idleing. 
idle_at: Option, @@ -359,44 +352,41 @@ pub struct Handler { active_reservation: Option, /// Futures accepting an inbound circuit request. - circuit_accept_futures: - Futures>, + circuit_accept_futures: Futures>, /// Futures denying an inbound circuit request. - circuit_deny_futures: Futures<( - Option, - PeerId, - Result<(), inbound_hop::UpgradeError>, - )>, + circuit_deny_futures: Futures<(Option, PeerId, Result<(), inbound_hop::Error>)>, /// Futures relaying data for circuit between two peers. circuits: Futures<(CircuitId, PeerId, Result<(), std::io::Error>)>, - pending_connect_requests: VecDeque, - - workers: futures_bounded::FuturesSet< - Either< - Result< - Either, - inbound_hop::FatalUpgradeError, - >, - Result< - Result, - outbound_stop::FatalUpgradeError, - >, - >, + /// We issue a stream upgrade for each [`PendingConnect`] request. + pending_connect_requests: VecDeque, + + /// A `CONNECT` request is in flight for these circuits. + active_connect_requests: HashMap, + + inbound_workers: futures_bounded::FuturesSet< + Result, inbound_hop::Error>, + >, + outbound_workers: futures_bounded::FuturesMap< + CircuitId, + Result, >, } impl Handler { pub fn new(config: Config, endpoint: ConnectedPoint) -> Handler { Handler { - workers: futures_bounded::FuturesSet::new( + inbound_workers: futures_bounded::FuturesSet::new( + STREAM_TIMEOUT, + MAX_CONCURRENT_STREAMS_PER_CONNECTION, + ), + outbound_workers: futures_bounded::FuturesMap::new( STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, ), endpoint, config, queued_events: Default::default(), - pending_error: Default::default(), idle_at: None, reservation_request_future: Default::default(), circuit_accept_futures: Default::default(), @@ -404,21 +394,19 @@ impl Handler { circuits: Default::default(), active_reservation: Default::default(), pending_connect_requests: Default::default(), + active_connect_requests: Default::default(), } } fn on_fully_negotiated_inbound(&mut self, stream: Stream) { if self - .workers - .try_push( - inbound_hop::handle_inbound_request( - stream, - self.config.reservation_duration, - self.config.max_circuit_duration, - self.config.max_circuit_bytes, - ) - .map(Either::Left), - ) + .inbound_workers + .try_push(inbound_hop::handle_inbound_request( + stream, + self.config.reservation_duration, + self.config.max_circuit_duration, + self.config.max_circuit_bytes, + )) .is_err() { log::warn!("Dropping inbound stream because we are at capacity") @@ -426,18 +414,29 @@ impl Handler { } fn on_fully_negotiated_outbound(&mut self, stream: Stream) { - let stop_command = self + let connect = self .pending_connect_requests .pop_front() .expect("opened a stream without a pending stop command"); if self - .workers - .try_push(outbound_stop::connect(stream, stop_command).map(Either::Right)) + .outbound_workers + .try_push( + connect.circuit_id, + outbound_stop::connect( + stream, + connect.src_peer_id, + connect.max_circuit_duration, + connect.max_circuit_bytes, + ), + ) .is_err() { log::warn!("Dropping outbound stream because we are at capacity") } + + self.active_connect_requests + .insert(connect.circuit_id, connect); } fn on_dial_upgrade_error( @@ -447,21 +446,10 @@ impl Handler { ::OutboundProtocol, >, ) { - let (non_fatal_error, status) = match error { - StreamUpgradeError::Timeout => ( - StreamUpgradeError::Timeout, - proto::Status::CONNECTION_FAILED, - ), - StreamUpgradeError::NegotiationFailed => { - // The remote has previously done a reservation. 
Doing a reservation but not - // supporting the stop protocol is pointless, thus disconnecting. - self.pending_error = Some(StreamUpgradeError::NegotiationFailed); - return; - } - StreamUpgradeError::Io(e) => { - self.pending_error = Some(StreamUpgradeError::Io(e)); - return; - } + let error = match error { + StreamUpgradeError::Timeout => outbound_stop::Error::Io(io::ErrorKind::TimedOut.into()), + StreamUpgradeError::NegotiationFailed => outbound_stop::Error::Unsupported, + StreamUpgradeError::Io(e) => outbound_stop::Error::Io(e), StreamUpgradeError::Apply(v) => void::unreachable(v), }; @@ -477,16 +465,16 @@ impl Handler { src_peer_id: stop_command.src_peer_id, src_connection_id: stop_command.src_connection_id, inbound_circuit_req: stop_command.inbound_circuit_req, - status, - error: non_fatal_error, + status: proto::Status::CONNECTION_FAILED, + error, }, )); } } enum ReservationRequestFuture { - Accepting(BoxFuture<'static, Result<(), inbound_hop::UpgradeError>>), - Denying(BoxFuture<'static, Result<(), inbound_hop::UpgradeError>>), + Accepting(BoxFuture<'static, Result<(), inbound_hop::Error>>), + Denying(BoxFuture<'static, Result<(), inbound_hop::Error>>), } type Futures = FuturesUnordered>; @@ -494,9 +482,7 @@ type Futures = FuturesUnordered>; impl ConnectionHandler for Handler { type FromBehaviour = In; type ToBehaviour = Event; - type Error = StreamUpgradeError< - Either, - >; + type Error = void::Void; type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); type OutboundProtocol = ReadyUpgrade; @@ -542,14 +528,13 @@ impl ConnectionHandler for Handler { src_peer_id, src_connection_id, } => { - self.pending_connect_requests - .push_back(outbound_stop::PendingConnect::new( - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - &self.config, - )); + self.pending_connect_requests.push_back(PendingConnect::new( + circuit_id, + inbound_circuit_req, + src_peer_id, + src_connection_id, + &self.config, + )); self.queued_events .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: SubstreamProtocol::new(ReadyUpgrade::new(STOP_PROTOCOL_NAME), ()), @@ -614,12 +599,6 @@ impl ConnectionHandler for Handler { Self::Error, >, > { - // Check for a pending (fatal) error. - if let Some(err) = self.pending_error.take() { - // The handler will not be polled again by the `Swarm`. - return Poll::Ready(ConnectionHandlerEvent::Close(err)); - } - // Return queued events. 
if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); @@ -651,61 +630,92 @@ impl ConnectionHandler for Handler { } } - // Process protocol requests - match self.workers.poll_unpin(cx) { - Poll::Ready(Ok(Either::Left(Ok(Either::Left(inbound_reservation_req))))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::ReservationReqReceived { - inbound_reservation_req, - endpoint: self.endpoint.clone(), - renewed: self.active_reservation.is_some(), - }, - )); - } - Poll::Ready(Ok(Either::Left(Ok(Either::Right(inbound_circuit_req))))) => { - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( - Event::CircuitReqReceived { - inbound_circuit_req, - endpoint: self.endpoint.clone(), - }, - )); + // Process inbound protocol workers + loop { + match self.inbound_workers.poll_unpin(cx) { + Poll::Ready(Ok(Ok(Either::Left(inbound_reservation_req)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::ReservationReqReceived { + inbound_reservation_req, + endpoint: self.endpoint.clone(), + renewed: self.active_reservation.is_some(), + }, + )); + } + Poll::Ready(Ok(Ok(Either::Right(inbound_circuit_req)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::CircuitReqReceived { + inbound_circuit_req, + endpoint: self.endpoint.clone(), + }, + )); + } + Poll::Ready(Err(e)) => { + log::debug!("Inbound stream operation timed out: {e}"); + continue; + } + Poll::Ready(Ok(Err(e))) => { + log::debug!("Inbound stream operation failed: {e}"); + continue; + } + Poll::Pending => { + break; + } } - Poll::Ready(Ok(Either::Right(Ok(Ok(circuit))))) => { + } + + // Process outbound protocol workers + match self.outbound_workers.poll_unpin(cx) { + Poll::Ready((id, Ok(Ok(circuit)))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::OutboundConnectNegotiated { - circuit_id: circuit.circuit_id, - src_peer_id: circuit.src_peer_id, - src_connection_id: circuit.src_connection_id, - inbound_circuit_req: circuit.inbound_circuit_req, + circuit_id: id, + src_peer_id: connect.src_peer_id, + src_connection_id: connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, dst_stream: circuit.dst_stream, dst_pending_data: circuit.dst_pending_data, }, )); } - Poll::Ready(Ok(Either::Right(Ok(Err(circuit_failed))))) => { + Poll::Ready((id, Ok(Err(error)))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( Event::OutboundConnectNegotiationFailed { - circuit_id: circuit_failed.circuit_id, - src_peer_id: circuit_failed.src_peer_id, - src_connection_id: circuit_failed.src_connection_id, - inbound_circuit_req: circuit_failed.inbound_circuit_req, - status: circuit_failed.status, - error: circuit_failed.error, + circuit_id: connect.circuit_id, + src_peer_id: connect.src_peer_id, + src_connection_id: connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, + status: error.to_status(), + error, }, )); } - Poll::Ready(Err(futures_bounded::Timeout { .. 
})) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Timeout)); - } - Poll::Ready(Ok(Either::Left(Err(e)))) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Left(e), - ))); - } - Poll::Ready(Ok(Either::Right(Err(e)))) => { - return Poll::Ready(ConnectionHandlerEvent::Close(StreamUpgradeError::Apply( - Either::Right(e), - ))); + Poll::Ready((id, Err(futures_bounded::Timeout { .. }))) => { + let connect = self + .active_connect_requests + .remove(&id) + .expect("must have pending connect"); + + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + Event::OutboundConnectNegotiationFailed { + circuit_id: connect.circuit_id, + src_peer_id: connect.src_peer_id, + src_connection_id: connect.src_connection_id, + inbound_circuit_req: connect.inbound_circuit_req, + status: proto::Status::CONNECTION_FAILED, // Best fit? + error: outbound_stop::Error::Io(io::ErrorKind::TimedOut.into()), + }, + )); } Poll::Pending => {} } @@ -903,3 +913,32 @@ struct CircuitParts { dst_stream: Stream, dst_pending_data: Bytes, } + +/// Holds everything we know about a to-be-issued `CONNECT` request to a peer. +struct PendingConnect { + circuit_id: CircuitId, + inbound_circuit_req: inbound_hop::CircuitReq, + src_peer_id: PeerId, + src_connection_id: ConnectionId, + max_circuit_duration: Duration, + max_circuit_bytes: u64, +} + +impl PendingConnect { + fn new( + circuit_id: CircuitId, + inbound_circuit_req: inbound_hop::CircuitReq, + src_peer_id: PeerId, + src_connection_id: ConnectionId, + config: &Config, + ) -> Self { + Self { + circuit_id, + inbound_circuit_req, + src_peer_id, + src_connection_id, + max_circuit_duration: config.max_circuit_duration, + max_circuit_bytes: config.max_circuit_bytes, + } + } +} diff --git a/protocols/relay/src/lib.rs b/protocols/relay/src/lib.rs index 09d326be9fb..eca3578d599 100644 --- a/protocols/relay/src/lib.rs +++ b/protocols/relay/src/lib.rs @@ -45,7 +45,10 @@ pub use protocol::{HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; /// Types related to the relay protocol inbound. 
pub mod inbound { pub mod hop { - pub use crate::protocol::inbound_hop::FatalUpgradeError; + #[deprecated(note = "Renamed to `Error`.")] + pub type FatalUpgradeError = Error; + + pub use crate::protocol::inbound_hop::Error; } } @@ -55,7 +58,7 @@ pub mod outbound { pub use crate::protocol::outbound_hop::{ConnectError, ProtocolViolation, ReserveError}; } pub mod stop { - pub use crate::protocol::outbound_stop::FatalUpgradeError; + pub use crate::protocol::outbound_stop::{Error, ProtocolViolation}; } } diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index b3fb345e215..3e79b60ef97 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -21,7 +21,6 @@ use crate::priv_client::transport; use crate::protocol::{self, inbound_stop, outbound_hop}; use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; -use either::Either; use futures::channel::{mpsc, oneshot}; use futures::future::FutureExt; use futures_timer::Delay; @@ -231,9 +230,7 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = In; type ToBehaviour = Event; - type Error = StreamUpgradeError< - Either, - >; + type Error = void::Void; type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); type OutboundProtocol = ReadyUpgrade; diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index b44d29e42ce..69ec495261f 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -35,25 +35,11 @@ use crate::proto::message_v2::pb::mod_HopMessage::Type; use crate::protocol::MAX_MESSAGE_SIZE; #[derive(Debug, Error)] -pub enum UpgradeError { - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), -} - -impl From for UpgradeError { - fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) - } -} - -#[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum Error { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), #[error("Stream closed")] StreamClosed, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Failed to parse peer id.")] ParsePeerId, #[error("Expected 'peer' field to be set.")] @@ -70,7 +56,7 @@ pub struct ReservationReq { } impl ReservationReq { - pub async fn accept(self, addrs: Vec) -> Result<(), FatalUpgradeError> { + pub async fn accept(self, addrs: Vec) -> Result<(), Error> { if addrs.is_empty() { log::debug!( "Accepting relay reservation without providing external addresses of local node. 
\ @@ -104,7 +90,7 @@ impl ReservationReq { self.send(msg).await } - pub async fn deny(self, status: proto::Status) -> Result<(), FatalUpgradeError> { + pub async fn deny(self, status: proto::Status) -> Result<(), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -116,7 +102,7 @@ impl ReservationReq { self.send(msg).await } - async fn send(mut self, msg: proto::HopMessage) -> Result<(), FatalUpgradeError> { + async fn send(mut self, msg: proto::HopMessage) -> Result<(), Error> { self.substream.send(msg).await?; self.substream.flush().await?; self.substream.close().await?; @@ -135,7 +121,7 @@ impl CircuitReq { self.dst } - pub async fn accept(mut self) -> Result<(Stream, Bytes), FatalUpgradeError> { + pub async fn accept(mut self) -> Result<(Stream, Bytes), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -160,7 +146,7 @@ impl CircuitReq { Ok((io, read_buffer.freeze())) } - pub async fn deny(mut self, status: proto::Status) -> Result<(), FatalUpgradeError> { + pub async fn deny(mut self, status: proto::Status) -> Result<(), Error> { let msg = proto::HopMessage { type_pb: proto::HopMessageType::STATUS, peer: None, @@ -185,13 +171,13 @@ pub(crate) async fn handle_inbound_request( reservation_duration: Duration, max_circuit_duration: Duration, max_circuit_bytes: u64, -) -> Result, FatalUpgradeError> { +) -> Result, Error> { let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); let res = substream.next().await; if let None | Some(Err(_)) = res { - return Err(FatalUpgradeError::StreamClosed); + return Err(Error::StreamClosed); } let proto::HopMessage { @@ -212,17 +198,17 @@ pub(crate) async fn handle_inbound_request( Type::CONNECT => { let peer_id_res = match peer { Some(r) => PeerId::from_bytes(&r.id), - None => return Err(FatalUpgradeError::MissingPeer), + None => return Err(Error::MissingPeer), }; let dst = match peer_id_res { Ok(res) => res, - Err(_) => return Err(FatalUpgradeError::ParsePeerId), + Err(_) => return Err(Error::ParsePeerId), }; Either::Right(CircuitReq { dst, substream }) } - Type::STATUS => return Err(FatalUpgradeError::UnexpectedTypeStatus), + Type::STATUS => return Err(Error::UnexpectedTypeStatus), }; Ok(req) diff --git a/protocols/relay/src/protocol/inbound_stop.rs b/protocols/relay/src/protocol/inbound_stop.rs index 22b8244080f..b698a5ff769 100644 --- a/protocols/relay/src/protocol/inbound_stop.rs +++ b/protocols/relay/src/protocol/inbound_stop.rs @@ -72,11 +72,9 @@ impl From for Error { } #[derive(Debug, Error)] -pub enum ProtocolViolation { +pub(crate) enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Failed to parse peer id.")] ParsePeerId, #[error("Expected 'peer' field to be set.")] @@ -132,10 +130,12 @@ impl Circuit { status: Some(status), }; - self.send(msg).await.map_err(Into::into) + self.send(msg).await?; + + Ok(()) } - async fn send(&mut self, msg: proto::StopMessage) -> Result<(), quick_protobuf_codec::Error> { + async fn send(&mut self, msg: proto::StopMessage) -> Result<(), Error> { self.substream.send(msg).await?; self.substream.flush().await?; diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index 4e9b512c3e7..2a39ec5fd4a 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -80,14 +80,10 @@ pub enum 
ProtocolViolation { InvalidReservationExpiration, #[error("Invalid addresses in reservation.")] InvalidReservationAddrs, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Unexpected message type 'connect'")] UnexpectedTypeConnect, #[error("Unexpected message type 'reserve'")] UnexpectedTypeReserve, - #[error("Failed to parse response type field.")] - ParseStatusField, #[error("Unexpected message status '{0:?}'")] UnexpectedStatus(proto::Status), } diff --git a/protocols/relay/src/protocol/outbound_stop.rs b/protocols/relay/src/protocol/outbound_stop.rs index 6f715f14f14..525ebc10821 100644 --- a/protocols/relay/src/protocol/outbound_stop.rs +++ b/protocols/relay/src/protocol/outbound_stop.rs @@ -18,6 +18,7 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use std::io; use std::time::Duration; use asynchronous_codec::{Framed, FramedParts}; @@ -26,124 +27,111 @@ use futures::prelude::*; use thiserror::Error; use libp2p_identity::PeerId; -use libp2p_swarm::{ConnectionId, Stream, StreamUpgradeError}; +use libp2p_swarm::Stream; -use crate::behaviour::handler::Config; -use crate::protocol::{inbound_hop, MAX_MESSAGE_SIZE}; -use crate::{proto, CircuitId}; +use crate::protocol::MAX_MESSAGE_SIZE; +use crate::{proto, STOP_PROTOCOL_NAME}; #[derive(Debug, Error)] -pub(crate) enum UpgradeError { - #[error("Circuit failed")] - CircuitFailed(#[from] CircuitFailedReason), - #[error("Fatal")] - Fatal(#[from] FatalUpgradeError), -} - -impl From for UpgradeError { - fn from(error: quick_protobuf_codec::Error) -> Self { - Self::Fatal(error.into()) - } -} - -#[derive(Debug, Error)] -pub enum CircuitFailedReason { +pub enum Error { #[error("Remote reported resource limit exceeded.")] ResourceLimitExceeded, #[error("Remote reported permission denied.")] PermissionDenied, + #[error("Remote does not support the `{STOP_PROTOCOL_NAME}` protocol")] + Unsupported, + #[error("IO error")] + Io(#[source] io::Error), + #[error("Protocol error")] + Protocol(#[from] ProtocolViolation), +} + +impl Error { + pub(crate) fn to_status(&self) -> proto::Status { + match self { + Error::ResourceLimitExceeded => proto::Status::RESOURCE_LIMIT_EXCEEDED, + Error::PermissionDenied => proto::Status::PERMISSION_DENIED, + Error::Unsupported => proto::Status::CONNECTION_FAILED, + Error::Io(_) => proto::Status::CONNECTION_FAILED, + Error::Protocol( + ProtocolViolation::UnexpectedStatus(_) | ProtocolViolation::UnexpectedTypeConnect, + ) => proto::Status::UNEXPECTED_MESSAGE, + Error::Protocol(_) => proto::Status::MALFORMED_MESSAGE, + } + } } +/// Depicts all forms of protocol violations. #[derive(Debug, Error)] -pub enum FatalUpgradeError { +pub enum ProtocolViolation { #[error(transparent)] Codec(#[from] quick_protobuf_codec::Error), - #[error("Stream closed")] - StreamClosed, #[error("Expected 'status' field to be set.")] MissingStatusField, - #[error("Failed to parse response type field.")] - ParseTypeField, #[error("Unexpected message type 'connect'")] UnexpectedTypeConnect, - #[error("Failed to parse response type field.")] - ParseStatusField, #[error("Unexpected message status '{0:?}'")] UnexpectedStatus(proto::Status), } +impl From for Error { + fn from(e: quick_protobuf_codec::Error) -> Self { + Error::Protocol(ProtocolViolation::Codec(e)) + } +} + /// Attempts to _connect_ to a peer via the given stream. 
pub(crate) async fn connect( io: Stream, - stop_command: PendingConnect, -) -> Result, FatalUpgradeError> { + src_peer_id: PeerId, + max_duration: Duration, + max_bytes: u64, +) -> Result { let msg = proto::StopMessage { type_pb: proto::StopMessageType::CONNECT, peer: Some(proto::Peer { - id: stop_command.src_peer_id.to_bytes(), + id: src_peer_id.to_bytes(), addrs: vec![], }), limit: Some(proto::Limit { duration: Some( - stop_command - .max_circuit_duration + max_duration .as_secs() .try_into() .expect("`max_circuit_duration` not to exceed `u32::MAX`."), ), - data: Some(stop_command.max_circuit_bytes), + data: Some(max_bytes), }), status: None, }; let mut substream = Framed::new(io, quick_protobuf_codec::Codec::new(MAX_MESSAGE_SIZE)); - if substream.send(msg).await.is_err() { - return Err(FatalUpgradeError::StreamClosed); - } - - let res = substream.next().await; - - if let None | Some(Err(_)) = res { - return Err(FatalUpgradeError::StreamClosed); - } + substream.send(msg).await?; let proto::StopMessage { type_pb, peer: _, limit: _, status, - } = res.unwrap().expect("should be ok"); + } = substream + .next() + .await + .ok_or(Error::Io(io::ErrorKind::UnexpectedEof.into()))??; match type_pb { - proto::StopMessageType::CONNECT => return Err(FatalUpgradeError::UnexpectedTypeConnect), + proto::StopMessageType::CONNECT => { + return Err(Error::Protocol(ProtocolViolation::UnexpectedTypeConnect)) + } proto::StopMessageType::STATUS => {} } match status { Some(proto::Status::OK) => {} - Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => { - return Ok(Err(CircuitFailed { - circuit_id: stop_command.circuit_id, - src_peer_id: stop_command.src_peer_id, - src_connection_id: stop_command.src_connection_id, - inbound_circuit_req: stop_command.inbound_circuit_req, - status: proto::Status::RESOURCE_LIMIT_EXCEEDED, - error: StreamUpgradeError::Apply(CircuitFailedReason::ResourceLimitExceeded), - })) - } - Some(proto::Status::PERMISSION_DENIED) => { - return Ok(Err(CircuitFailed { - circuit_id: stop_command.circuit_id, - src_peer_id: stop_command.src_peer_id, - src_connection_id: stop_command.src_connection_id, - inbound_circuit_req: stop_command.inbound_circuit_req, - status: proto::Status::PERMISSION_DENIED, - error: StreamUpgradeError::Apply(CircuitFailedReason::PermissionDenied), - })) - } - Some(s) => return Err(FatalUpgradeError::UnexpectedStatus(s)), - None => return Err(FatalUpgradeError::MissingStatusField), + Some(proto::Status::RESOURCE_LIMIT_EXCEEDED) => return Err(Error::ResourceLimitExceeded), + Some(proto::Status::PERMISSION_DENIED) => return Err(Error::PermissionDenied), + Some(s) => return Err(Error::Protocol(ProtocolViolation::UnexpectedStatus(s))), + None => return Err(Error::Protocol(ProtocolViolation::MissingStatusField)), } let FramedParts { @@ -157,58 +145,13 @@ pub(crate) async fn connect( "Expect a flushed Framed to have an empty write buffer." 
); - Ok(Ok(Circuit { - circuit_id: stop_command.circuit_id, - src_peer_id: stop_command.src_peer_id, - src_connection_id: stop_command.src_connection_id, - inbound_circuit_req: stop_command.inbound_circuit_req, + Ok(Circuit { dst_stream: io, dst_pending_data: read_buffer.freeze(), - })) + }) } pub(crate) struct Circuit { - pub(crate) circuit_id: CircuitId, - pub(crate) src_peer_id: PeerId, - pub(crate) src_connection_id: ConnectionId, - pub(crate) inbound_circuit_req: inbound_hop::CircuitReq, pub(crate) dst_stream: Stream, pub(crate) dst_pending_data: Bytes, } - -pub(crate) struct CircuitFailed { - pub(crate) circuit_id: CircuitId, - pub(crate) src_peer_id: PeerId, - pub(crate) src_connection_id: ConnectionId, - pub(crate) inbound_circuit_req: inbound_hop::CircuitReq, - pub(crate) status: proto::Status, - pub(crate) error: StreamUpgradeError, -} - -pub(crate) struct PendingConnect { - pub(crate) circuit_id: CircuitId, - pub(crate) inbound_circuit_req: inbound_hop::CircuitReq, - pub(crate) src_peer_id: PeerId, - pub(crate) src_connection_id: ConnectionId, - max_circuit_duration: Duration, - max_circuit_bytes: u64, -} - -impl PendingConnect { - pub(crate) fn new( - circuit_id: CircuitId, - inbound_circuit_req: inbound_hop::CircuitReq, - src_peer_id: PeerId, - src_connection_id: ConnectionId, - config: &Config, - ) -> Self { - Self { - circuit_id, - inbound_circuit_req, - src_peer_id, - src_connection_id, - max_circuit_duration: config.max_circuit_duration, - max_circuit_bytes: config.max_circuit_bytes, - } - } -} From e5efafcc64f27484d871b0707d5d2440e2511a45 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Thu, 2 Nov 2023 13:22:32 +1100 Subject: [PATCH 12/33] refactor(plaintext): use `quick-protobuf-codec` Instead of depending on `unsigned-varint`, we can implement the handshake using `quick-protobuf-codec`. This is a lot shorter and allows us to reuse more code. Pull-Request: #4783. 
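As an illustration of the new shape (a minimal sketch, not the literal patch: `Exchange` stands for the crate's generated protobuf message, and the 100-byte limit mirrors the one used in the diff below), the codec now handles both the varint length prefix and the protobuf (de)serialization:

    use asynchronous_codec::Framed;
    use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt};

    async fn exchange_keys<S>(
        socket: S,
        local: Exchange,
    ) -> Result<Option<Exchange>, quick_protobuf_codec::Error>
    where
        S: AsyncRead + AsyncWrite + Unpin,
    {
        // One codec instance handles framing in both directions.
        let mut framed = Framed::new(socket, quick_protobuf_codec::Codec::<Exchange>::new(100));
        framed.send(local).await?;      // write our `Exchange`, length-prefixed
        framed.next().await.transpose() // read the remote's `Exchange`, if any
    }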
--- Cargo.lock | 2 +- transports/plaintext/Cargo.toml | 2 +- transports/plaintext/src/error.rs | 2 +- transports/plaintext/src/handshake.rs | 106 +++++++------------------- transports/plaintext/src/lib.rs | 6 +- 5 files changed, 33 insertions(+), 85 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ef942f48c08..f108d94abcb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2876,9 +2876,9 @@ dependencies = [ "libp2p-identity", "log", "quick-protobuf", + "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", - "unsigned-varint", ] [[package]] diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index 11ec9ba7e57..33c2eeab7ae 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -18,7 +18,7 @@ libp2p-core = { workspace = true } libp2p-identity = { workspace = true } log = "0.4.20" quick-protobuf = "0.8" -unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } +quick-protobuf-codec = { workspace = true } [dev-dependencies] env_logger = "0.10.0" diff --git a/transports/plaintext/src/error.rs b/transports/plaintext/src/error.rs index a1e4d8660df..7480874a85e 100644 --- a/transports/plaintext/src/error.rs +++ b/transports/plaintext/src/error.rs @@ -41,7 +41,7 @@ pub enum Error { } #[derive(Debug)] -pub struct DecodeError(pub(crate) quick_protobuf::Error); +pub struct DecodeError(pub(crate) quick_protobuf_codec::Error); impl fmt::Display for DecodeError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index 05e3b9085a0..51dd5501ea0 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -21,95 +21,46 @@ use crate::error::{DecodeError, Error}; use crate::proto::Exchange; use crate::Config; - use asynchronous_codec::{Framed, FramedParts}; -use bytes::{Bytes, BytesMut}; +use bytes::Bytes; use futures::prelude::*; use libp2p_identity::{PeerId, PublicKey}; use log::{debug, trace}; -use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; -use unsigned_varint::codec::UviBytes; - -struct HandshakeContext { - config: Config, - state: T, -} - -// HandshakeContext<()> --with_local-> HandshakeContext -struct Local { - // Our local exchange's raw bytes: - exchange_bytes: Vec, -} - -// HandshakeContext --with_remote-> HandshakeContext -pub(crate) struct Remote { - // The remote's peer ID: - pub(crate) peer_id: PeerId, // The remote's public key: - pub(crate) public_key: PublicKey, -} - -impl HandshakeContext { - fn new(config: Config) -> Self { - #[allow(deprecated)] - let exchange = Exchange { - id: Some(config.local_public_key.to_peer_id().to_bytes()), - pubkey: Some(config.local_public_key.encode_protobuf()), - }; - let mut buf = Vec::with_capacity(exchange.get_size()); - let mut writer = Writer::new(&mut buf); - exchange - .write_message(&mut writer) - .expect("Encoding to succeed"); - - Self { - config, - state: Local { - exchange_bytes: buf, - }, - } - } - fn with_remote(self, exchange_bytes: BytesMut) -> Result, Error> { - let mut reader = BytesReader::from_bytes(&exchange_bytes); - let prop = Exchange::from_reader(&mut reader, &exchange_bytes).map_err(DecodeError)?; - - let public_key = PublicKey::try_decode_protobuf(&prop.pubkey.unwrap_or_default())?; - let peer_id = PeerId::from_bytes(&prop.id.unwrap_or_default())?; - - // Check the validity of the remote's `Exchange`. 
- if peer_id != public_key.to_peer_id() { - return Err(Error::PeerIdMismatch); - } - - Ok(HandshakeContext { - config: self.config, - state: Remote { - peer_id, - public_key, - }, - }) - } -} - -pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, Remote, Bytes), Error> +pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, PublicKey, Bytes), Error> where S: AsyncRead + AsyncWrite + Send + Unpin, { // The handshake messages all start with a variable-length integer indicating the size. - let mut framed_socket = Framed::new(socket, UviBytes::default()); - - trace!("starting handshake"); - let context = HandshakeContext::new(config); + let mut framed_socket = Framed::new(socket, quick_protobuf_codec::Codec::::new(100)); trace!("sending exchange to remote"); framed_socket - .send(BytesMut::from(&context.state.exchange_bytes[..])) - .await?; + .send(Exchange { + id: Some(config.local_public_key.to_peer_id().to_bytes()), + pubkey: Some(config.local_public_key.encode_protobuf()), + }) + .await + .map_err(DecodeError)?; trace!("receiving the remote's exchange"); - let context = match framed_socket.next().await { - Some(p) => context.with_remote(p?)?, + let public_key = match framed_socket + .next() + .await + .transpose() + .map_err(DecodeError)? + { + Some(remote) => { + let public_key = PublicKey::try_decode_protobuf(&remote.pubkey.unwrap_or_default())?; + let peer_id = PeerId::from_bytes(&remote.id.unwrap_or_default())?; + + if peer_id != public_key.to_peer_id() { + return Err(Error::PeerIdMismatch); + } + + public_key + } None => { debug!("unexpected eof while waiting for remote's exchange"); let err = IoError::new(IoErrorKind::BrokenPipe, "unexpected eof"); @@ -117,10 +68,7 @@ where } }; - trace!( - "received exchange from remote; pubkey = {:?}", - context.state.public_key - ); + trace!("received exchange from remote; pubkey = {:?}", public_key); let FramedParts { io, @@ -129,5 +77,5 @@ where .. } = framed_socket.into_parts(); assert!(write_buffer.is_empty()); - Ok((io, context.state, read_buffer.freeze())) + Ok((io, public_key, read_buffer.freeze())) } diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index fcc56be9477..bdca271a68e 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -102,14 +102,14 @@ impl Config { T: AsyncRead + AsyncWrite + Send + Unpin + 'static, { debug!("Starting plaintext handshake."); - let (socket, remote, read_buffer) = handshake::handshake(socket, self).await?; + let (socket, remote_key, read_buffer) = handshake::handshake(socket, self).await?; debug!("Finished plaintext handshake."); Ok(( - remote.peer_id, + remote_key.to_peer_id(), Output { socket, - remote_key: remote.public_key, + remote_key, read_buffer, }, )) From 96b4c4aa74085407d7844afe274a77c05dc766de Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Thu, 2 Nov 2023 13:41:09 +1100 Subject: [PATCH 13/33] chore: enforce that we don't have `dbg!` in our code Pull-Request: #4784. 
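For illustration (not part of the patch): once `clippy.dbg_macro = "warn"` is set in the workspace lints, clippy flags any leftover use of the macro, such as the calls removed from the noise and WebRTC code below:

    // Hypothetical example; `double` exists only to show the lint firing.
    fn double(x: u32) -> u32 {
        dbg!(x * 2) // clippy: `dbg_macro` warns on this call
    }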
--- Cargo.toml | 1 + transports/noise/src/lib.rs | 2 +- transports/webrtc/tests/smoke.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e7044a185bf..4be55edb103 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,6 +135,7 @@ clippy.used_underscore_binding = "warn" clippy.pedantic = "allow" clippy.type_complexity = "allow" clippy.unnecessary_wraps = "warn" +clippy.dbg_macro = "warn" [workspace.metadata.release] pre-release-hook = ["/bin/sh", '-c', '/bin/sh $WORKSPACE_ROOT/scripts/add-changelog-header.sh'] # Nested use of shell to expand variables. diff --git a/transports/noise/src/lib.rs b/transports/noise/src/lib.rs index c17b2adb795..70fae9d7ee6 100644 --- a/transports/noise/src/lib.rs +++ b/transports/noise/src/lib.rs @@ -187,7 +187,7 @@ where handshake::send_identity(&mut state).await?; handshake::recv_identity(&mut state).await?; - let (pk, io) = dbg!(state.finish())?; + let (pk, io) = state.finish()?; Ok((pk.to_peer_id(), io)) } diff --git a/transports/webrtc/tests/smoke.rs b/transports/webrtc/tests/smoke.rs index 8e56b99723d..ce94da0aea8 100644 --- a/transports/webrtc/tests/smoke.rs +++ b/transports/webrtc/tests/smoke.rs @@ -344,7 +344,7 @@ impl Future for ListenUpgrade<'_> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { loop { - match dbg!(self.listener.poll_next_unpin(cx)) { + match self.listener.poll_next_unpin(cx) { Poll::Ready(Some(TransportEvent::Incoming { upgrade, send_back_addr, From 6b567e9446ebcddfa3552b9d5e8ec789447c6c07 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Thu, 2 Nov 2023 16:08:52 +1100 Subject: [PATCH 14/33] ci: use dynamic PR base Previously, we would always compare the diff to master. This is wrong because not all PRs go into master. Instead, we now use the correct SHA of the PR base. Pull-Request: #4785. --- .github/workflows/ci.yml | 2 +- scripts/ensure-version-bump-and-changelog.sh | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2c69eb65276..4a559c73b8f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -85,7 +85,7 @@ jobs: ./scripts/ensure-version-bump-and-changelog.sh env: HEAD_SHA: ${{ github.event.pull_request.head.sha }} - + PR_BASE: ${{ github.event.pull_request.base.ref }} wasm_tests: name: Run all WASM tests diff --git a/scripts/ensure-version-bump-and-changelog.sh b/scripts/ensure-version-bump-and-changelog.sh index 164af5126eb..1470ec3a5e4 100755 --- a/scripts/ensure-version-bump-and-changelog.sh +++ b/scripts/ensure-version-bump-and-changelog.sh @@ -5,9 +5,9 @@ set -ex; MANIFEST_PATH=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .manifest_path') DIR_TO_CRATE=$(dirname "$MANIFEST_PATH") -MERGE_BASE=$(git merge-base "$HEAD_SHA" master) # Find the merge base. This ensures we only diff what was actually added in the PR. +MERGE_BASE=$(git merge-base "$HEAD_SHA" "$PR_BASE") # Find the merge base. This ensures we only diff what was actually added in the PR. 
-SRC_DIFF_TO_MASTER=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-status -- "$DIR_TO_CRATE/src" "$DIR_TO_CRATE/Cargo.toml") +SRC_DIFF_TO_BASE=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-status -- "$DIR_TO_CRATE/src" "$DIR_TO_CRATE/Cargo.toml") CHANGELOG_DIFF=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-only -- "$DIR_TO_CRATE/CHANGELOG.md") VERSION_IN_CHANGELOG=$(awk -F' ' '/^## [0-9]+\.[0-9]+\.[0-9]+/{print $2; exit}' "$DIR_TO_CRATE/CHANGELOG.md") @@ -20,7 +20,7 @@ if [[ "$VERSION_IN_CHANGELOG" != "$VERSION_IN_MANIFEST" ]]; then fi # If the source files of this crate weren't touched in this PR, exit early. -if [ -z "$SRC_DIFF_TO_MASTER" ]; then +if [ -z "$SRC_DIFF_TO_BASE" ]; then exit 0; fi From e0674609bd1b6d799d77bc6dad33d1ce85f9aa2c Mon Sep 17 00:00:00 2001 From: contrun Date: Thu, 2 Nov 2023 13:56:43 +0800 Subject: [PATCH 15/33] fix(webrtc): fix building for webrtc browser connectivity example Due to some API changes, the browser-webrtc example no longer builds. This PR will fix that. Pull-Request: #4775. --- .github/workflows/ci.yml | 9 +++++++++ examples/browser-webrtc/src/lib.rs | 27 +++++++++------------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4a559c73b8f..6c44e79aad0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -279,6 +279,15 @@ jobs: cargo check --manifest-path "$toml"; done + - uses: taiki-e/cache-cargo-install-action@v1 + with: + tool: wasm-pack@0.12.0 + + - name: Build webrtc-browser example + run: | + cd examples/browser-webrtc + wasm-pack build --target web --out-dir static + semver: runs-on: ubuntu-latest steps: diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs index ef0fd1d0b58..062a7978a01 100644 --- a/examples/browser-webrtc/src/lib.rs +++ b/examples/browser-webrtc/src/lib.rs @@ -3,12 +3,11 @@ use futures::StreamExt; use js_sys::Date; use libp2p::core::Multiaddr; -use libp2p::identity::{Keypair, PeerId}; use libp2p::ping; -use libp2p::swarm::{keep_alive, NetworkBehaviour, SwarmBuilder, SwarmEvent}; -use libp2p::webrtc_websys; -use std::convert::From; +use libp2p::swarm::SwarmEvent; +use libp2p_webrtc_websys as webrtc_websys; use std::io; +use std::time::Duration; use wasm_bindgen::prelude::*; use web_sys::{Document, HtmlElement}; @@ -19,15 +18,13 @@ pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { let body = Body::from_current_window()?; body.append_p("Let's ping the WebRTC Server!")?; - let swarm = libp2p::SwarmBuilder::with_new_identity() + let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_wasm_bindgen() .with_other_transport(|key| { webrtc_websys::Transport::new(webrtc_websys::Config::new(&key)) })? - .with_behaviour(|_| Behaviour { - ping: ping::Behaviour::new(ping::Config::new()), - keep_alive: keep_alive::Behaviour, - })? + .with_behaviour(|_| ping::Behaviour::new(ping::Config::new()))? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(5))) .build(); let addr = libp2p_endpoint.parse::()?; @@ -36,16 +33,16 @@ pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { loop { match swarm.next().await.unwrap() { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { result: Err(e), .. })) => { + SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { log::error!("Ping failed: {:?}", e); break; } - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { + SwarmEvent::Behaviour(ping::Event { peer, result: Ok(rtt), .. 
- })) => { + }) => { log::info!("Ping successful: RTT: {rtt:?}, from {peer}"); body.append_p(&format!("RTT: {rtt:?} at {}", Date::new_0().to_string()))?; } @@ -56,12 +53,6 @@ pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { Ok(()) } -#[derive(NetworkBehaviour)] -struct Behaviour { - ping: ping::Behaviour, - keep_alive: keep_alive::Behaviour, -} - /// Convenience wrapper around the current document body struct Body { body: HtmlElement, From be2023a5466e09abf94562c932e5aad76128b92f Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Thu, 2 Nov 2023 17:07:36 +1100 Subject: [PATCH 16/33] ci: fetch necessary refs Follow-up to #4785. Whilst comparing against the actual PR base is correct, we need to fetch the ref first before `git` can resolve it. Pull-Request: #4786. --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6c44e79aad0..8802871dc9d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -82,6 +82,7 @@ jobs: !contains(github.event.pull_request.labels.*.name, 'internal-change') run: | git fetch origin master:master + git fetch origin ${{ github.event.pull_request.base.ref }}:${{ github.event.pull_request.base.ref }} ./scripts/ensure-version-bump-and-changelog.sh env: HEAD_SHA: ${{ github.event.pull_request.head.sha }} From ac2848866a4bc792e2daae681a62c1518ecc6e3a Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 2 Nov 2023 07:54:23 +0100 Subject: [PATCH 17/33] fix(relay): close stream once done sending Not explicitly closing a stream can lead to stream resets in the happy path once the stream is dropped. Instead, explicitly close the stream once the local node is done sending data. Related: #4747. Pull-Request: #4776. --- protocols/relay/src/protocol/outbound_hop.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index 2a39ec5fd4a..e5f9a6a0a52 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -124,6 +124,8 @@ pub(crate) async fn make_reservation(stream: Stream) -> Result Date: Thu, 2 Nov 2023 18:26:18 +1100 Subject: [PATCH 18/33] refactor(autonat): use `quick-protobuf-codec` Resolves: #4489. Resolves: #2500. Pull-Request: #4787. 
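For context, this follows the same pattern as the plaintext refactor: the manual length-prefixed reads and writes are replaced with `FramedRead`/`FramedWrite` around a `quick-protobuf-codec`. Roughly (a sketch of the shape only, assuming the crate's generated `proto::Message` type and the 1024-byte limit used in the diff below):

    use asynchronous_codec::{FramedRead, FramedWrite};
    use futures::{AsyncRead, AsyncWrite, SinkExt, StreamExt};
    use std::io;

    // Read a single length-prefixed protobuf message; a closed stream becomes `UnexpectedEof`.
    async fn read_message<T: AsyncRead + Unpin>(io: T) -> io::Result<proto::Message> {
        let msg = FramedRead::new(io, quick_protobuf_codec::Codec::<proto::Message>::new(1024))
            .next()
            .await
            .ok_or(io::ErrorKind::UnexpectedEof)??;
        Ok(msg)
    }

    // Write a single message and close the substream once it is flushed.
    async fn write_message<T: AsyncWrite + Unpin>(io: T, msg: proto::Message) -> io::Result<()> {
        let mut framed =
            FramedWrite::new(io, quick_protobuf_codec::Codec::<proto::Message>::new(1024));
        framed.send(msg).await?;
        framed.close().await?;
        Ok(())
    }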
--- Cargo.lock | 2 + protocols/autonat/Cargo.toml | 2 + protocols/autonat/src/protocol.rs | 100 ++++++++++++++---------------- 3 files changed, 49 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f108d94abcb..b32e561d48c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2430,6 +2430,7 @@ version = "0.12.0" dependencies = [ "async-std", "async-trait", + "asynchronous-codec 0.6.2", "env_logger 0.10.0", "futures", "futures-timer", @@ -2441,6 +2442,7 @@ dependencies = [ "libp2p-swarm-test", "log", "quick-protobuf", + "quick-protobuf-codec", "rand 0.8.5", ] diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index e3b4ed4a120..9acad187586 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -22,6 +22,8 @@ libp2p-identity = { workspace = true } log = "0.4" rand = "0.8" quick-protobuf = "0.8" +quick-protobuf-codec = { workspace = true } +asynchronous-codec = "0.6.2" [dev-dependencies] async-std = { version = "1.10", features = ["attributes"] } diff --git a/protocols/autonat/src/protocol.rs b/protocols/autonat/src/protocol.rs index a63fd8cdf4d..904af6473e2 100644 --- a/protocols/autonat/src/protocol.rs +++ b/protocols/autonat/src/protocol.rs @@ -20,12 +20,13 @@ use crate::proto; use async_trait::async_trait; -use futures::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use libp2p_core::{upgrade, Multiaddr}; +use asynchronous_codec::{FramedRead, FramedWrite}; +use futures::io::{AsyncRead, AsyncWrite}; +use futures::{SinkExt, StreamExt}; +use libp2p_core::Multiaddr; use libp2p_identity::PeerId; use libp2p_request_response::{self as request_response}; use libp2p_swarm::StreamProtocol; -use quick_protobuf::{BytesReader, Writer}; use std::{convert::TryFrom, io}; /// The protocol name used for negotiating with multistream-select. 
@@ -44,8 +45,12 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncRead + Send + Unpin, { - let bytes = upgrade::read_length_prefixed(io, 1024).await?; - let request = DialRequest::from_bytes(&bytes)?; + let message = FramedRead::new(io, codec()) + .next() + .await + .ok_or(io::ErrorKind::UnexpectedEof)??; + let request = DialRequest::from_proto(message)?; + Ok(request) } @@ -57,8 +62,12 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncRead + Send + Unpin, { - let bytes = upgrade::read_length_prefixed(io, 1024).await?; - let response = DialResponse::from_bytes(&bytes)?; + let message = FramedRead::new(io, codec()) + .next() + .await + .ok_or(io::ErrorKind::UnexpectedEof)??; + let response = DialResponse::from_proto(message)?; + Ok(response) } @@ -71,8 +80,11 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncWrite + Send + Unpin, { - upgrade::write_length_prefixed(io, data.into_bytes()).await?; - io.close().await + let mut framed = FramedWrite::new(io, codec()); + framed.send(data.into_proto()).await?; + framed.close().await?; + + Ok(()) } async fn write_response( @@ -84,11 +96,18 @@ impl request_response::Codec for AutoNatCodec { where T: AsyncWrite + Send + Unpin, { - upgrade::write_length_prefixed(io, data.into_bytes()).await?; - io.close().await + let mut framed = FramedWrite::new(io, codec()); + framed.send(data.into_proto()).await?; + framed.close().await?; + + Ok(()) } } +fn codec() -> quick_protobuf_codec::Codec { + quick_protobuf_codec::Codec::::new(1024) +} + #[derive(Clone, Debug, Eq, PartialEq)] pub struct DialRequest { pub peer_id: PeerId, @@ -96,12 +115,7 @@ pub struct DialRequest { } impl DialRequest { - pub fn from_bytes(bytes: &[u8]) -> Result { - use quick_protobuf::MessageRead; - - let mut reader = BytesReader::from_bytes(bytes); - let msg = proto::Message::from_reader(&mut reader, bytes) - .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + pub fn from_proto(msg: proto::Message) -> Result { if msg.type_pb != Some(proto::MessageType::DIAL) { return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid type")); } @@ -143,9 +157,7 @@ impl DialRequest { }) } - pub fn into_bytes(self) -> Vec { - use quick_protobuf::MessageWrite; - + pub fn into_proto(self) -> proto::Message { let peer_id = self.peer_id.to_bytes(); let addrs = self .addresses @@ -153,7 +165,7 @@ impl DialRequest { .map(|addr| addr.to_vec()) .collect(); - let msg = proto::Message { + proto::Message { type_pb: Some(proto::MessageType::DIAL), dial: Some(proto::Dial { peer: Some(proto::PeerInfo { @@ -162,12 +174,7 @@ impl DialRequest { }), }), dialResponse: None, - }; - - let mut buf = Vec::with_capacity(msg.get_size()); - let mut writer = Writer::new(&mut buf); - msg.write_message(&mut writer).expect("Encoding to succeed"); - buf + } } } @@ -217,12 +224,7 @@ pub struct DialResponse { } impl DialResponse { - pub fn from_bytes(bytes: &[u8]) -> Result { - use quick_protobuf::MessageRead; - - let mut reader = BytesReader::from_bytes(bytes); - let msg = proto::Message::from_reader(&mut reader, bytes) - .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + pub fn from_proto(msg: proto::Message) -> Result { if msg.type_pb != Some(proto::MessageType::DIAL_RESPONSE) { return Err(io::Error::new(io::ErrorKind::InvalidData, "invalid type")); } @@ -258,9 +260,7 @@ impl DialResponse { }) } - pub fn into_bytes(self) -> Vec { - use quick_protobuf::MessageWrite; - + pub fn into_proto(self) -> proto::Message { let dial_response = match self.result { 
Ok(addr) => proto::DialResponse { status: Some(proto::ResponseStatus::OK), @@ -274,23 +274,17 @@ impl DialResponse { }, }; - let msg = proto::Message { + proto::Message { type_pb: Some(proto::MessageType::DIAL_RESPONSE), dial: None, dialResponse: Some(dial_response), - }; - - let mut buf = Vec::with_capacity(msg.get_size()); - let mut writer = Writer::new(&mut buf); - msg.write_message(&mut writer).expect("Encoding to succeed"); - buf + } } } #[cfg(test)] mod tests { use super::*; - use quick_protobuf::MessageWrite; #[test] fn test_request_encode_decode() { @@ -301,8 +295,8 @@ mod tests { "/ip4/192.168.1.42/tcp/30333".parse().unwrap(), ], }; - let bytes = request.clone().into_bytes(); - let request2 = DialRequest::from_bytes(&bytes).unwrap(); + let proto = request.clone().into_proto(); + let request2 = DialRequest::from_proto(proto).unwrap(); assert_eq!(request, request2); } @@ -312,8 +306,8 @@ mod tests { result: Ok("/ip4/8.8.8.8/tcp/30333".parse().unwrap()), status_text: None, }; - let bytes = response.clone().into_bytes(); - let response2 = DialResponse::from_bytes(&bytes).unwrap(); + let proto = response.clone().into_proto(); + let response2 = DialResponse::from_proto(proto).unwrap(); assert_eq!(response, response2); } @@ -323,8 +317,8 @@ mod tests { result: Err(ResponseError::DialError), status_text: Some("dial failed".to_string()), }; - let bytes = response.clone().into_bytes(); - let response2 = DialResponse::from_bytes(&bytes).unwrap(); + let proto = response.clone().into_proto(); + let response2 = DialResponse::from_proto(proto).unwrap(); assert_eq!(response, response2); } @@ -350,11 +344,7 @@ mod tests { dialResponse: None, }; - let mut bytes = Vec::with_capacity(msg.get_size()); - let mut writer = Writer::new(&mut bytes); - msg.write_message(&mut writer).expect("Encoding to succeed"); - - let request = DialRequest::from_bytes(&bytes).expect("not to fail"); + let request = DialRequest::from_proto(msg).expect("not to fail"); assert_eq!(request.addresses, vec![valid_multiaddr]) } From fab920500ddee04be65a4108a1abdb2e91a30b1b Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Thu, 2 Nov 2023 20:46:40 +1100 Subject: [PATCH 19/33] feat(core): remove `upgrade::transfer` module As described in #4011, these utility functions don't belong in `libp2p-core`. Users can use `quick-protobuf-codec` if they need to write varint-prefixed protobuf messages. For writing varint-prefixed bytes, the `unsigned-varint` crate offers a various codec implementations. `libp2p-core` is the base dependency of all other crates. Thus, we should only expose items there that are actually needed by all other crates. For implementation details like how bytes are written, downstream crates (including users) should reach for other crates. Depends-On: #4787. Resolves: #4011. Pull-Request: #4788. --- core/CHANGELOG.md | 3 + core/src/upgrade.rs | 7 +- core/src/upgrade/transfer.rs | 220 ----------------------------------- 3 files changed, 4 insertions(+), 226 deletions(-) delete mode 100644 core/src/upgrade/transfer.rs diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 2eec5d3b054..da6f81c139e 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -4,6 +4,9 @@ See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). - Remove deprecated functions from `ListenerId`. See [PR 4736](https://github.com/libp2p/rust-libp2p/pull/4736). +- Remove `upgrade::transfer` module. + See [issue 4011](https://github.com/libp2p/rust-libp2p/issues/4011) for details. 
+ See [PR 4788](https://github.com/libp2p/rust-libp2p/pull/4788). ## 0.40.1 diff --git a/core/src/upgrade.rs b/core/src/upgrade.rs index 777443822b7..69561fbebd8 100644 --- a/core/src/upgrade.rs +++ b/core/src/upgrade.rs @@ -64,7 +64,6 @@ mod error; mod pending; mod ready; mod select; -mod transfer; pub(crate) use apply::{ apply, apply_inbound, apply_outbound, InboundUpgradeApply, OutboundUpgradeApply, @@ -73,11 +72,7 @@ pub(crate) use error::UpgradeError; use futures::future::Future; pub use self::{ - denied::DeniedUpgrade, - pending::PendingUpgrade, - ready::ReadyUpgrade, - select::SelectUpgrade, - transfer::{read_length_prefixed, read_varint, write_length_prefixed, write_varint}, + denied::DeniedUpgrade, pending::PendingUpgrade, ready::ReadyUpgrade, select::SelectUpgrade, }; pub use crate::Negotiated; pub use multistream_select::{NegotiatedComplete, NegotiationError, ProtocolError, Version}; diff --git a/core/src/upgrade/transfer.rs b/core/src/upgrade/transfer.rs deleted file mode 100644 index 93aeb987c8a..00000000000 --- a/core/src/upgrade/transfer.rs +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// -// Permission is hereby granted, free of charge, to any person obtaining a -// copy of this software and associated documentation files (the "Software"), -// to deal in the Software without restriction, including without limitation -// the rights to use, copy, modify, merge, publish, distribute, sublicense, -// and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -// DEALINGS IN THE SOFTWARE. - -//! Contains some helper futures for creating upgrades. - -use futures::prelude::*; -use std::io; - -// TODO: these methods could be on an Ext trait to AsyncWrite - -/// Writes a message to the given socket with a length prefix appended to it. Also flushes the socket. -/// -/// > **Note**: Prepends a variable-length prefix indicate the length of the message. This is -/// > compatible with what [`read_length_prefixed`] expects. -pub async fn write_length_prefixed( - socket: &mut (impl AsyncWrite + Unpin), - data: impl AsRef<[u8]>, -) -> Result<(), io::Error> { - write_varint(socket, data.as_ref().len()).await?; - socket.write_all(data.as_ref()).await?; - socket.flush().await?; - - Ok(()) -} - -/// Writes a variable-length integer to the `socket`. -/// -/// > **Note**: Does **NOT** flush the socket. -pub async fn write_varint( - socket: &mut (impl AsyncWrite + Unpin), - len: usize, -) -> Result<(), io::Error> { - let mut len_data = unsigned_varint::encode::usize_buffer(); - let encoded_len = unsigned_varint::encode::usize(len, &mut len_data).len(); - socket.write_all(&len_data[..encoded_len]).await?; - - Ok(()) -} - -/// Reads a variable-length integer from the `socket`. 
-/// -/// As a special exception, if the `socket` is empty and EOFs right at the beginning, then we -/// return `Ok(0)`. -/// -/// > **Note**: This function reads bytes one by one from the `socket`. It is therefore encouraged -/// > to use some sort of buffering mechanism. -pub async fn read_varint(socket: &mut (impl AsyncRead + Unpin)) -> Result { - let mut buffer = unsigned_varint::encode::usize_buffer(); - let mut buffer_len = 0; - - loop { - match socket.read(&mut buffer[buffer_len..buffer_len + 1]).await? { - 0 => { - // Reaching EOF before finishing to read the length is an error, unless the EOF is - // at the very beginning of the substream, in which case we assume that the data is - // empty. - if buffer_len == 0 { - return Ok(0); - } else { - return Err(io::ErrorKind::UnexpectedEof.into()); - } - } - n => debug_assert_eq!(n, 1), - } - - buffer_len += 1; - - match unsigned_varint::decode::usize(&buffer[..buffer_len]) { - Ok((len, _)) => return Ok(len), - Err(unsigned_varint::decode::Error::Overflow) => { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - "overflow in variable-length integer", - )); - } - // TODO: why do we have a `__Nonexhaustive` variant in the error? I don't know how to process it - // Err(unsigned_varint::decode::Error::Insufficient) => {} - Err(_) => {} - } - } -} - -/// Reads a length-prefixed message from the given socket. -/// -/// The `max_size` parameter is the maximum size in bytes of the message that we accept. This is -/// necessary in order to avoid DoS attacks where the remote sends us a message of several -/// gigabytes. -/// -/// > **Note**: Assumes that a variable-length prefix indicates the length of the message. This is -/// > compatible with what [`write_length_prefixed`] does. -pub async fn read_length_prefixed( - socket: &mut (impl AsyncRead + Unpin), - max_size: usize, -) -> io::Result> { - let len = read_varint(socket).await?; - if len > max_size { - return Err(io::Error::new( - io::ErrorKind::InvalidData, - format!("Received data size ({len} bytes) exceeds maximum ({max_size} bytes)"), - )); - } - - let mut buf = vec![0; len]; - socket.read_exact(&mut buf).await?; - - Ok(buf) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn write_length_prefixed_works() { - let data = (0..rand::random::() % 10_000) - .map(|_| rand::random::()) - .collect::>(); - let mut out = vec![0; 10_000]; - - futures::executor::block_on(async { - let mut socket = futures::io::Cursor::new(&mut out[..]); - - write_length_prefixed(&mut socket, &data).await.unwrap(); - socket.close().await.unwrap(); - }); - - let (out_len, out_data) = unsigned_varint::decode::usize(&out).unwrap(); - assert_eq!(out_len, data.len()); - assert_eq!(&out_data[..out_len], &data[..]); - } - - // TODO: rewrite these tests - /* - #[test] - fn read_one_works() { - let original_data = (0..rand::random::() % 10_000) - .map(|_| rand::random::()) - .collect::>(); - - let mut len_buf = unsigned_varint::encode::usize_buffer(); - let len_buf = unsigned_varint::encode::usize(original_data.len(), &mut len_buf); - - let mut in_buffer = len_buf.to_vec(); - in_buffer.extend_from_slice(&original_data); - - let future = read_one_then(Cursor::new(in_buffer), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert_eq!(out, original_data); - Ok(()) - }); - - futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_one_zero_len() { - let future = read_one_then(Cursor::new(vec![0]), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert!(out.is_empty()); - 
Ok(()) - }); - - futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_checks_length() { - let mut len_buf = unsigned_varint::encode::u64_buffer(); - let len_buf = unsigned_varint::encode::u64(5_000, &mut len_buf); - - let mut in_buffer = len_buf.to_vec(); - in_buffer.extend((0..5000).map(|_| 0)); - - let future = read_one_then(Cursor::new(in_buffer), 100, (), move |_, ()| -> Result<_, ReadOneError> { - Ok(()) - }); - - match futures::executor::block_on(future) { - Err(ReadOneError::TooLarge { .. }) => (), - _ => panic!(), - } - } - - #[test] - fn read_one_accepts_empty() { - let future = read_one_then(Cursor::new([]), 10_000, (), move |out, ()| -> Result<_, ReadOneError> { - assert!(out.is_empty()); - Ok(()) - }); - - futures::executor::block_on(future).unwrap(); - } - - #[test] - fn read_one_eof_before_len() { - let future = read_one_then(Cursor::new([0x80]), 10_000, (), move |_, ()| -> Result<(), ReadOneError> { - unreachable!() - }); - - match futures::executor::block_on(future) { - Err(ReadOneError::Io(ref err)) if err.kind() == io::ErrorKind::UnexpectedEof => (), - _ => panic!() - } - }*/ -} From e2e9179fd787800ead9ea7ca4450413025cf498a Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Thu, 2 Nov 2023 17:42:27 +0200 Subject: [PATCH 20/33] feat: introduce tracing We replace `log` with `tracing` across the codebase. Where possible, we make use of structured logging now instead of templating strings. `tracing` offers the ability to also record "span"s. A span lasts until its dropped and describes the entire duration that it is active for. All logs (in `tracing` term "events") are hierarchically embedded in all parent-spans). We introduce several spans: - On debug level: One for `new_outgoing_connection`, `new_incoming_connection` and `new_established_connection` - On debug level: `Connection::poll`, `Swarm::poll` and `Pool::poll` - On trace level: `NetworkBehaviour::poll` for each implementation of `NetworkBehaviour` - On trace level: `ConnectionHandler::poll` for each implementation of (protocol) `ConnectionHandler`s The idea here is that logging on debug level gives you a decent overview of what the system is doing. You get spans for the duration of connections and how often each connection gets polled. Dropping down to trace level gives you an extremely detailed view of how long each individual `ConnectionHandler` was executed as part of `poll` which could be used for detailed analysis on how busy certain handlers are. Most importantly, simply logging on `info` does not give you any spans. We consider `info` to be a good default that should be reasonably quiet. Resolves #1533. Pull-Request: #4282. 
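To make the shift concrete, a small sketch (illustrative only, not lifted from the patch): events carry structured fields instead of being interpolated into strings, and a span such as the `new_established_connection` span mentioned above groups every event emitted while it is entered:

    use tracing::{debug_span, info, trace};

    fn on_connection_established(peer: &str, connection_id: u64) {
        // All events recorded while `_guard` is alive are attached to this span.
        let span = debug_span!("new_established_connection", %peer, connection_id);
        let _guard = span.enter();

        info!(%peer, "Connection established");              // structured field, no "{}" templating
        trace!(connection_id, "Polling connection handler"); // only visible at trace level
    }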
--- Cargo.lock | 374 +++++++++++--- core/Cargo.toml | 2 +- core/src/transport/choice.rs | 49 +- core/src/transport/global_only.rs | 13 +- core/src/upgrade/apply.rs | 9 +- examples/autonat/Cargo.toml | 3 +- examples/autonat/src/bin/autonat_client.rs | 5 +- examples/autonat/src/bin/autonat_server.rs | 5 +- examples/browser-webrtc/Cargo.toml | 4 +- examples/browser-webrtc/src/lib.rs | 8 +- examples/browser-webrtc/src/main.rs | 19 +- examples/chat/Cargo.toml | 3 +- examples/chat/src/main.rs | 5 + examples/dcutr/Cargo.toml | 3 +- examples/dcutr/src/main.rs | 26 +- .../distributed-key-value-store/Cargo.toml | 3 +- .../distributed-key-value-store/src/main.rs | 5 +- examples/file-sharing/Cargo.toml | 3 +- examples/file-sharing/src/main.rs | 5 +- examples/identify/Cargo.toml | 3 +- examples/identify/src/main.rs | 5 +- examples/ipfs-kad/Cargo.toml | 2 + examples/ipfs-kad/src/main.rs | 5 +- examples/ipfs-private/Cargo.toml | 3 +- examples/ipfs-private/src/main.rs | 5 +- examples/metrics/Cargo.toml | 12 +- examples/metrics/README.md | 41 +- examples/metrics/docker-compose.yml | 23 + examples/metrics/otel-collector-config.yaml | 25 + examples/metrics/src/http_service.rs | 21 +- examples/metrics/src/main.rs | 74 ++- examples/ping/Cargo.toml | 3 +- examples/ping/src/main.rs | 5 + examples/relay-server/Cargo.toml | 5 +- examples/relay-server/src/main.rs | 5 +- examples/rendezvous/Cargo.toml | 4 +- examples/rendezvous/src/bin/rzv-discover.rs | 13 +- examples/rendezvous/src/bin/rzv-identify.rs | 19 +- examples/rendezvous/src/bin/rzv-register.rs | 21 +- examples/rendezvous/src/main.rs | 15 +- examples/upnp/Cargo.toml | 1 + examples/upnp/src/main.rs | 5 + hole-punching-tests/Cargo.toml | 2 +- hole-punching-tests/src/main.rs | 14 +- identity/Cargo.toml | 2 +- identity/src/keypair.rs | 8 +- interop-tests/Cargo.toml | 8 +- interop-tests/src/arch.rs | 8 +- interop-tests/src/bin/wasm_ping.rs | 12 +- interop-tests/src/lib.rs | 16 +- libp2p/Cargo.toml | 3 +- libp2p/src/tutorials/ping.rs | 27 +- misc/memory-connection-limits/Cargo.toml | 2 +- misc/memory-connection-limits/src/lib.rs | 2 +- misc/multistream-select/Cargo.toml | 4 +- misc/multistream-select/src/dialer_select.rs | 27 +- .../src/length_delimited.rs | 2 +- .../multistream-select/src/listener_select.rs | 16 +- misc/multistream-select/src/negotiated.rs | 4 +- misc/multistream-select/src/protocol.rs | 2 +- misc/server/Cargo.toml | 4 +- misc/server/src/http_service.rs | 7 +- misc/server/src/main.rs | 28 +- misc/webrtc-utils/Cargo.toml | 4 +- misc/webrtc-utils/src/sdp.rs | 2 +- misc/webrtc-utils/src/stream/drop_listener.rs | 2 +- muxers/mplex/Cargo.toml | 4 +- muxers/mplex/benches/split_send_size.rs | 5 +- muxers/mplex/src/io.rs | 233 +++++---- muxers/test-harness/Cargo.toml | 2 +- muxers/test-harness/src/lib.rs | 8 +- muxers/yamux/Cargo.toml | 2 +- muxers/yamux/src/lib.rs | 9 +- protocols/autonat/Cargo.toml | 6 +- protocols/autonat/src/behaviour.rs | 1 + protocols/autonat/src/behaviour/as_client.rs | 22 +- protocols/autonat/src/behaviour/as_server.rs | 48 +- protocols/autonat/src/protocol.rs | 8 +- protocols/dcutr/Cargo.toml | 4 +- protocols/dcutr/src/behaviour.rs | 8 +- protocols/dcutr/src/handler/relayed.rs | 5 +- protocols/dcutr/src/protocol/inbound.rs | 4 +- protocols/dcutr/src/protocol/outbound.rs | 4 +- protocols/dcutr/tests/lib.rs | 5 +- protocols/floodsub/Cargo.toml | 2 +- protocols/floodsub/src/layer.rs | 7 +- protocols/gossipsub/Cargo.toml | 4 +- protocols/gossipsub/src/behaviour.rs | 478 +++++++++--------- protocols/gossipsub/src/behaviour/tests.rs 
| 5 +- protocols/gossipsub/src/gossip_promises.rs | 8 +- protocols/gossipsub/src/handler.rs | 29 +- protocols/gossipsub/src/mcache.rs | 11 +- protocols/gossipsub/src/peer_score.rs | 70 +-- protocols/gossipsub/src/protocol.rs | 37 +- .../gossipsub/src/subscription_filter.rs | 3 +- protocols/gossipsub/tests/smoke.rs | 9 +- protocols/identify/Cargo.toml | 4 +- protocols/identify/src/behaviour.rs | 10 +- protocols/identify/src/handler.rs | 27 +- protocols/identify/src/protocol.rs | 15 +- protocols/identify/tests/smoke.rs | 21 +- protocols/kad/Cargo.toml | 4 +- protocols/kad/src/behaviour.rs | 119 +++-- protocols/kad/src/behaviour/test.rs | 4 +- protocols/kad/src/handler.rs | 75 ++- protocols/kad/src/protocol.rs | 2 +- protocols/kad/tests/client_mode.rs | 17 +- protocols/mdns/Cargo.toml | 4 +- protocols/mdns/src/behaviour.rs | 13 +- protocols/mdns/src/behaviour/iface.rs | 40 +- protocols/mdns/src/behaviour/iface/dns.rs | 2 +- protocols/mdns/tests/use-async-std.rs | 17 +- protocols/mdns/tests/use-tokio.rs | 13 +- protocols/perf/Cargo.toml | 6 +- protocols/perf/src/bin/perf.rs | 24 +- protocols/perf/src/client/behaviour.rs | 1 + protocols/perf/src/client/handler.rs | 1 + protocols/perf/src/server/behaviour.rs | 1 + protocols/perf/src/server/handler.rs | 5 +- protocols/perf/tests/lib.rs | 5 +- protocols/ping/Cargo.toml | 4 +- protocols/ping/src/handler.rs | 16 +- protocols/ping/src/lib.rs | 9 +- protocols/relay/Cargo.toml | 7 +- protocols/relay/src/behaviour.rs | 6 +- protocols/relay/src/behaviour/handler.rs | 13 +- protocols/relay/src/priv_client/handler.rs | 35 +- protocols/relay/src/protocol/inbound_hop.rs | 2 +- protocols/relay/tests/lib.rs | 29 +- protocols/rendezvous/Cargo.toml | 8 +- protocols/rendezvous/src/client.rs | 2 +- protocols/rendezvous/src/server.rs | 6 +- protocols/rendezvous/tests/rendezvous.rs | 37 +- protocols/request-response/Cargo.toml | 4 +- protocols/request-response/src/handler.rs | 7 +- protocols/request-response/src/lib.rs | 8 +- .../request-response/tests/error_reporting.rs | 25 +- protocols/request-response/tests/ping.rs | 5 +- protocols/upnp/Cargo.toml | 4 +- protocols/upnp/src/behaviour.rs | 108 ++-- swarm-test/Cargo.toml | 2 +- swarm-test/src/lib.rs | 19 +- swarm/Cargo.toml | 10 +- swarm/src/behaviour/external_addresses.rs | 8 +- swarm/src/connection.rs | 26 +- swarm/src/connection/pool.rs | 60 ++- swarm/src/handler/multi.rs | 8 +- swarm/src/lib.rs | 81 ++- transports/dns/Cargo.toml | 10 +- transports/dns/src/lib.rs | 26 +- transports/noise/Cargo.toml | 4 +- transports/noise/src/io.rs | 11 +- transports/noise/src/io/framed.rs | 8 +- transports/noise/tests/smoke.rs | 8 +- transports/plaintext/Cargo.toml | 4 +- transports/plaintext/src/handshake.rs | 9 +- transports/plaintext/src/lib.rs | 5 +- transports/plaintext/tests/smoke.rs | 12 +- transports/pnet/Cargo.toml | 2 +- transports/pnet/src/crypt_writer.rs | 3 +- transports/pnet/src/lib.rs | 9 +- transports/quic/Cargo.toml | 4 +- transports/quic/src/hole_punching.rs | 2 +- transports/quic/src/transport.rs | 19 +- transports/quic/tests/smoke.rs | 52 +- transports/tcp/Cargo.toml | 4 +- transports/tcp/src/lib.rs | 49 +- transports/uds/Cargo.toml | 2 +- transports/uds/src/lib.rs | 7 +- transports/webrtc-websys/Cargo.toml | 2 +- transports/webrtc-websys/src/connection.rs | 16 +- transports/webrtc-websys/src/sdp.rs | 2 +- .../src/stream/poll_data_channel.rs | 8 +- transports/webrtc-websys/src/upgrade.rs | 6 +- transports/webrtc/Cargo.toml | 5 +- transports/webrtc/src/tokio/connection.rs | 32 +- 
transports/webrtc/src/tokio/sdp.rs | 2 +- transports/webrtc/src/tokio/transport.rs | 2 +- transports/webrtc/src/tokio/udp_mux.rs | 36 +- transports/webrtc/src/tokio/upgrade.rs | 15 +- transports/webrtc/tests/smoke.rs | 17 +- transports/websocket-websys/Cargo.toml | 2 +- transports/websocket-websys/src/lib.rs | 2 +- transports/websocket/Cargo.toml | 2 +- transports/websocket/src/framed.rs | 52 +- transports/webtransport-websys/Cargo.toml | 2 +- .../webtransport-websys/src/transport.rs | 2 +- 187 files changed, 2158 insertions(+), 1382 deletions(-) create mode 100644 examples/metrics/docker-compose.yml create mode 100644 examples/metrics/otel-collector-config.yaml diff --git a/Cargo.lock b/Cargo.lock index b32e561d48c..98d0a971eff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -486,10 +486,11 @@ name = "autonat-example" version = "0.1.0" dependencies = [ "clap", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -679,13 +680,11 @@ version = "0.1.0" dependencies = [ "anyhow", "axum", - "env_logger 0.10.0", "futures", "js-sys", "libp2p", "libp2p-webrtc", "libp2p-webrtc-websys", - "log", "mime_guess", "rand 0.8.5", "rust-embed", @@ -693,6 +692,8 @@ dependencies = [ "tokio-util", "tower", "tower-http", + "tracing", + "tracing-subscriber", "wasm-bindgen", "wasm-bindgen-futures", "wasm-logger", @@ -820,10 +821,11 @@ name = "chat-example" version = "0.1.0" dependencies = [ "async-trait", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1223,12 +1225,13 @@ name = "dcutr-example" version = "0.1.0" dependencies = [ "clap", - "env_logger 0.10.0", "futures", "futures-timer", "libp2p", "log", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1314,9 +1317,10 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -1527,10 +1531,11 @@ dependencies = [ "async-std", "clap", "either", - "env_logger 0.10.0", "futures", "libp2p", "serde", + "tracing", + "tracing-subscriber", "void", ] @@ -1943,11 +1948,11 @@ dependencies = [ "env_logger 0.10.0", "futures", "libp2p", - "log", "redis", "serde", "serde_json", "tokio", + "tracing", ] [[package]] @@ -2046,6 +2051,18 @@ dependencies = [ "tokio-rustls 0.23.4", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -2065,9 +2082,10 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2198,7 +2216,6 @@ dependencies = [ "axum", "console_error_panic_hook", "either", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2208,7 +2225,6 @@ dependencies = [ "libp2p-tls", "libp2p-webrtc", "libp2p-webrtc-websys", - "log", "mime_guess", "rand 0.8.5", "redis", @@ -2260,6 +2276,8 @@ dependencies = [ "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2268,10 +2286,11 @@ version = "0.1.0" dependencies = [ "async-trait", "either", - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2367,7 +2386,6 @@ dependencies = [ "bytes", "clap", "either", - "env_logger 
0.10.0", "futures", "futures-timer", "getrandom 0.2.10", @@ -2409,6 +2427,7 @@ dependencies = [ "rw-stream-sink", "thiserror", "tokio", + "tracing-subscriber", ] [[package]] @@ -2431,7 +2450,6 @@ dependencies = [ "async-std", "async-trait", "asynchronous-codec 0.6.2", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2440,10 +2458,11 @@ dependencies = [ "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-test", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2476,7 +2495,6 @@ dependencies = [ "libp2p-identity", "libp2p-mplex", "libp2p-noise", - "log", "multiaddr", "multihash", "multistream-select", @@ -2490,6 +2508,7 @@ dependencies = [ "serde", "smallvec", "thiserror", + "tracing", "unsigned-varint", "void", ] @@ -2502,7 +2521,6 @@ dependencies = [ "asynchronous-codec 0.6.2", "clap", "either", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2519,12 +2537,13 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "lru 0.11.1", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "thiserror", + "tracing", + "tracing-subscriber", "void", ] @@ -2535,14 +2554,14 @@ dependencies = [ "async-std", "async-std-resolver", "async-trait", - "env_logger 0.10.0", "futures", "libp2p-core", "libp2p-identity", - "log", "parking_lot", "smallvec", "tokio", + "tracing", + "tracing-subscriber", "trust-dns-resolver", ] @@ -2558,12 +2577,12 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "smallvec", "thiserror", + "tracing", ] [[package]] @@ -2576,7 +2595,6 @@ dependencies = [ "byteorder", "bytes", "either", - "env_logger 0.10.0", "fnv", "futures", "futures-ticker", @@ -2590,7 +2608,6 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-test", "libp2p-yamux", - "log", "prometheus-client", "quick-protobuf", "quick-protobuf-codec", @@ -2600,6 +2617,8 @@ dependencies = [ "serde", "sha2 0.10.8", "smallvec", + "tracing", + "tracing-subscriber", "unsigned-varint", "void", ] @@ -2611,7 +2630,6 @@ dependencies = [ "async-std", "asynchronous-codec 0.6.2", "either", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2619,12 +2637,13 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", - "log", "lru 0.12.0", "quick-protobuf", "quick-protobuf-codec", "smallvec", "thiserror", + "tracing", + "tracing-subscriber", "void", ] @@ -2640,7 +2659,6 @@ dependencies = [ "hex-literal", "hkdf", "libsecp256k1", - "log", "multihash", "p256", "quick-protobuf", @@ -2653,6 +2671,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "thiserror", + "tracing", "void", "zeroize", ] @@ -2666,7 +2685,6 @@ dependencies = [ "asynchronous-codec 0.6.2", "bytes", "either", - "env_logger 0.10.0", "fnv", "futures", "futures-timer", @@ -2678,7 +2696,6 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-test", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", @@ -2687,6 +2704,8 @@ dependencies = [ "sha2 0.10.8", "smallvec", "thiserror", + "tracing", + "tracing-subscriber", "uint", "unsigned-varint", "void", @@ -2699,7 +2718,6 @@ dependencies = [ "async-io", "async-std", "data-encoding", - "env_logger 0.10.0", "futures", "if-watch", "libp2p-core", @@ -2709,11 +2727,12 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", "smallvec", "socket2 0.5.5", "tokio", + "tracing", + "tracing-subscriber", "trust-dns-proto", 
"void", ] @@ -2729,10 +2748,10 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-derive", "libp2p-swarm-test", - "log", "memory-stats", "rand 0.8.5", "sysinfo", + "tracing", "void", ] @@ -2761,19 +2780,19 @@ dependencies = [ "asynchronous-codec 0.6.2", "bytes", "criterion", - "env_logger 0.10.0", "futures", "libp2p-core", "libp2p-identity", "libp2p-muxer-test-harness", "libp2p-plaintext", "libp2p-tcp", - "log", "nohash-hasher", "parking_lot", "quickcheck-ext", "rand 0.8.5", "smallvec", + "tracing", + "tracing-subscriber", "unsigned-varint", ] @@ -2785,7 +2804,7 @@ dependencies = [ "futures-timer", "futures_ringbuf", "libp2p-core", - "log", + "tracing", ] [[package]] @@ -2795,12 +2814,10 @@ dependencies = [ "asynchronous-codec 0.7.0", "bytes", "curve25519-dalek", - "env_logger 0.10.0", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "log", "multiaddr", "multihash", "once_cell", @@ -2811,6 +2828,8 @@ dependencies = [ "snow", "static_assertions", "thiserror", + "tracing", + "tracing-subscriber", "x25519-dalek", "zeroize", ] @@ -2821,7 +2840,6 @@ version = "0.3.0" dependencies = [ "anyhow", "clap", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2836,12 +2854,13 @@ dependencies = [ "libp2p-tcp", "libp2p-tls", "libp2p-yamux", - "log", "rand 0.8.5", "serde", "serde_json", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "void", ] @@ -2851,7 +2870,6 @@ version = "0.44.0" dependencies = [ "async-std", "either", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2859,9 +2877,10 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "libp2p-swarm-test", - "log", "quickcheck-ext", "rand 0.8.5", + "tracing", + "tracing-subscriber", "void", ] @@ -2871,16 +2890,16 @@ version = "0.41.0" dependencies = [ "asynchronous-codec 0.6.2", "bytes", - "env_logger 0.10.0", "futures", "futures_ringbuf", "libp2p-core", "libp2p-identity", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2895,13 +2914,13 @@ dependencies = [ "libp2p-tcp", "libp2p-websocket", "libp2p-yamux", - "log", "pin-project", "quickcheck-ext", "rand 0.8.5", "salsa20", "sha3", "tokio", + "tracing", ] [[package]] @@ -2910,7 +2929,6 @@ version = "0.10.0" dependencies = [ "async-std", "bytes", - "env_logger 0.10.0", "futures", "futures-timer", "if-watch", @@ -2921,7 +2939,6 @@ dependencies = [ "libp2p-tcp", "libp2p-tls", "libp2p-yamux", - "log", "parking_lot", "quickcheck", "quinn", @@ -2931,6 +2948,8 @@ dependencies = [ "socket2 0.5.5", "thiserror", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2940,7 +2959,6 @@ dependencies = [ "asynchronous-codec 0.6.2", "bytes", "either", - "env_logger 0.10.0", "futures", "futures-bounded", "futures-timer", @@ -2952,13 +2970,14 @@ dependencies = [ "libp2p-swarm", "libp2p-swarm-test", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "quickcheck-ext", "rand 0.8.5", "static_assertions", "thiserror", + "tracing", + "tracing-subscriber", "void", ] @@ -2969,7 +2988,6 @@ dependencies = [ "async-trait", "asynchronous-codec 0.6.2", "bimap", - "env_logger 0.10.0", "futures", "futures-timer", "instant", @@ -2983,12 +3001,13 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", "thiserror", "tokio", + "tracing", + "tracing-subscriber", "void", ] @@ -3000,7 +3019,6 @@ dependencies = [ "async-std", "async-trait", "cbor4ii", - "env_logger 0.10.0", "futures", 
"futures-bounded", "futures-timer", @@ -3013,11 +3031,12 @@ dependencies = [ "libp2p-swarm-test", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", "serde", "serde_json", "smallvec", + "tracing", + "tracing-subscriber", "void", ] @@ -3027,17 +3046,17 @@ version = "0.12.3" dependencies = [ "base64 0.21.5", "clap", - "env_logger 0.10.0", "futures", "futures-timer", "hyper", "libp2p", - "log", "prometheus-client", "serde", "serde_derive", "serde_json", "tokio", + "tracing", + "tracing-subscriber", "zeroize", ] @@ -3047,7 +3066,6 @@ version = "0.44.0" dependencies = [ "async-std", "either", - "env_logger 0.10.0", "fnv", "futures", "futures-timer", @@ -3062,13 +3080,14 @@ dependencies = [ "libp2p-swarm-derive", "libp2p-swarm-test", "libp2p-yamux", - "log", "multistream-select", "once_cell", "quickcheck-ext", "rand 0.8.5", "smallvec", "tokio", + "tracing", + "tracing-subscriber", "trybuild", "void", "wasm-bindgen-futures", @@ -3097,8 +3116,8 @@ dependencies = [ "libp2p-swarm", "libp2p-tcp", "libp2p-yamux", - "log", "rand 0.8.5", + "tracing", ] [[package]] @@ -3107,16 +3126,16 @@ version = "0.41.0" dependencies = [ "async-io", "async-std", - "env_logger 0.10.0", "futures", "futures-timer", "if-watch", "libc", "libp2p-core", "libp2p-identity", - "log", "socket2 0.5.5", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -3148,9 +3167,9 @@ dependencies = [ "async-std", "futures", "libp2p-core", - "log", "tempfile", "tokio", + "tracing", ] [[package]] @@ -3162,8 +3181,8 @@ dependencies = [ "igd-next", "libp2p-core", "libp2p-swarm", - "log", "tokio", + "tracing", "void", ] @@ -3173,7 +3192,6 @@ version = "0.6.1-alpha" dependencies = [ "async-trait", "bytes", - "env_logger 0.10.0", "futures", "futures-timer", "hex", @@ -3182,7 +3200,6 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "libp2p-webrtc-utils", - "log", "multihash", "quickcheck", "rand 0.8.5", @@ -3193,6 +3210,8 @@ dependencies = [ "tinytemplate", "tokio", "tokio-util", + "tracing", + "tracing-subscriber", "webrtc", ] @@ -3208,7 +3227,6 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-noise", - "log", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -3216,6 +3234,7 @@ dependencies = [ "sha2 0.10.8", "thiserror", "tinytemplate", + "tracing", "unsigned-varint", ] @@ -3236,10 +3255,10 @@ dependencies = [ "libp2p-ping", "libp2p-swarm", "libp2p-webrtc-utils", - "log", "send_wrapper 0.6.0", "serde", "thiserror", + "tracing", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -3257,12 +3276,12 @@ dependencies = [ "libp2p-dns", "libp2p-identity", "libp2p-tcp", - "log", "parking_lot", "pin-project-lite", "rcgen", "rw-stream-sink", "soketto", + "tracing", "url", "webpki-roots", ] @@ -3278,10 +3297,10 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "libp2p-yamux", - "log", "parking_lot", "send_wrapper 0.6.0", "thiserror", + "tracing", "wasm-bindgen", "web-sys", ] @@ -3295,12 +3314,12 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-noise", - "log", "multiaddr", "multibase", "multihash", "send_wrapper 0.6.0", "thiserror", + "tracing", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -3314,8 +3333,8 @@ dependencies = [ "futures", "libp2p-core", "libp2p-muxer-test-harness", - "log", "thiserror", + "tracing", "yamux", ] @@ -3499,13 +3518,17 @@ dependencies = [ name = "metrics-example" version = "0.1.0" dependencies = [ - "env_logger 0.10.0", "futures", "hyper", "libp2p", - "log", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry_api", "prometheus-client", "tokio", + 
"tracing", + "tracing-opentelemetry", + "tracing-subscriber", ] [[package]] @@ -3600,15 +3623,15 @@ version = "0.13.0" dependencies = [ "async-std", "bytes", - "env_logger 0.10.0", "futures", "futures_ringbuf", - "log", "pin-project", "quickcheck-ext", "rand 0.8.5", "rw-stream-sink", "smallvec", + "tracing", + "tracing-subscriber", "unsigned-varint", ] @@ -3876,6 +3899,104 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" +dependencies = [ + "async-trait", + "futures-core", + "http", + "opentelemetry-proto", + "opentelemetry-semantic-conventions", + "opentelemetry_api", + "opentelemetry_sdk", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e3f814aa9f8c905d0ee4bde026afd3b2577a97c10e1699912e3e44f0c4cbeb" +dependencies = [ + "opentelemetry_api", + "opentelemetry_sdk", + "prost", + "tonic", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269" +dependencies = [ + "opentelemetry", +] + +[[package]] +name = "opentelemetry_api" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b" +dependencies = [ + "futures-channel", + "futures-util", + "indexmap 1.9.3", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "ordered-float", + "percent-encoding", + "rand 0.8.5", + "regex", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", +] + +[[package]] +name = "ordered-float" +version = "3.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" +dependencies = [ + "num-traits", +] + [[package]] name = "overload" version = "0.1.1" @@ -4002,10 +4123,11 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" name = "ping-example" version = "0.1.0" dependencies = [ - "env_logger 0.10.0", "futures", "libp2p", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -4180,6 +4302,29 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "prost" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -4498,9 +4643,10 @@ dependencies = [ "async-std", "async-trait", "clap", - "env_logger 0.10.0", "futures", "libp2p", + "tracing", + "tracing-subscriber", ] [[package]] @@ -4509,11 +4655,11 @@ version = "0.1.0" dependencies = [ "async-std", "async-trait", - "env_logger 0.10.0", "futures", "libp2p", - "log", "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -5540,6 +5686,16 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.1.0" @@ -5582,6 +5738,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.10" @@ -5597,6 +5764,34 @@ dependencies = [ "tracing", ] +[[package]] +name = "tonic" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" +dependencies = [ + "async-trait", + "axum", + "base64 0.21.5", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.4.13" @@ -5605,9 +5800,13 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", + "indexmap 1.9.3", "pin-project", "pin-project-lite", + "rand 0.8.5", + "slab", "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -5694,6 +5893,22 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-opentelemetry" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8" +dependencies = [ + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "smallvec", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + [[package]] name = "tracing-subscriber" version = "0.3.17" @@ -5919,6 +6134,7 @@ dependencies = [ "futures", "libp2p", "tokio", + "tracing-subscriber", ] [[package]] @@ -5932,6 +6148,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "urlencoding" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + [[package]] name = "utf8parse" version = "0.2.1" diff --git a/core/Cargo.toml b/core/Cargo.toml index 4cbfa827af6..b9ebb0ad851 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -17,7 +17,6 @@ futures = { version = "0.3.29", features = ["executor", "thread-pool"] } futures-timer = "3" instant = "0.1.12" libp2p-identity = { workspace = true, features = ["peerid", "ed25519"] } -log = "0.4" multiaddr = { workspace = true } multihash = { workspace = 
true } multistream-select = { workspace = true } @@ -30,6 +29,7 @@ rw-stream-sink = { workspace = true } serde = { version = "1", optional = true, features = ["derive"] } smallvec = "1.11.1" thiserror = "1.0" +tracing = "0.1.37" unsigned-varint = "0.7" void = "1" diff --git a/core/src/transport/choice.rs b/core/src/transport/choice.rs index 8d3bfdecb79..aa3acfc3231 100644 --- a/core/src/transport/choice.rs +++ b/core/src/transport/choice.rs @@ -22,7 +22,6 @@ use crate::either::EitherFuture; use crate::transport::{ListenerId, Transport, TransportError, TransportEvent}; use either::Either; use futures::future; -use log::{debug, trace}; use multiaddr::Multiaddr; use std::{pin::Pin, task::Context, task::Poll}; @@ -52,16 +51,16 @@ where id: ListenerId, addr: Multiaddr, ) -> Result<(), TransportError> { - trace!( - "Attempting to listen on {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to listen on address using {}", std::any::type_name::() ); let addr = match self.0.listen_on(id, addr) { Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to listen on {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to listen on address using {}", std::any::type_name::() ); addr @@ -69,16 +68,16 @@ where res => return res.map_err(|err| err.map(Either::Left)), }; - trace!( - "Attempting to listen on {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to listen on address using {}", std::any::type_name::() ); let addr = match self.1.listen_on(id, addr) { Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to listen on {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to listen on address using {}", std::any::type_name::() ); addr @@ -94,17 +93,17 @@ where } fn dial(&mut self, addr: Multiaddr) -> Result> { - trace!( - "Attempting to dial {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to dial address using {}", std::any::type_name::() ); let addr = match self.0.dial(addr) { Ok(connec) => return Ok(EitherFuture::First(connec)), Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to dial {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to dial address using {}", std::any::type_name::() ); addr @@ -114,17 +113,17 @@ where } }; - trace!( - "Attempting to dial {} using {}", - addr, + tracing::trace!( + address=%addr, + "Attempting to dial address using {}", std::any::type_name::() ); let addr = match self.1.dial(addr) { Ok(connec) => return Ok(EitherFuture::Second(connec)), Err(TransportError::MultiaddrNotSupported(addr)) => { - debug!( - "Failed to dial {} using {}", - addr, + tracing::debug!( + address=%addr, + "Failed to dial address using {}", std::any::type_name::() ); addr diff --git a/core/src/transport/global_only.rs b/core/src/transport/global_only.rs index 4f1fe8ab794..0671b0e9984 100644 --- a/core/src/transport/global_only.rs +++ b/core/src/transport/global_only.rs @@ -22,7 +22,6 @@ use crate::{ multiaddr::{Multiaddr, Protocol}, transport::{ListenerId, TransportError, TransportEvent}, }; -use log::debug; use std::{ pin::Pin, task::{Context, Poll}, @@ -292,20 +291,20 @@ impl crate::Transport for Transport { match addr.iter().next() { Some(Protocol::Ip4(a)) => { if !ipv4_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=%a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial(addr) } Some(Protocol::Ip6(a)) => { if 
!ipv6_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=%a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial(addr) } _ => { - debug!("Not dialing unsupported Multiaddress {:?}.", addr); + tracing::debug!(address=%addr, "Not dialing unsupported Multiaddress"); Err(TransportError::MultiaddrNotSupported(addr)) } } @@ -318,20 +317,20 @@ impl crate::Transport for Transport { match addr.iter().next() { Some(Protocol::Ip4(a)) => { if !ipv4_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=?a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial_as_listener(addr) } Some(Protocol::Ip6(a)) => { if !ipv6_global::is_global(a) { - debug!("Not dialing non global IP address {:?}.", a); + tracing::debug!(ip=?a, "Not dialing non global IP address"); return Err(TransportError::MultiaddrNotSupported(addr)); } self.inner.dial_as_listener(addr) } _ => { - debug!("Not dialing unsupported Multiaddress {:?}.", addr); + tracing::debug!(address=%addr, "Not dialing unsupported Multiaddress"); Err(TransportError::MultiaddrNotSupported(addr)) } } diff --git a/core/src/upgrade/apply.rs b/core/src/upgrade/apply.rs index aefce686f01..15cb0348cf3 100644 --- a/core/src/upgrade/apply.rs +++ b/core/src/upgrade/apply.rs @@ -21,7 +21,6 @@ use crate::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade, UpgradeError}; use crate::{connection::ConnectedPoint, Negotiated}; use futures::{future::Either, prelude::*}; -use log::debug; use multistream_select::{self, DialerSelectFuture, ListenerSelectFuture}; use std::{mem, pin::Pin, task::Context, task::Poll}; @@ -141,11 +140,11 @@ where return Poll::Pending; } Poll::Ready(Ok(x)) => { - log::trace!("Upgraded inbound stream to {name}"); + tracing::trace!(upgrade=%name, "Upgraded inbound stream"); return Poll::Ready(Ok(x)); } Poll::Ready(Err(e)) => { - debug!("Failed to upgrade inbound stream to {name}"); + tracing::debug!(upgrade=%name, "Failed to upgrade inbound stream"); return Poll::Ready(Err(UpgradeError::Apply(e))); } } @@ -223,11 +222,11 @@ where return Poll::Pending; } Poll::Ready(Ok(x)) => { - log::trace!("Upgraded outbound stream to {name}",); + tracing::trace!(upgrade=%name, "Upgraded outbound stream"); return Poll::Ready(Ok(x)); } Poll::Ready(Err(e)) => { - debug!("Failed to upgrade outbound stream to {name}",); + tracing::debug!(upgrade=%name, "Failed to upgrade outbound stream",); return Poll::Ready(Err(UpgradeError::Apply(e))); } } diff --git a/examples/autonat/Cargo.toml b/examples/autonat/Cargo.toml index 712e26f1c44..9a4f2b4df86 100644 --- a/examples/autonat/Cargo.toml +++ b/examples/autonat/Cargo.toml @@ -11,9 +11,10 @@ release = false [dependencies] tokio = { version = "1.33", features = ["full"] } clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index e92be18c279..b071e717731 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -29,6 +29,7 @@ use libp2p::{autonat, identify, identity, noise, tcp, yamux, PeerId}; use 
std::error::Error; use std::net::Ipv4Addr; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p autonat")] @@ -45,7 +46,9 @@ struct Opt { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index 00ccd641da8..d1c0c005861 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ -27,6 +27,7 @@ use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{autonat, identify, identity, noise, tcp, yamux}; use std::error::Error; use std::net::Ipv4Addr; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p autonat")] @@ -37,7 +38,9 @@ struct Opt { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index cc2017e6e2c..e18f6d9c531 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -17,10 +17,10 @@ crate-type = ["cdylib"] [dependencies] anyhow = "1.0.72" -env_logger = "0.10" futures = "0.3.29" -log = "0.4" rand = "0.8" +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] axum = "0.6.19" diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs index 062a7978a01..609d72479c4 100644 --- a/examples/browser-webrtc/src/lib.rs +++ b/examples/browser-webrtc/src/lib.rs @@ -28,13 +28,13 @@ pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { .build(); let addr = libp2p_endpoint.parse::()?; - log::info!("Dialing {addr}"); + tracing::info!("Dialing {addr}"); swarm.dial(addr)?; loop { match swarm.next().await.unwrap() { SwarmEvent::Behaviour(ping::Event { result: Err(e), .. }) => { - log::error!("Ping failed: {:?}", e); + tracing::error!("Ping failed: {:?}", e); break; } @@ -43,10 +43,10 @@ pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { result: Ok(rtt), .. 
}) => { - log::info!("Ping successful: RTT: {rtt:?}, from {peer}"); + tracing::info!("Ping successful: RTT: {rtt:?}, from {peer}"); body.append_p(&format!("RTT: {rtt:?} at {}", Date::new_0().to_string()))?; } - evt => log::info!("Swarm event: {:?}", evt), + evt => tracing::info!("Swarm event: {:?}", evt), } } diff --git a/examples/browser-webrtc/src/main.rs b/examples/browser-webrtc/src/main.rs index 4ee86cd1229..97d1ab30250 100644 --- a/examples/browser-webrtc/src/main.rs +++ b/examples/browser-webrtc/src/main.rs @@ -22,10 +22,9 @@ use tower_http::cors::{Any, CorsLayer}; #[tokio::main] async fn main() -> anyhow::Result<()> { - env_logger::builder() - .parse_filters("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug") - .parse_default_env() - .init(); + let _ = tracing_subscriber::fmt() + .with_env_filter("browser_webrtc_example=debug,libp2p_webrtc=info,libp2p_ping=debug") + .try_init(); let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_tokio() @@ -56,11 +55,13 @@ async fn main() -> anyhow::Result<()> { .iter() .any(|e| e == Protocol::Ip4(Ipv4Addr::LOCALHOST)) { - log::debug!("Ignoring localhost address to make sure the example works in Firefox"); + tracing::debug!( + "Ignoring localhost address to make sure the example works in Firefox" + ); continue; } - log::info!("Listening on: {address}"); + tracing::info!(%address, "Listening"); break address; } @@ -74,7 +75,7 @@ async fn main() -> anyhow::Result<()> { loop { tokio::select! { swarm_event = swarm.next() => { - log::trace!("Swarm Event: {:?}", swarm_event) + tracing::trace!(?swarm_event) }, _ = tokio::signal::ctrl_c() => { break; @@ -110,7 +111,7 @@ pub(crate) async fn serve(libp2p_transport: Multiaddr) { let addr = SocketAddr::new(listen_addr.into(), 8080); - log::info!("Serving client files at http://{addr}"); + tracing::info!(url=%format!("http://{addr}"), "Serving client files at url"); axum::Server::bind(&addr) .serve(server.into_make_service()) @@ -141,7 +142,7 @@ async fn get_index( /// Serves the static files generated by `wasm-pack`. async fn get_static_file(Path(path): Path) -> Result { - log::debug!("Serving static file: {path}"); + tracing::debug!(file_path=%path, "Serving static file"); let content = StaticFiles::get(&path).ok_or(StatusCode::NOT_FOUND)?.data; let content_type = mime_guess::from_path(path) diff --git a/examples/chat/Cargo.toml b/examples/chat/Cargo.toml index 7973b56eb47..b5af806501b 100644 --- a/examples/chat/Cargo.toml +++ b/examples/chat/Cargo.toml @@ -11,9 +11,10 @@ release = false [dependencies] tokio = { version = "1.33", features = ["full"] } async-trait = "0.1" -env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "mdns", "noise", "macros", "tcp", "yamux", "quic"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/chat/src/main.rs b/examples/chat/src/main.rs index 0a261873f35..24f8b19d0c4 100644 --- a/examples/chat/src/main.rs +++ b/examples/chat/src/main.rs @@ -27,6 +27,7 @@ use std::error::Error; use std::hash::{Hash, Hasher}; use std::time::Duration; use tokio::{io, io::AsyncBufReadExt, select}; +use tracing_subscriber::EnvFilter; // We create a custom network behaviour that combines Gossipsub and Mdns. 
#[derive(NetworkBehaviour)] @@ -37,6 +38,10 @@ struct MyBehaviour { #[tokio::main] async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_tokio() .with_tcp( diff --git a/examples/dcutr/Cargo.toml b/examples/dcutr/Cargo.toml index 994eed0283e..c6704ab0c03 100644 --- a/examples/dcutr/Cargo.toml +++ b/examples/dcutr/Cargo.toml @@ -10,12 +10,13 @@ release = false [dependencies] clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" futures = "0.3.29" futures-timer = "3.0" libp2p = { path = "../../libp2p", features = [ "dns", "dcutr", "identify", "macros", "noise", "ping", "quic", "relay", "rendezvous", "tcp", "tokio", "yamux"] } log = "0.4" tokio = { version = "1.29", features = ["macros", "net", "rt", "signal"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 6a87e351e02..91beaa02c67 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -28,9 +28,9 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, PeerId, }; -use log::info; use std::error::Error; use std::str::FromStr; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p DCUtR client")] @@ -71,7 +71,9 @@ impl FromStr for Mode { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opts = Opts::parse(); @@ -120,7 +122,7 @@ async fn main() -> Result<(), Box> { event = swarm.next() => { match event.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {:?}", address); + tracing::info!(%address, "Listening on address"); } event => panic!("{event:?}"), } @@ -149,14 +151,14 @@ async fn main() -> Result<(), Box> { SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Sent { .. })) => { - info!("Told relay its public address."); + tracing::info!("Told relay its public address"); told_relay_observed_addr = true; } SwarmEvent::Behaviour(BehaviourEvent::Identify(identify::Event::Received { info: identify::Info { observed_addr, .. }, .. })) => { - info!("Relay told us our observed address: {observed_addr}"); + tracing::info!(address=%observed_addr, "Relay told us our observed address"); learned_observed_addr = true; } event => panic!("{event:?}"), @@ -189,31 +191,31 @@ async fn main() -> Result<(), Box> { loop { match swarm.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {:?}", address); + tracing::info!(%address, "Listening on address"); } SwarmEvent::Behaviour(BehaviourEvent::RelayClient( relay::client::Event::ReservationReqAccepted { .. }, )) => { assert!(opts.mode == Mode::Listen); - info!("Relay accepted our reservation request."); + tracing::info!("Relay accepted our reservation request"); } SwarmEvent::Behaviour(BehaviourEvent::RelayClient(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Dcutr(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Identify(event)) => { - info!("{:?}", event) + tracing::info!(?event) } SwarmEvent::Behaviour(BehaviourEvent::Ping(_)) => {} SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. 
} => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); + tracing::info!(peer=%peer_id, ?endpoint, "Established new connection"); } SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => { - info!("Outgoing connection error to {:?}: {:?}", peer_id, error); + tracing::info!(peer=?peer_id, "Outgoing connection failed: {error}"); } _ => {} } diff --git a/examples/distributed-key-value-store/Cargo.toml b/examples/distributed-key-value-store/Cargo.toml index 6fdc0ec72b8..dc084b30091 100644 --- a/examples/distributed-key-value-store/Cargo.toml +++ b/examples/distributed-key-value-store/Cargo.toml @@ -11,9 +11,10 @@ release = false [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "async-std", "dns", "kad", "mdns", "noise", "macros", "tcp", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index b8ecd059fc8..1843520838b 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -31,10 +31,13 @@ use libp2p::{ tcp, yamux, }; use std::error::Error; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // We create a custom network behaviour that combines Kademlia and mDNS. #[derive(NetworkBehaviour)] diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml index 22599f5fa38..d0f394fc3df 100644 --- a/examples/file-sharing/Cargo.toml +++ b/examples/file-sharing/Cargo.toml @@ -13,9 +13,10 @@ serde = { version = "1.0", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } clap = { version = "4.4.7", features = ["derive"] } either = "1.9" -env_logger = "0.10" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "async-std", "cbor", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } void = "1.0.2" [lints] diff --git a/examples/file-sharing/src/main.rs b/examples/file-sharing/src/main.rs index 4b6d368fc47..ad1a12b3b02 100644 --- a/examples/file-sharing/src/main.rs +++ b/examples/file-sharing/src/main.rs @@ -31,10 +31,13 @@ use libp2p::{core::Multiaddr, multiaddr::Protocol}; use std::error::Error; use std::io::Write; use std::path::PathBuf; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); diff --git a/examples/identify/Cargo.toml b/examples/identify/Cargo.toml index ac36290e7a2..48449636c61 100644 --- a/examples/identify/Cargo.toml +++ b/examples/identify/Cargo.toml @@ -11,9 +11,10 @@ release = false [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10" futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["async-std", "dns", "dcutr", "identify", "macros", "noise", "ping", "relay", "rendezvous", "tcp", "tokio","yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = 
["env-filter"] } [lints] workspace = true diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index d6be9cb9435..3c40addbcf8 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -23,10 +23,13 @@ use futures::StreamExt; use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux}; use std::error::Error; +use tracing_subscriber::EnvFilter; #[async_std::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_async_std() diff --git a/examples/ipfs-kad/Cargo.toml b/examples/ipfs-kad/Cargo.toml index f57d158b1e6..537b82c24bf 100644 --- a/examples/ipfs-kad/Cargo.toml +++ b/examples/ipfs-kad/Cargo.toml @@ -16,6 +16,8 @@ env_logger = "0.10" futures = "0.3.29" anyhow = "1.0.75" libp2p = { path = "../../libp2p", features = [ "tokio", "dns", "kad", "noise", "tcp", "yamux", "rsa"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ipfs-kad/src/main.rs b/examples/ipfs-kad/src/main.rs index dcb0ef95335..0d11bdd851a 100644 --- a/examples/ipfs-kad/src/main.rs +++ b/examples/ipfs-kad/src/main.rs @@ -28,6 +28,7 @@ use anyhow::{bail, Result}; use clap::Parser; use futures::StreamExt; use libp2p::{bytes::BufMut, identity, kad, noise, swarm::SwarmEvent, tcp, yamux, PeerId}; +use tracing_subscriber::EnvFilter; const BOOTNODES: [&str; 4] = [ "QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN", @@ -38,7 +39,9 @@ const BOOTNODES: [&str; 4] = [ #[tokio::main] async fn main() -> Result<()> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // Create a random key for ourselves. 
let local_key = identity::Keypair::generate_ed25519(); diff --git a/examples/ipfs-private/Cargo.toml b/examples/ipfs-private/Cargo.toml index 5a8bbd79b63..20cafabe079 100644 --- a/examples/ipfs-private/Cargo.toml +++ b/examples/ipfs-private/Cargo.toml @@ -12,9 +12,10 @@ release = false tokio = { version = "1.33", features = ["rt-multi-thread", "macros", "io-std"] } async-trait = "0.1" either = "1.9" -env_logger = "0.10" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "tokio", "gossipsub", "dns", "identify", "kad", "macros", "noise", "ping", "pnet", "tcp", "websocket", "yamux"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs index 861648fecdd..12bd985cdf0 100644 --- a/examples/ipfs-private/src/main.rs +++ b/examples/ipfs-private/src/main.rs @@ -33,6 +33,7 @@ use libp2p::{ }; use std::{env, error::Error, fs, path::Path, str::FromStr}; use tokio::{io, io::AsyncBufReadExt, select}; +use tracing_subscriber::EnvFilter; /// Get the current ipfs repo path, either from the IPFS_PATH environment variable or /// from the default $HOME/.ipfs @@ -87,7 +88,9 @@ fn parse_legacy_multiaddr(text: &str) -> Result> { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let ipfs_path = get_ipfs_path(); println!("using IPFS_PATH {ipfs_path:?}"); diff --git a/examples/metrics/Cargo.toml b/examples/metrics/Cargo.toml index 2cb904172d9..c8f74a17ebd 100644 --- a/examples/metrics/Cargo.toml +++ b/examples/metrics/Cargo.toml @@ -9,13 +9,17 @@ license = "MIT" release = false [dependencies] -env_logger = "0.10.0" futures = "0.3.29" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } -libp2p = { path = "../../libp2p", features = ["async-std", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } -log = "0.4.20" -tokio = { version = "1", features = ["rt-multi-thread"] } +libp2p = { path = "../../libp2p", features = ["tokio", "metrics", "ping", "noise", "identify", "tcp", "yamux", "macros"] } +opentelemetry = { version = "0.20.0", features = ["rt-tokio", "metrics"] } +opentelemetry-otlp = { version = "0.13.0", features = ["metrics"]} +opentelemetry_api = "0.20.0" prometheus-client = { workspace = true } +tokio = { version = "1", features = ["full"] } +tracing = "0.1.37" +tracing-opentelemetry = "0.21.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/metrics/README.md b/examples/metrics/README.md index fc73cbd7410..160536985f1 100644 --- a/examples/metrics/README.md +++ b/examples/metrics/README.md @@ -1,6 +1,6 @@ ## Description -The example showcases how to run a p2p network with **libp2p** and collect metrics using `libp2p-metrics`. +The example showcases how to run a p2p network with **libp2p** and collect metrics using `libp2p-metrics` as well as span data via `opentelemetry`. It sets up multiple nodes in the network and measures various metrics, such as `libp2p_ping`, to evaluate the network's performance. ## Usage @@ -34,6 +34,45 @@ To run the example, follow these steps: After executing the command, you should see a long list of metrics printed to the terminal. Make sure to check the `libp2p_ping` metrics, which should have a value greater than zero (`>0`). 
+## Opentelemetry + +To see the span data collected as part of the `Swarm`'s activity, start up an opentelemetry collector: + +```sh +docker compose up +``` + +Then, configure tracing to output spans: + +```shell +export RUST_LOG=info,[ConnectionHandler::poll]=trace,[NetworkBehaviour::poll]=trace +``` + +Next, (re)start the two example instances so that they connect to the OTEL collector. +Finally, open the Jaeger UI in a browser and explore the spans: http://localhost:16686. + +### Filtering spans + +For precise details on the filter syntax, please see the following documentation in tracing: . + +`rust-libp2p` consistently applies spans to the following functions: + +- `ConnectionHandler::poll` implementations +- `NetworkBehaviour::poll` implementations + +The above spans are all called exactly that: `ConnectionHandler::poll` and `NetworkBehaviour::poll`. +You can activate _all_ of them by setting: + +``` +RUST_LOG=[ConnectionHandler::poll]=trace +``` + +If you just want to see the spans of the `libp2p_ping` crate, you can filter like this: + +``` +RUST_LOG=libp2p_ping[ConnectionHandler::poll]=trace +``` + ## Conclusion This example demonstrates how to utilize the `libp2p-metrics` crate to collect and analyze metrics in a libp2p network. diff --git a/examples/metrics/docker-compose.yml b/examples/metrics/docker-compose.yml new file mode 100644 index 00000000000..06d8d5becfe --- /dev/null +++ b/examples/metrics/docker-compose.yml @@ -0,0 +1,23 @@ +version: "2" +services: + # Jaeger + jaeger-all-in-one: + image: jaegertracing/all-in-one:latest + restart: always + ports: + - "16686:16686" + - "14268" + - "14250" + + # Collector + otel-collector: + image: otel/opentelemetry-collector:0.88.0 + restart: always + command: ["--config=/etc/otel-collector-config.yaml"] + volumes: + - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml + ports: + - "13133:13133" # health_check extension + - "4317:4317" # OTLP gRPC receiver + depends_on: + - jaeger-all-in-one diff --git a/examples/metrics/otel-collector-config.yaml b/examples/metrics/otel-collector-config.yaml new file mode 100644 index 00000000000..8755848cd6e --- /dev/null +++ b/examples/metrics/otel-collector-config.yaml @@ -0,0 +1,25 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + +exporters: + debug: + otlp: + endpoint: jaeger-all-in-one:4317 + tls: + insecure: true + +processors: + batch: + +service: + telemetry: + logs: + level: "debug" + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [debug, otlp] diff --git a/examples/metrics/src/http_service.rs b/examples/metrics/src/http_service.rs index 46cb7aacb84..8c77d724ea3 100644 --- a/examples/metrics/src/http_service.rs +++ b/examples/metrics/src/http_service.rs @@ -21,7 +21,6 @@ use hyper::http::StatusCode; use hyper::service::Service; use hyper::{Body, Method, Request, Response, Server}; -use log::{error, info}; use prometheus_client::encoding::text::encode; use prometheus_client::registry::Registry; use std::future::Future; @@ -33,18 +32,14 @@ const METRICS_CONTENT_TYPE: &str = "application/openmetrics-text;charset=utf-8;v pub(crate) async fn metrics_server(registry: Registry) -> Result<(), std::io::Error> { // Serve on localhost. - let addr = ([127, 0, 0, 1], 8080).into(); - - // Use the tokio runtime to run the hyper server. 
- let rt = tokio::runtime::Runtime::new()?; - rt.block_on(async { - let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); - info!("Metrics server on http://{}/metrics", server.local_addr()); - if let Err(e) = server.await { - error!("server error: {}", e); - } - Ok(()) - }) + let addr = ([127, 0, 0, 1], 0).into(); + + let server = Server::bind(&addr).serve(MakeMetricService::new(registry)); + tracing::info!(metrics_server=%format!("http://{}/metrics", server.local_addr())); + if let Err(e) = server.await { + tracing::error!("server error: {}", e); + } + Ok(()) } pub(crate) struct MetricService { diff --git a/examples/metrics/src/main.rs b/examples/metrics/src/main.rs index 09d4f7a5941..18db1084d2f 100644 --- a/examples/metrics/src/main.rs +++ b/examples/metrics/src/main.rs @@ -20,25 +20,28 @@ #![doc = include_str!("../README.md")] -use env_logger::Env; -use futures::{executor::block_on, StreamExt}; +use futures::StreamExt; use libp2p::core::Multiaddr; use libp2p::metrics::{Metrics, Recorder}; use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{identify, identity, noise, ping, tcp, yamux}; -use log::info; +use opentelemetry::sdk; +use opentelemetry_api::KeyValue; use prometheus_client::registry::Registry; use std::error::Error; -use std::thread; use std::time::Duration; +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; +use tracing_subscriber::{EnvFilter, Layer}; mod http_service; -fn main() -> Result<(), Box> { - env_logger::Builder::from_env(Env::default().default_filter_or("info")).init(); +#[tokio::main] +async fn main() -> Result<(), Box> { + setup_tracing()?; let mut swarm = libp2p::SwarmBuilder::with_new_identity() - .with_async_std() + .with_tokio() .with_tcp( tcp::Config::default(), noise::Config::new, @@ -53,31 +56,52 @@ fn main() -> Result<(), Box> { if let Some(addr) = std::env::args().nth(1) { let remote: Multiaddr = addr.parse()?; swarm.dial(remote)?; - info!("Dialed {}", addr) + tracing::info!(address=%addr, "Dialed address") } let mut metric_registry = Registry::default(); let metrics = Metrics::new(&mut metric_registry); - thread::spawn(move || block_on(http_service::metrics_server(metric_registry))); + tokio::spawn(http_service::metrics_server(metric_registry)); - block_on(async { - loop { - match swarm.select_next_some().await { - SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { - info!("{:?}", ping_event); - metrics.record(&ping_event); - } - SwarmEvent::Behaviour(BehaviourEvent::Identify(identify_event)) => { - info!("{:?}", identify_event); - metrics.record(&identify_event); - } - swarm_event => { - info!("{:?}", swarm_event); - metrics.record(&swarm_event); - } + loop { + match swarm.select_next_some().await { + SwarmEvent::Behaviour(BehaviourEvent::Ping(ping_event)) => { + tracing::info!(?ping_event); + metrics.record(&ping_event); + } + SwarmEvent::Behaviour(BehaviourEvent::Identify(identify_event)) => { + tracing::info!(?identify_event); + metrics.record(&identify_event); + } + swarm_event => { + tracing::info!(?swarm_event); + metrics.record(&swarm_event); } } - }); + } +} + +fn setup_tracing() -> Result<(), Box> { + let tracer = opentelemetry_otlp::new_pipeline() + .tracing() + .with_exporter(opentelemetry_otlp::new_exporter().tonic()) + .with_trace_config( + sdk::trace::Config::default().with_resource(sdk::Resource::new(vec![KeyValue::new( + "service.name", + "libp2p", + )])), + ) + .install_batch(opentelemetry::runtime::Tokio)?; + + tracing_subscriber::registry() + 
.with(tracing_subscriber::fmt::layer().with_filter(EnvFilter::from_default_env())) + .with( + tracing_opentelemetry::layer() + .with_tracer(tracer) + .with_filter(EnvFilter::from_default_env()), + ) + .try_init()?; + Ok(()) } diff --git a/examples/ping/Cargo.toml b/examples/ping/Cargo.toml index dd32c3744d8..58cee54409e 100644 --- a/examples/ping/Cargo.toml +++ b/examples/ping/Cargo.toml @@ -9,10 +9,11 @@ license = "MIT" release = false [dependencies] -env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["noise", "ping", "tcp", "tokio", "yamux"] } tokio = { version = "1.33.0", features = ["full"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/ping/src/main.rs b/examples/ping/src/main.rs index d89415132e5..911b0384f89 100644 --- a/examples/ping/src/main.rs +++ b/examples/ping/src/main.rs @@ -23,9 +23,14 @@ use futures::prelude::*; use libp2p::{noise, ping, swarm::SwarmEvent, tcp, yamux, Multiaddr}; use std::{error::Error, time::Duration}; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_tokio() .with_tcp( diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml index 5da0d55b1d4..7017bfdad64 100644 --- a/examples/relay-server/Cargo.toml +++ b/examples/relay-server/Cargo.toml @@ -12,9 +12,10 @@ release = false clap = { version = "4.4.7", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10.0" -futures = "0.3.29" +futures = "0.3.2" libp2p = { path = "../../libp2p", features = [ "async-std", "noise", "macros", "ping", "tcp", "identify", "yamux", "relay", "quic"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/relay-server/src/main.rs b/examples/relay-server/src/main.rs index 2f86f9b938e..bf5817454f8 100644 --- a/examples/relay-server/src/main.rs +++ b/examples/relay-server/src/main.rs @@ -33,9 +33,12 @@ use libp2p::{ }; use std::error::Error; use std::net::{Ipv4Addr, Ipv6Addr}; +use tracing_subscriber::EnvFilter; fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opt::parse(); diff --git a/examples/rendezvous/Cargo.toml b/examples/rendezvous/Cargo.toml index 0c2c32f0edb..f20e5f519ae 100644 --- a/examples/rendezvous/Cargo.toml +++ b/examples/rendezvous/Cargo.toml @@ -11,11 +11,11 @@ release = false [dependencies] async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" -env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "async-std", "identify", "macros", "noise", "ping", "rendezvous", "tcp", "tokio", "yamux"] } -log = "0.4" tokio = { version = "1.33", features = ["rt-multi-thread", "macros", "time"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/rendezvous/src/bin/rzv-discover.rs b/examples/rendezvous/src/bin/rzv-discover.rs index 42a5a20b6ad..edd3d10a0ce 100644 --- a/examples/rendezvous/src/bin/rzv-discover.rs +++ b/examples/rendezvous/src/bin/rzv-discover.rs @@ -27,12 +27,15 @@ use libp2p::{ }; use std::error::Error; use 
std::time::Duration; +use tracing_subscriber::EnvFilter; const NAMESPACE: &str = "rendezvous"; #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" @@ -62,7 +65,7 @@ async fn main() -> Result<(), Box> { tokio::select! { event = swarm.select_next_some() => match event { SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == rendezvous_point => { - log::info!( + tracing::info!( "Connected to rendezvous point, discovering nodes in '{}' namespace ...", NAMESPACE ); @@ -84,7 +87,7 @@ async fn main() -> Result<(), Box> { for registration in registrations { for address in registration.record.addresses() { let peer = registration.record.peer_id(); - log::info!("Discovered peer {} at {}", peer, address); + tracing::info!(%peer, %address, "Discovered peer"); let p2p_suffix = Protocol::P2p(peer); let address_with_p2p = @@ -103,10 +106,10 @@ async fn main() -> Result<(), Box> { result: Ok(rtt), .. })) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!(%peer, "Ping is {}ms", rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } }, _ = discover_tick.tick(), if cookie.is_some() => diff --git a/examples/rendezvous/src/bin/rzv-identify.rs b/examples/rendezvous/src/bin/rzv-identify.rs index be644dbb9f8..1d545592829 100644 --- a/examples/rendezvous/src/bin/rzv-identify.rs +++ b/examples/rendezvous/src/bin/rzv-identify.rs @@ -25,10 +25,13 @@ use libp2p::{ tcp, yamux, Multiaddr, }; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" @@ -62,14 +65,14 @@ async fn main() { while let Some(event) = swarm.next().await { match event { SwarmEvent::NewListenAddr { address, .. } => { - log::info!("Listening on {}", address); + tracing::info!("Listening on {}", address); } SwarmEvent::ConnectionClosed { peer_id, cause: Some(error), .. } if peer_id == rendezvous_point => { - log::error!("Lost connection to rendezvous point {}", error); + tracing::error!("Lost connection to rendezvous point {}", error); } // once `/identify` did its job, we know our external address and can register SwarmEvent::Behaviour(MyBehaviourEvent::Identify(identify::Event::Received { @@ -80,7 +83,7 @@ async fn main() { rendezvous_point, None, ) { - log::error!("Failed to register: {error}"); + tracing::error!("Failed to register: {error}"); return; } } @@ -91,7 +94,7 @@ async fn main() { rendezvous_node, }, )) => { - log::info!( + tracing::info!( "Registered for namespace '{}' at rendezvous point {} for the next {} seconds", namespace, rendezvous_node, @@ -105,7 +108,7 @@ async fn main() { error, }, )) => { - log::error!( + tracing::error!( "Failed to register: rendezvous_node={}, namespace={}, error_code={:?}", rendezvous_node, namespace, @@ -118,10 +121,10 @@ async fn main() { result: Ok(rtt), .. 
})) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } diff --git a/examples/rendezvous/src/bin/rzv-register.rs b/examples/rendezvous/src/bin/rzv-register.rs index 928dcdd1625..bd848238d4a 100644 --- a/examples/rendezvous/src/bin/rzv-register.rs +++ b/examples/rendezvous/src/bin/rzv-register.rs @@ -25,10 +25,13 @@ use libp2p::{ tcp, yamux, Multiaddr, }; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rendezvous_point_address = "/ip4/127.0.0.1/tcp/62649".parse::().unwrap(); let rendezvous_point = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN" @@ -61,14 +64,14 @@ async fn main() { while let Some(event) = swarm.next().await { match event { SwarmEvent::NewListenAddr { address, .. } => { - log::info!("Listening on {}", address); + tracing::info!("Listening on {}", address); } SwarmEvent::ConnectionClosed { peer_id, cause: Some(error), .. } if peer_id == rendezvous_point => { - log::error!("Lost connection to rendezvous point {}", error); + tracing::error!("Lost connection to rendezvous point {}", error); } SwarmEvent::ConnectionEstablished { peer_id, .. } if peer_id == rendezvous_point => { if let Err(error) = swarm.behaviour_mut().rendezvous.register( @@ -76,10 +79,10 @@ async fn main() { rendezvous_point, None, ) { - log::error!("Failed to register: {error}"); + tracing::error!("Failed to register: {error}"); return; } - log::info!("Connection established with rendezvous point {}", peer_id); + tracing::info!("Connection established with rendezvous point {}", peer_id); } // once `/identify` did its job, we know our external address and can register SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( @@ -89,7 +92,7 @@ async fn main() { rendezvous_node, }, )) => { - log::info!( + tracing::info!( "Registered for namespace '{}' at rendezvous point {} for the next {} seconds", namespace, rendezvous_node, @@ -103,7 +106,7 @@ async fn main() { error, }, )) => { - log::error!( + tracing::error!( "Failed to register: rendezvous_node={}, namespace={}, error_code={:?}", rendezvous_node, namespace, @@ -116,10 +119,10 @@ async fn main() { result: Ok(rtt), .. })) if peer != rendezvous_point => { - log::info!("Ping to {} is {}ms", peer, rtt.as_millis()) + tracing::info!("Ping to {} is {}ms", peer, rtt.as_millis()) } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } diff --git a/examples/rendezvous/src/main.rs b/examples/rendezvous/src/main.rs index a1b811f4f11..a15bc1ca2d3 100644 --- a/examples/rendezvous/src/main.rs +++ b/examples/rendezvous/src/main.rs @@ -28,10 +28,13 @@ use libp2p::{ }; use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // Results in PeerID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN which is // used as the rendezvous point by the other peer examples. @@ -60,15 +63,15 @@ async fn main() -> Result<(), Box> { while let Some(event) = swarm.next().await { match event { SwarmEvent::ConnectionEstablished { peer_id, .. 
} => { - log::info!("Connected to {}", peer_id); + tracing::info!("Connected to {}", peer_id); } SwarmEvent::ConnectionClosed { peer_id, .. } => { - log::info!("Disconnected from {}", peer_id); + tracing::info!("Disconnected from {}", peer_id); } SwarmEvent::Behaviour(MyBehaviourEvent::Rendezvous( rendezvous::server::Event::PeerRegistered { peer, registration }, )) => { - log::info!( + tracing::info!( "Peer {} registered for namespace '{}'", peer, registration.namespace @@ -80,14 +83,14 @@ async fn main() -> Result<(), Box> { registrations, }, )) => { - log::info!( + tracing::info!( "Served peer {} with {} registrations", enquirer, registrations.len() ); } other => { - log::debug!("Unhandled {:?}", other); + tracing::debug!("Unhandled {:?}", other); } } } diff --git a/examples/upnp/Cargo.toml b/examples/upnp/Cargo.toml index 02110c33840..940f3dff65f 100644 --- a/examples/upnp/Cargo.toml +++ b/examples/upnp/Cargo.toml @@ -12,6 +12,7 @@ release = false tokio = { version = "1", features = ["rt-multi-thread", "macros"] } futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["tokio", "dns", "macros", "noise", "ping", "tcp", "yamux", "upnp"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [lints] workspace = true diff --git a/examples/upnp/src/main.rs b/examples/upnp/src/main.rs index c602a687db7..fd0764990d1 100644 --- a/examples/upnp/src/main.rs +++ b/examples/upnp/src/main.rs @@ -23,9 +23,14 @@ use futures::prelude::*; use libp2p::{noise, swarm::SwarmEvent, upnp, yamux, Multiaddr}; use std::error::Error; +use tracing_subscriber::EnvFilter; #[tokio::main] async fn main() -> Result<(), Box> { + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + let mut swarm = libp2p::SwarmBuilder::with_new_identity() .with_tokio() .with_tcp( diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml index 4d067117260..14e5793f141 100644 --- a/hole-punching-tests/Cargo.toml +++ b/hole-punching-tests/Cargo.toml @@ -10,7 +10,7 @@ anyhow = "1" env_logger = "0.10.0" futures = "0.3.29" libp2p = { path = "../libp2p", features = ["tokio", "dcutr", "identify", "macros", "noise", "ping", "relay", "tcp", "yamux", "quic"] } -log = "0.4" +tracing = "0.1.37" redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] } tokio = { version = "1.29.1", features = ["full"] } serde = { version = "1.0.190", features = ["derive"] } diff --git a/hole-punching-tests/src/main.rs b/hole-punching-tests/src/main.rs index 72b81f776ad..4f81cd65480 100644 --- a/hole-punching-tests/src/main.rs +++ b/hole-punching-tests/src/main.rs @@ -104,7 +104,7 @@ async fn main() -> Result<()> { _, _, ) => { - log::info!("Relay accepted our reservation request."); + tracing::info!("Relay accepted our reservation request."); redis .push(LISTEN_CLIENT_PEER_ID, swarm.local_peer_id()) @@ -118,7 +118,7 @@ async fn main() -> Result<()> { _, _, ) => { - log::info!("Successfully hole-punched to {remote_peer_id}"); + tracing::info!("Successfully hole-punched to {remote_peer_id}"); hole_punched_peer_connection = Some(connection_id); } @@ -144,7 +144,7 @@ async fn main() -> Result<()> { _, _, ) => { - log::info!("Failed to hole-punched to {remote_peer_id}"); + tracing::info!("Failed to hole-punched to {remote_peer_id}"); return Err(anyhow::Error::new(error)); } ( @@ -225,7 +225,7 @@ async fn client_listen_on_transport( listen_addresses += 1; } - log::info!("Listening on {address}"); + tracing::info!("Listening on {address}"); } } Ok(()) @@ 
-292,7 +292,7 @@ impl RedisClient { async fn push(&mut self, key: &str, value: impl ToString) -> Result<()> { let value = value.to_string(); - log::debug!("Pushing {key}={value} to redis"); + tracing::debug!("Pushing {key}={value} to redis"); self.inner.rpush(key, value).await?; @@ -304,7 +304,7 @@ impl RedisClient { V: FromStr + fmt::Display, V::Err: std::error::Error + Send + Sync + 'static, { - log::debug!("Fetching {key} from redis"); + tracing::debug!("Fetching {key} from redis"); let value = self .inner @@ -314,7 +314,7 @@ impl RedisClient { .with_context(|| format!("Failed to get value for {key} from redis"))? .parse()?; - log::debug!("{key}={value}"); + tracing::debug!("{key}={value}"); Ok(value) } diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 6e25699d9ed..e09e8b0e2b2 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -17,7 +17,7 @@ bs58 = { version = "0.5.0", optional = true } ed25519-dalek = { version = "2.0", optional = true } hkdf = { version = "0.12.3", optional = true } libsecp256k1 = { version = "0.7.0", optional = true } -log = "0.4" +tracing = "0.1.37" multihash = { version = "0.19.1", optional = true } p256 = { version = "0.13", default-features = false, features = [ "ecdsa", "std", "pem"], optional = true } quick-protobuf = "0.8.1" diff --git a/identity/src/keypair.rs b/identity/src/keypair.rs index 41e2181d2a9..bdfb68c0091 100644 --- a/identity/src/keypair.rs +++ b/identity/src/keypair.rs @@ -672,7 +672,7 @@ impl TryFrom for PublicKey { )?), #[cfg(not(feature = "ed25519"))] proto::KeyType::Ed25519 => { - log::debug!("support for ed25519 was disabled at compile-time"); + tracing::debug!("support for ed25519 was disabled at compile-time"); Err(DecodingError::missing_feature("ed25519")) } #[cfg(all(feature = "rsa", not(target_arch = "wasm32")))] @@ -685,7 +685,7 @@ impl TryFrom for PublicKey { } #[cfg(any(not(feature = "rsa"), target_arch = "wasm32"))] proto::KeyType::RSA => { - log::debug!("support for RSA was disabled at compile-time"); + tracing::debug!("support for RSA was disabled at compile-time"); Err(DecodingError::missing_feature("rsa")) } #[cfg(feature = "secp256k1")] @@ -695,7 +695,7 @@ impl TryFrom for PublicKey { })?), #[cfg(not(feature = "secp256k1"))] proto::KeyType::Secp256k1 => { - log::debug!("support for secp256k1 was disabled at compile-time"); + tracing::debug!("support for secp256k1 was disabled at compile-time"); Err(DecodingError::missing_feature("secp256k1")) } #[cfg(feature = "ecdsa")] @@ -706,7 +706,7 @@ impl TryFrom for PublicKey { )?), #[cfg(not(feature = "ecdsa"))] proto::KeyType::ECDSA => { - log::debug!("support for ECDSA was disabled at compile-time"); + tracing::debug!("support for ECDSA was disabled at compile-time"); Err(DecodingError::missing_feature("ecdsa")) } } diff --git a/interop-tests/Cargo.toml b/interop-tests/Cargo.toml index e49562a759a..3caad98dfa2 100644 --- a/interop-tests/Cargo.toml +++ b/interop-tests/Cargo.toml @@ -14,11 +14,11 @@ crate-type = ["cdylib", "rlib"] [dependencies] anyhow = "1" either = "1.9.0" -env_logger = "0.10.0" futures = "0.3.29" -log = "0.4" -serde = { version = "1", features = ["derive"] } rand = "0.8.5" +serde = { version = "1", features = ["derive"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] axum = "0.6" @@ -36,7 +36,7 @@ serde_json = "1" thirtyfour = "=0.32.0-rc.8" # https://github.com/stevepryde/thirtyfour/issues/169 tokio = { version = "1.33.0", features = ["full"] } 
tower-http = { version = "0.4", features = ["cors", "fs", "trace"] } -tracing = "0.1" +tracing = "0.1.37" tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(target_arch = "wasm32")'.dependencies] diff --git a/interop-tests/src/arch.rs b/interop-tests/src/arch.rs index d90af53abb1..52000f90a86 100644 --- a/interop-tests/src/arch.rs +++ b/interop-tests/src/arch.rs @@ -11,7 +11,6 @@ pub(crate) mod native { use std::time::Duration; use anyhow::{bail, Context, Result}; - use env_logger::{Env, Target}; use futures::future::BoxFuture; use futures::FutureExt; use libp2p::identity::Keypair; @@ -20,15 +19,16 @@ pub(crate) mod native { use libp2p_mplex as mplex; use libp2p_webrtc as webrtc; use redis::AsyncCommands; + use tracing_subscriber::EnvFilter; use crate::{Muxer, SecProtocol, Transport}; pub(crate) type Instant = std::time::Instant; pub(crate) fn init_logger() { - env_logger::Builder::from_env(Env::default().default_filter_or("info")) - .target(Target::Stdout) - .init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); } pub(crate) fn sleep(duration: Duration) -> BoxFuture<'static, ()> { diff --git a/interop-tests/src/bin/wasm_ping.rs b/interop-tests/src/bin/wasm_ping.rs index a228b913930..8269ff064ad 100644 --- a/interop-tests/src/bin/wasm_ping.rs +++ b/interop-tests/src/bin/wasm_ping.rs @@ -15,7 +15,6 @@ use tokio::process::Child; use tokio::sync::mpsc; use tower_http::cors::CorsLayer; use tower_http::trace::TraceLayer; -use tracing::{error, warn}; use tracing_subscriber::{fmt, prelude::*, EnvFilter}; use interop_tests::{BlpopRequest, Report}; @@ -144,16 +143,17 @@ async fn redis_blpop( ) -> Result>, StatusCode> { let client = state.0.redis_client; let mut conn = client.get_async_connection().await.map_err(|e| { - warn!("Failed to connect to redis: {e}"); + tracing::warn!("Failed to connect to redis: {e}"); StatusCode::INTERNAL_SERVER_ERROR })?; let res = conn .blpop(&request.key, request.timeout as usize) .await .map_err(|e| { - warn!( - "Failed to get list elem {} within timeout {}: {e}", - request.key, request.timeout + tracing::warn!( + key=%request.key, + timeout=%request.timeout, + "Failed to get list elem key within timeout: {e}" ); StatusCode::INTERNAL_SERVER_ERROR })?; @@ -167,7 +167,7 @@ async fn post_results( request: Json>, ) -> Result<(), StatusCode> { state.0.results_tx.send(request.0).await.map_err(|_| { - error!("Failed to send results"); + tracing::error!("Failed to send results"); StatusCode::INTERNAL_SERVER_ERROR }) } diff --git a/interop-tests/src/lib.rs b/interop-tests/src/lib.rs index d48fc289d4b..0154bec51a4 100644 --- a/interop-tests/src/lib.rs +++ b/interop-tests/src/lib.rs @@ -47,7 +47,7 @@ pub async fn run_test( let (mut swarm, local_addr) = build_swarm(ip, transport, sec_protocol, muxer, build_behaviour).await?; - log::info!("Running ping test: {}", swarm.local_peer_id()); + tracing::info!(local_peer=%swarm.local_peer_id(), "Running ping test"); // See https://github.com/libp2p/rust-libp2p/issues/4071. #[cfg(not(target_arch = "wasm32"))] @@ -74,7 +74,7 @@ pub async fn run_test( let handshake_start = Instant::now(); swarm.dial(other.parse::()?)?; - log::info!("Test instance, dialing multiaddress on: {}.", other); + tracing::info!(listener=%other, "Test instance, dialing multiaddress"); let rtt = loop { if let Some(SwarmEvent::Behaviour(BehaviourEvent::Ping(ping::Event { @@ -82,7 +82,7 @@ pub async fn run_test( .. 
}))) = swarm.next().await { - log::info!("Ping successful: {rtt:?}"); + tracing::info!(?rtt, "Ping successful"); break rtt.as_micros() as f32 / 1000.; } }; @@ -101,9 +101,9 @@ pub async fn run_test( Some(id) => id, }; - log::info!( - "Test instance, listening for incoming connections on: {:?}.", - local_addr + tracing::info!( + address=%local_addr, + "Test instance, listening for incoming connections on address" ); loop { @@ -129,7 +129,7 @@ pub async fn run_test( loop { let event = swarm.next().await.unwrap(); - log::debug!("{event:?}"); + tracing::debug!("{event:?}"); } } .boxed(), @@ -164,7 +164,7 @@ pub async fn run_test_wasm( muxer, ) .await; - log::info!("Sending test result: {result:?}"); + tracing::info!(?result, "Sending test result"); reqwest::Client::new() .post(&format!("http://{}/results", base_url)) .json(&result.map_err(|e| e.to_string())) diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 8ea8ce19b26..a47f8bfdaa1 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -139,14 +139,13 @@ libp2p-websocket = { workspace = true, optional = true } [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } async-trait = "0.1" -either = "1.8.0" -env_logger = "0.10.0" clap = { version = "4.1.6", features = ["derive"] } tokio = { version = "1.15", features = [ "io-util", "io-std", "macros", "rt", "rt-multi-thread"] } libp2p-mplex = { workspace = true } libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["tokio"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index b92d7b51c8a..db515595bfc 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -56,9 +56,9 @@ //! //! [dependencies] //! libp2p = { version = "0.52", features = ["tcp", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } -//! futures = "0.3" -//! env_logger = "0.10.0" -//! async-std = { version = "1.12", features = ["attributes"] } +//! futures = "0.3.21" +//! async-std = { version = "1.12.0", features = ["attributes"] } +//! tracing-subscriber = { version = "0.3", features = ["env-filter"] } //! ``` //! //! ## Network identity @@ -72,10 +72,11 @@ //! //! ```rust //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity(); //! @@ -96,10 +97,11 @@ //! ```rust //! use libp2p::{identity, PeerId}; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() @@ -138,11 +140,12 @@ //! ```rust //! use libp2p::swarm::NetworkBehaviour; //! use libp2p::{identity, ping, PeerId}; +//! use tracing_subscriber::EnvFilter; //! use std::error::Error; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! 
let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() @@ -168,10 +171,11 @@ //! use libp2p::swarm::NetworkBehaviour; //! use libp2p::{identity, ping, PeerId}; //! use std::error::Error; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() @@ -202,10 +206,11 @@ //! use libp2p::{identity, ping, PeerId}; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { -//! env_logger::init(); +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); //! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() @@ -252,9 +257,12 @@ //! use libp2p::{identity, ping, Multiaddr, PeerId}; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! .with_tcp( @@ -294,9 +302,12 @@ //! use libp2p::{identity, ping, Multiaddr, PeerId}; //! use std::error::Error; //! use std::time::Duration; +//! use tracing_subscriber::EnvFilter; //! //! #[async_std::main] //! async fn main() -> Result<(), Box> { +//! tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).init(); +//! //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! 
.with_tcp( diff --git a/misc/memory-connection-limits/Cargo.toml b/misc/memory-connection-limits/Cargo.toml index bf2a0384570..ae6bb386373 100644 --- a/misc/memory-connection-limits/Cargo.toml +++ b/misc/memory-connection-limits/Cargo.toml @@ -14,8 +14,8 @@ memory-stats = { version = "1", features = ["always_use_statm"] } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true, features = ["peerid"] } -log = "0.4" sysinfo = "0.29" +tracing = "0.1.37" void = "1" [dev-dependencies] diff --git a/misc/memory-connection-limits/src/lib.rs b/misc/memory-connection-limits/src/lib.rs index 01ff04552e7..5bc5f1068a3 100644 --- a/misc/memory-connection-limits/src/lib.rs +++ b/misc/memory-connection-limits/src/lib.rs @@ -127,7 +127,7 @@ impl Behaviour { let stats = match memory_stats::memory_stats() { Some(stats) => stats, None => { - log::warn!("Failed to retrieve process memory stats"); + tracing::warn!("Failed to retrieve process memory stats"); return; } }; diff --git a/misc/multistream-select/Cargo.toml b/misc/multistream-select/Cargo.toml index 6bd072070e7..e33478c1a08 100644 --- a/misc/multistream-select/Cargo.toml +++ b/misc/multistream-select/Cargo.toml @@ -13,18 +13,18 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "1" futures = "0.3" -log = "0.4" +tracing = "0.1.37" pin-project = "1.1.3" smallvec = "1.11.1" unsigned-varint = "0.7" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10" futures_ringbuf = "0.4.0" quickcheck = { workspace = true } rand = "0.8" rw-stream-sink = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/misc/multistream-select/src/dialer_select.rs b/misc/multistream-select/src/dialer_select.rs index 8caa7b0e0a2..83bb4909041 100644 --- a/misc/multistream-select/src/dialer_select.rs +++ b/misc/multistream-select/src/dialer_select.rs @@ -131,7 +131,7 @@ where if let Err(err) = Pin::new(&mut io).start_send(Message::Protocol(p.clone())) { return Poll::Ready(Err(From::from(err))); } - log::debug!("Dialer: Proposed protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Proposed protocol"); if this.protocols.peek().is_some() { *this.state = State::FlushProtocol { io, protocol } @@ -143,7 +143,7 @@ where // the dialer supports for this negotiation. Notably, // the dialer expects a regular `V1` response. 
Version::V1Lazy => { - log::debug!("Dialer: Expecting proposed protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Expecting proposed protocol"); let hl = HeaderLine::from(Version::V1Lazy); let io = Negotiated::expecting(io.into_reader(), p, Some(hl)); return Poll::Ready(Ok((protocol, io))); @@ -180,14 +180,14 @@ where *this.state = State::AwaitProtocol { io, protocol }; } Message::Protocol(ref p) if p.as_ref() == protocol.as_ref() => { - log::debug!("Dialer: Received confirmation for protocol: {}", p); + tracing::debug!(protocol=%p, "Dialer: Received confirmation for protocol"); let io = Negotiated::completed(io.into_inner()); return Poll::Ready(Ok((protocol, io))); } Message::NotAvailable => { - log::debug!( - "Dialer: Received rejection of protocol: {}", - protocol.as_ref() + tracing::debug!( + protocol=%protocol.as_ref(), + "Dialer: Received rejection of protocol" ); let protocol = this.protocols.next().ok_or(NegotiationError::Failed)?; *this.state = State::SendProtocol { io, protocol } @@ -208,9 +208,10 @@ mod tests { use crate::listener_select_proto; use async_std::future::timeout; use async_std::net::{TcpListener, TcpStream}; - use log::info; use quickcheck::{Arbitrary, Gen, GenRange}; use std::time::Duration; + use tracing::metadata::LevelFilter; + use tracing_subscriber::EnvFilter; #[test] fn select_proto_basic() { @@ -266,7 +267,13 @@ mod tests { ListenerProtos(listen_protos): ListenerProtos, DialPayload(dial_payload): DialPayload, ) { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::DEBUG.into()) + .from_env_lossy(), + ) + .try_init(); async_std::task::block_on(async move { let listener = TcpListener::bind("0.0.0.0:0").await.unwrap(); @@ -312,7 +319,7 @@ mod tests { // got confirmation of the last proposed protocol, when `V1Lazy` // is used. - info!("Writing early data"); + tracing::info!("Writing early data"); io.write_all(&dial_payload).await.unwrap(); match io.complete().await { @@ -324,7 +331,7 @@ mod tests { server.await; client.await; - info!("---------------------------------------") + tracing::info!("---------------------------------------") }); } diff --git a/misc/multistream-select/src/length_delimited.rs b/misc/multistream-select/src/length_delimited.rs index cff2f4abc39..6515d00c717 100644 --- a/misc/multistream-select/src/length_delimited.rs +++ b/misc/multistream-select/src/length_delimited.rs @@ -170,7 +170,7 @@ where if (buf[*pos - 1] & 0x80) == 0 { // MSB is not set, indicating the end of the length prefix. let (len, _) = unsigned_varint::decode::u16(buf).map_err(|e| { - log::debug!("invalid length prefix: {}", e); + tracing::debug!("invalid length prefix: {e}"); io::Error::new(io::ErrorKind::InvalidData, "invalid length prefix") })?; diff --git a/misc/multistream-select/src/listener_select.rs b/misc/multistream-select/src/listener_select.rs index d0037a78619..21c507096e2 100644 --- a/misc/multistream-select/src/listener_select.rs +++ b/misc/multistream-select/src/listener_select.rs @@ -52,7 +52,7 @@ where .filter_map(|n| match Protocol::try_from(n.as_ref()) { Ok(p) => Some((n, p)), Err(e) => { - log::warn!( + tracing::warn!( "Listener: Ignoring invalid protocol: {} due to {}", n.as_ref(), e @@ -186,7 +186,7 @@ where // the dialer also raises `NegotiationError::Failed` when finally // reading the `N/A` response. 
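The multistream-select tests above build their filter with `EnvFilter::builder()` rather than `EnvFilter::from_default_env()`. A minimal sketch of what that buys (same dependency assumptions as the earlier snippet; the emitted message is made up):

    use tracing::metadata::LevelFilter;
    use tracing_subscriber::EnvFilter;

    fn main() {
        // Unlike `EnvFilter::from_default_env()`, the builder form can fall
        // back to a default directive (here DEBUG) when RUST_LOG is not set,
        // which is convenient in tests.
        let filter = EnvFilter::builder()
            .with_default_directive(LevelFilter::DEBUG.into())
            .from_env_lossy();

        let _ = tracing_subscriber::fmt().with_env_filter(filter).try_init();

        tracing::debug!("enabled by the DEBUG default directive");
    }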
if let ProtocolError::InvalidMessage = &err { - log::trace!( + tracing::trace!( "Listener: Negotiation failed with invalid \ message after protocol rejection." ); @@ -194,7 +194,7 @@ where } if let ProtocolError::IoError(e) = &err { if e.kind() == std::io::ErrorKind::UnexpectedEof { - log::trace!( + tracing::trace!( "Listener: Negotiation failed with EOF \ after protocol rejection." ); @@ -228,10 +228,10 @@ where }); let message = if protocol.is_some() { - log::debug!("Listener: confirming protocol: {}", p); + tracing::debug!(protocol=%p, "Listener: confirming protocol"); Message::Protocol(p.clone()) } else { - log::debug!("Listener: rejecting protocol: {}", p.as_ref()); + tracing::debug!(protocol=%p.as_ref(), "Listener: rejecting protocol"); Message::NotAvailable }; @@ -287,9 +287,9 @@ where // Otherwise expect to receive another message. match protocol { Some(protocol) => { - log::debug!( - "Listener: sent confirmed protocol: {}", - protocol.as_ref() + tracing::debug!( + protocol=%protocol.as_ref(), + "Listener: sent confirmed protocol" ); let io = Negotiated::completed(io.into_inner()); return Poll::Ready(Ok((protocol, io))); diff --git a/misc/multistream-select/src/negotiated.rs b/misc/multistream-select/src/negotiated.rs index 941b60765ca..a24014a4f5f 100644 --- a/misc/multistream-select/src/negotiated.rs +++ b/misc/multistream-select/src/negotiated.rs @@ -171,7 +171,7 @@ impl Negotiated { if let Message::Protocol(p) = &msg { if p.as_ref() == protocol.as_ref() { - log::debug!("Negotiated: Received confirmation for protocol: {}", p); + tracing::debug!(protocol=%p, "Negotiated: Received confirmation for protocol"); *this.state = State::Completed { io: io.into_inner(), }; @@ -317,7 +317,7 @@ where StateProj::Expecting { io, .. } => { let close_poll = io.poll_close(cx); if let Poll::Ready(Ok(())) = close_poll { - log::debug!("Stream closed. Confirmation from remote for optimstic protocol negotiation still pending.") + tracing::debug!("Stream closed. 
Confirmation from remote for optimstic protocol negotiation still pending") } close_poll } diff --git a/misc/multistream-select/src/protocol.rs b/misc/multistream-select/src/protocol.rs index be2f3122da0..d5c2bfa773a 100644 --- a/misc/multistream-select/src/protocol.rs +++ b/misc/multistream-select/src/protocol.rs @@ -403,7 +403,7 @@ where return Poll::Ready(None); }; - log::trace!("Received message: {:?}", msg); + tracing::trace!(message=?msg, "Received message"); Poll::Ready(Some(Ok(msg))) } diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml index efaa43a8658..e69c4f0b5cd 100644 --- a/misc/server/Cargo.toml +++ b/misc/server/Cargo.toml @@ -13,17 +13,17 @@ license = "MIT" [dependencies] base64 = "0.21" clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" futures = "0.3" futures-timer = "3" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic"] } -log = "0.4" prometheus-client = { workspace = true } serde = "1.0.190" serde_derive = "1.0.125" serde_json = "1.0" tokio = { version = "1", features = ["rt-multi-thread", "macros"] } +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } zeroize = "1" [lints] diff --git a/misc/server/src/http_service.rs b/misc/server/src/http_service.rs index 1f5ebaff593..7905933fbf5 100644 --- a/misc/server/src/http_service.rs +++ b/misc/server/src/http_service.rs @@ -21,7 +21,6 @@ use hyper::http::StatusCode; use hyper::service::Service; use hyper::{Body, Method, Request, Response, Server}; -use log::info; use prometheus_client::encoding::text::encode; use prometheus_client::registry::Registry; use std::future::Future; @@ -38,11 +37,7 @@ pub(crate) async fn metrics_server( let addr = ([0, 0, 0, 0], 8888).into(); let server = Server::bind(&addr).serve(MakeMetricService::new(registry, metrics_path.clone())); - info!( - "Metrics server on http://{}{}", - server.local_addr(), - metrics_path - ); + tracing::info!(metrics_server=%format!("http://{}{}", server.local_addr(), metrics_path)); server.await?; Ok(()) } diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs index 0573aae5c6f..d42675ec5f9 100644 --- a/misc/server/src/main.rs +++ b/misc/server/src/main.rs @@ -9,7 +9,6 @@ use libp2p::metrics::{Metrics, Recorder}; use libp2p::swarm::SwarmEvent; use libp2p::tcp; use libp2p::{identify, noise, yamux}; -use log::{debug, info, warn}; use prometheus_client::metrics::info::Info; use prometheus_client::registry::Registry; use std::error::Error; @@ -17,6 +16,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::task::Poll; use std::time::Duration; +use tracing_subscriber::EnvFilter; use zeroize::Zeroizing; mod behaviour; @@ -47,7 +47,9 @@ struct Opts { #[tokio::main] async fn main() -> Result<(), Box> { - env_logger::init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opt = Opts::parse(); @@ -84,25 +86,25 @@ async fn main() -> Result<(), Box> { .build(); if config.addresses.swarm.is_empty() { - warn!("No listen addresses configured."); + tracing::warn!("No listen addresses configured"); } for address in &config.addresses.swarm { match swarm.listen_on(address.clone()) { Ok(_) => {} Err(e @ libp2p::TransportError::MultiaddrNotSupported(_)) => { - warn!("Failed to listen on {address}, continuing anyways, {e}") + tracing::warn!(%address, "Failed to listen on 
address, continuing anyways, {e}") } Err(e) => return Err(e.into()), } } if config.addresses.append_announce.is_empty() { - warn!("No external addresses configured."); + tracing::warn!("No external addresses configured"); } for address in &config.addresses.append_announce { swarm.add_external_address(address.clone()) } - info!( + tracing::info!( "External addresses: {:?}", swarm.external_addresses().collect::>() ); @@ -117,7 +119,7 @@ async fn main() -> Result<(), Box> { ); tokio::spawn(async move { if let Err(e) = http_service::metrics_server(metric_registry, opt.metrics_path).await { - log::error!("Metrics server failed: {e}"); + tracing::error!("Metrics server failed: {e}"); } }); @@ -137,7 +139,7 @@ async fn main() -> Result<(), Box> { metrics.record(&event); match event { SwarmEvent::Behaviour(behaviour::BehaviourEvent::Identify(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); metrics.record(&e); if let identify::Event::Received { @@ -162,24 +164,24 @@ async fn main() -> Result<(), Box> { } } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Ping(e)) => { - debug!("{:?}", e); + tracing::debug!("{:?}", e); metrics.record(&e); } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Kademlia(e)) => { - debug!("{:?}", e); + tracing::debug!("{:?}", e); metrics.record(&e); } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Relay(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); metrics.record(&e) } SwarmEvent::Behaviour(behaviour::BehaviourEvent::Autonat(e)) => { - info!("{:?}", e); + tracing::info!("{:?}", e); // TODO: Add metric recording for `NatStatus`. // metrics.record(&e) } SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {address:?}"); + tracing::info!(%address, "Listening on address"); } _ => {} } diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml index 4401ef9bc44..868ab8db8fb 100644 --- a/misc/webrtc-utils/Cargo.toml +++ b/misc/webrtc-utils/Cargo.toml @@ -11,13 +11,13 @@ version = "0.1.0" publish = true [dependencies] +asynchronous-codec = "0.6" bytes = "1" futures = "0.3" hex = "0.4" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } -log = "0.4.19" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" @@ -25,7 +25,7 @@ serde = { version = "1.0", features = ["derive"] } sha2 = "0.10.8" thiserror = "1" tinytemplate = "1.2" -asynchronous-codec = "0.6" +tracing = "0.1.37" [dev-dependencies] hex-literal = "0.4" diff --git a/misc/webrtc-utils/src/sdp.rs b/misc/webrtc-utils/src/sdp.rs index 7c4facaf27e..0796548f449 100644 --- a/misc/webrtc-utils/src/sdp.rs +++ b/misc/webrtc-utils/src/sdp.rs @@ -34,7 +34,7 @@ pub fn answer(addr: SocketAddr, server_fingerprint: Fingerprint, client_ufrag: & client_ufrag, ); - log::trace!("Created SDP answer: {answer}"); + tracing::trace!(%answer, "Created SDP answer"); answer } diff --git a/misc/webrtc-utils/src/stream/drop_listener.rs b/misc/webrtc-utils/src/stream/drop_listener.rs index b638ea84b09..9745e3d4364 100644 --- a/misc/webrtc-utils/src/stream/drop_listener.rs +++ b/misc/webrtc-utils/src/stream/drop_listener.rs @@ -79,7 +79,7 @@ where return Poll::Ready(Ok(())); } Poll::Ready(Err(Canceled)) => { - log::info!("Stream dropped without graceful close, sending Reset"); + tracing::info!("Stream dropped without graceful close, sending Reset"); *state = State::SendingReset { stream }; continue; } diff --git a/muxers/mplex/Cargo.toml b/muxers/mplex/Cargo.toml index c4286e16169..c38b11dca9e 100644 --- 
a/muxers/mplex/Cargo.toml +++ b/muxers/mplex/Cargo.toml @@ -16,23 +16,23 @@ futures = "0.3.29" asynchronous-codec = "0.6" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" nohash-hasher = "0.2" parking_lot = "0.12" rand = "0.8" smallvec = "1.11.1" +tracing = "0.1.37" unsigned-varint = { version = "0.7", features = ["asynchronous_codec"] } [dev-dependencies] async-std = { version = "1.7.0", features = ["attributes"] } criterion = "0.5" -env_logger = "0.10" futures = "0.3" libp2p-identity = { workspace = true, features = ["rand"] } libp2p-muxer-test-harness = { path = "../test-harness" } libp2p-plaintext = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[bench]] name = "split_send_size" diff --git a/muxers/mplex/benches/split_send_size.rs b/muxers/mplex/benches/split_send_size.rs index 86f84ceab2c..9a9814d2f2a 100644 --- a/muxers/mplex/benches/split_send_size.rs +++ b/muxers/mplex/benches/split_send_size.rs @@ -35,6 +35,7 @@ use libp2p_mplex as mplex; use libp2p_plaintext as plaintext; use std::pin::Pin; use std::time::Duration; +use tracing_subscriber::EnvFilter; type BenchTransport = transport::Boxed<(PeerId, muxing::StreamMuxerBox)>; @@ -51,7 +52,9 @@ const BENCH_SIZES: [usize; 8] = [ ]; fn prepare(c: &mut Criterion) { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let payload: Vec = vec![1; 1024 * 1024]; diff --git a/muxers/mplex/src/io.rs b/muxers/mplex/src/io.rs index 8002ad383d6..0dd8b9ea6a9 100644 --- a/muxers/mplex/src/io.rs +++ b/muxers/mplex/src/io.rs @@ -24,7 +24,6 @@ use asynchronous_codec::Framed; use bytes::Bytes; use futures::task::{waker_ref, ArcWake, AtomicWaker, WakerRef}; use futures::{prelude::*, ready, stream::Fuse}; -use log::{debug, trace}; use nohash_hasher::{IntMap, IntSet}; use parking_lot::Mutex; use smallvec::SmallVec; @@ -117,7 +116,7 @@ where /// Creates a new multiplexed I/O stream. pub(crate) fn new(io: C, config: MplexConfig) -> Self { let id = ConnectionId(rand::random()); - debug!("New multiplexed connection: {}", id); + tracing::debug!(connection=%id, "New multiplexed connection"); Multiplexed { id, config, @@ -254,9 +253,11 @@ where // Check the stream limits. if self.substreams.len() >= self.config.max_substreams { - debug!( - "{}: Maximum number of substreams reached ({})", - self.id, self.config.max_substreams + tracing::debug!( + connection=%self.id, + total_substreams=%self.substreams.len(), + max_substreams=%self.config.max_substreams, + "Maximum number of substreams reached" ); self.notifier_open.register(cx.waker()); return Poll::Pending; @@ -276,11 +277,11 @@ where buf: Default::default(), }, ); - debug!( - "{}: New outbound substream: {} (total {})", - self.id, - stream_id, - self.substreams.len() + tracing::debug!( + connection=%self.id, + substream=%stream_id, + total_substreams=%self.substreams.len(), + "New outbound substream" ); // The flush is delayed and the `Open` frame may be sent // together with other frames in the same transport packet. 
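The mplex hunks that follow convert positional `{}` arguments into named fields such as `connection=%self.id` and `substream=%id`. A small sketch of that field syntax, with made-up values standing in for the real mplex types:

    fn main() {
        let _ = tracing_subscriber::fmt()
            .with_max_level(tracing::Level::DEBUG)
            .try_init();

        let connection = 7u64;          // stands in for `self.id`
        let substreams = vec![1, 2, 3]; // stands in for the substream map

        // `%` records a field with its Display impl, `?` with Debug; the
        // message stays a constant string, which makes events easier to
        // filter and aggregate than interpolated text.
        tracing::debug!(connection = %connection, substreams = ?substreams, "New outbound substream");
    }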
@@ -348,7 +349,11 @@ where if self.check_max_pending_frames().is_err() { return; } - trace!("{}: Pending close for stream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Pending close for substream" + ); self.pending_frames .push_front(Frame::Close { stream_id: id }); } @@ -356,7 +361,11 @@ where if self.check_max_pending_frames().is_err() { return; } - trace!("{}: Pending reset for stream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Pending reset for substream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); } @@ -476,11 +485,11 @@ where frame @ Frame::Open { .. } => { if let Some(id) = self.on_open(frame.remote_id())? { self.open_buffer.push_front(id); - trace!( - "{}: Buffered new inbound stream {} (total: {})", - self.id, - id, - self.open_buffer.len() + tracing::trace!( + connection=%self.id, + inbound_stream=%id, + inbound_buffer_len=%self.open_buffer.len(), + "Buffered new inbound stream" ); self.notifier_read.wake_next_stream(); } @@ -516,7 +525,11 @@ where self.guard_open()?; ready!(self.poll_flush(cx))?; - trace!("{}: Flushed substream {}", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Flushed substream" + ); Poll::Ready(Ok(())) } @@ -554,7 +567,11 @@ where self.substreams.insert(id, SubstreamState::Open { buf }); Poll::Pending } else { - debug!("{}: Closed substream {} (half-close)", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Closed substream (half-close)" + ); self.substreams .insert(id, SubstreamState::SendClosed { buf }); Poll::Ready(Ok(())) @@ -569,7 +586,11 @@ where .insert(id, SubstreamState::RecvClosed { buf }); Poll::Pending } else { - debug!("{}: Closed substream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Closed substream" + ); self.substreams.insert(id, SubstreamState::Closed { buf }); Poll::Ready(Ok(())) } @@ -589,7 +610,7 @@ where match ready!(self.io.poll_ready_unpin(&mut Context::from_waker(&waker))) { Ok(()) => { let frame = frame(); - trace!("{}: Sending {:?}", self.id, frame); + tracing::trace!(connection=%self.id, ?frame, "Sending frame"); match self.io.start_send_unpin(frame) { Ok(()) => Poll::Ready(Ok(())), Err(e) => Poll::Ready(self.on_error(e)), @@ -618,7 +639,11 @@ where // Perform any pending flush before reading. if let Some(id) = &stream_id { if self.pending_flush_open.contains(id) { - trace!("{}: Executing pending flush for {}.", self.id, id); + tracing::trace!( + connection=%self.id, + substream=%id, + "Executing pending flush for substream" + ); ready!(self.poll_flush(cx))?; self.pending_flush_open = Default::default(); } @@ -634,9 +659,9 @@ where if !self.notifier_read.wake_read_stream(*blocked_id) { // No task dedicated to the blocked stream woken, so schedule // this task again to have a chance at progress. - trace!( - "{}: No task to read from blocked stream. Waking current task.", - self.id + tracing::trace!( + connection=%self.id, + "No task to read from blocked stream. Waking current task." 
); cx.waker().clone().wake(); } else if let Some(id) = stream_id { @@ -664,7 +689,7 @@ where }; match ready!(self.io.poll_next_unpin(&mut Context::from_waker(&waker))) { Some(Ok(frame)) => { - trace!("{}: Received {:?}", self.id, frame); + tracing::trace!(connection=%self.id, ?frame, "Received frame"); Poll::Ready(Ok(frame)) } Some(Err(e)) => Poll::Ready(self.on_error(e)), @@ -677,9 +702,10 @@ where let id = id.into_local(); if self.substreams.contains_key(&id) { - debug!( - "{}: Received unexpected `Open` frame for open substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Received unexpected `Open` frame for open substream", ); return self.on_error(io::Error::new( io::ErrorKind::Other, @@ -688,12 +714,17 @@ where } if self.substreams.len() >= self.config.max_substreams { - debug!( - "{}: Maximum number of substreams exceeded: {}", - self.id, self.config.max_substreams + tracing::debug!( + connection=%self.id, + max_substreams=%self.config.max_substreams, + "Maximum number of substreams exceeded" ); self.check_max_pending_frames()?; - debug!("{}: Pending reset for new stream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Pending reset for new substream" + ); self.pending_frames .push_front(Frame::Reset { stream_id: id }); return Ok(None); @@ -706,11 +737,11 @@ where }, ); - debug!( - "{}: New inbound substream: {} (total {})", - self.id, - id, - self.substreams.len() + tracing::debug!( + connection=%self.id, + substream=%id, + total_substreams=%self.substreams.len(), + "New inbound substream" ); Ok(Some(id)) @@ -721,23 +752,27 @@ where if let Some(state) = self.substreams.remove(&id) { match state { SubstreamState::Closed { .. } => { - trace!( - "{}: Ignoring reset for mutually closed substream {}.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring reset for mutually closed substream" ); } SubstreamState::Reset { .. } => { - trace!( - "{}: Ignoring redundant reset for already reset substream {}", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring redundant reset for already reset substream" ); } SubstreamState::RecvClosed { buf } | SubstreamState::SendClosed { buf } | SubstreamState::Open { buf } => { - debug!("{}: Substream {} reset by remote.", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream reset by remote" + ); self.substreams.insert(id, SubstreamState::Reset { buf }); // Notify tasks interested in reading from that stream, // so they may read the EOF. @@ -745,10 +780,10 @@ where } } } else { - trace!( - "{}: Ignoring `Reset` for unknown substream {}. Possibly dropped earlier.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring `Reset` for unknown substream, possibly dropped earlier" ); } } @@ -758,32 +793,36 @@ where if let Some(state) = self.substreams.remove(&id) { match state { SubstreamState::RecvClosed { .. } | SubstreamState::Closed { .. 
} => { - debug!( - "{}: Ignoring `Close` frame for closed substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Ignoring `Close` frame for closed substream" ); self.substreams.insert(id, state); } SubstreamState::Reset { buf } => { - debug!( - "{}: Ignoring `Close` frame for already reset substream {}", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Ignoring `Close` frame for already reset substream" ); self.substreams.insert(id, SubstreamState::Reset { buf }); } SubstreamState::SendClosed { buf } => { - debug!( - "{}: Substream {} closed by remote (SendClosed -> Closed).", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream closed by remote (SendClosed -> Closed)" ); self.substreams.insert(id, SubstreamState::Closed { buf }); // Notify tasks interested in reading, so they may read the EOF. self.notifier_read.wake_read_stream(id); } SubstreamState::Open { buf } => { - debug!( - "{}: Substream {} closed by remote (Open -> RecvClosed)", - self.id, id + tracing::debug!( + connection=%self.id, + substream=%id, + "Substream closed by remote (Open -> RecvClosed)" ); self.substreams .insert(id, SubstreamState::RecvClosed { buf }); @@ -792,10 +831,10 @@ where } } } else { - trace!( - "{}: Ignoring `Close` for unknown substream {}. Possibly dropped earlier.", - self.id, - id + tracing::trace!( + connection=%self.id, + substream=%id, + "Ignoring `Close` for unknown substream, possibly dropped earlier." ); } } @@ -829,7 +868,11 @@ where /// Records a fatal error for the multiplexed I/O stream. fn on_error(&mut self, e: io::Error) -> io::Result { - debug!("{}: Multiplexed connection failed: {:?}", self.id, e); + tracing::debug!( + connection=%self.id, + "Multiplexed connection failed: {:?}", + e + ); self.status = Status::Err(io::Error::new(e.kind(), e.to_string())); self.pending_frames = Default::default(); self.substreams = Default::default(); @@ -872,11 +915,11 @@ where let state = if let Some(state) = self.substreams.get_mut(&id) { state } else { - trace!( - "{}: Dropping data {:?} for unknown substream {}", - self.id, - data, - id + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + "Dropping data for unknown substream" ); return Ok(()); }; @@ -884,33 +927,41 @@ where let buf = if let Some(buf) = state.recv_buf_open() { buf } else { - trace!( - "{}: Dropping data {:?} for closed or reset substream {}", - self.id, - data, - id + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + "Dropping data for closed or reset substream", ); return Ok(()); }; debug_assert!(buf.len() <= self.config.max_buffer_len); - trace!( - "{}: Buffering {:?} for stream {} (total: {})", - self.id, - data, - id, - buf.len() + 1 + tracing::trace!( + connection=%self.id, + substream=%id, + data=?data, + data_buffer=%buf.len() + 1, + "Buffering data for substream" ); buf.push(data); self.notifier_read.wake_read_stream(id); if buf.len() > self.config.max_buffer_len { - debug!("{}: Frame buffer of stream {} is full.", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Frame buffer of substream is full" + ); match self.config.max_buffer_behaviour { MaxBufferBehaviour::ResetStream => { let buf = buf.clone(); self.check_max_pending_frames()?; self.substreams.insert(id, SubstreamState::Reset { buf }); - debug!("{}: Pending reset for stream {}", self.id, id); + tracing::debug!( + connection=%self.id, + substream=%id, + "Pending reset for stream" + ); self.pending_frames 
.push_front(Frame::Reset { stream_id: id }); } @@ -1179,7 +1230,10 @@ mod tests { #[test] fn max_buffer_behaviour() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(cfg: MplexConfig, overflow: NonZeroU8) { let mut r_buf = BytesMut::new(); @@ -1314,7 +1368,10 @@ mod tests { #[test] fn close_on_error() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(cfg: MplexConfig, num_streams: NonZeroU8) { let num_streams = cmp::min(cfg.max_substreams, num_streams.get() as usize); diff --git a/muxers/test-harness/Cargo.toml b/muxers/test-harness/Cargo.toml index 7eee7b3150b..05cef9ad49f 100644 --- a/muxers/test-harness/Cargo.toml +++ b/muxers/test-harness/Cargo.toml @@ -13,9 +13,9 @@ release = false [dependencies] libp2p-core = { workspace = true } futures = "0.3.29" -log = "0.4" futures-timer = "3.0.2" futures_ringbuf = "0.4.0" +tracing = "0.1.37" [lints] workspace = true diff --git a/muxers/test-harness/src/lib.rs b/muxers/test-harness/src/lib.rs index 233fe3a478c..16c71f414f0 100644 --- a/muxers/test-harness/src/lib.rs +++ b/muxers/test-harness/src/lib.rs @@ -149,20 +149,20 @@ async fn run( loop { match futures::future::select(dialer.next(), listener.next()).await { Either::Left((Some(Event::SetupComplete), _)) => { - log::info!("Dialer opened outbound stream"); + tracing::info!("Dialer opened outbound stream"); } Either::Left((Some(Event::ProtocolComplete), _)) => { - log::info!("Dialer completed protocol"); + tracing::info!("Dialer completed protocol"); dialer_complete = true } Either::Left((Some(Event::Timeout), _)) => { panic!("Dialer protocol timed out"); } Either::Right((Some(Event::SetupComplete), _)) => { - log::info!("Listener received inbound stream"); + tracing::info!("Listener received inbound stream"); } Either::Right((Some(Event::ProtocolComplete), _)) => { - log::info!("Listener completed protocol"); + tracing::info!("Listener completed protocol"); listener_complete = true } Either::Right((Some(Event::Timeout), _)) => { diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index c7c08365090..ec3d4b85c5b 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -15,7 +15,7 @@ futures = "0.3.29" libp2p-core = { workspace = true } thiserror = "1.0" yamux = "0.12" -log = "0.4" +tracing = "0.1.37" [dev-dependencies] async-std = { version = "1.7.0", features = ["attributes"] } diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index 073a5723d2e..d10cdfa244c 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -81,6 +81,7 @@ where type Substream = Stream; type Error = Error; + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_inbound", skip(self, cx))] fn poll_inbound( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -97,6 +98,7 @@ where Poll::Pending } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_outbound", skip(self, cx))] fn poll_outbound( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -106,12 +108,14 @@ where Poll::Ready(Ok(Stream(stream))) } + #[tracing::instrument(level = "trace", name = "StreamMuxer::poll_close", skip(self, cx))] fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { ready!(self.connection.poll_close(cx).map_err(Error)?); Poll::Ready(Ok(())) } + #[tracing::instrument(level = "trace", name = 
"StreamMuxer::poll", skip(self, cx))] fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -121,7 +125,10 @@ where let inbound_stream = ready!(this.poll_inner(cx))?; if this.inbound_stream_buffer.len() >= MAX_BUFFERED_INBOUND_STREAMS { - log::warn!("dropping {} because buffer is full", inbound_stream.0); + tracing::warn!( + stream=%inbound_stream.0, + "dropping stream because buffer is full" + ); drop(inbound_stream); } else { this.inbound_stream_buffer.push_back(inbound_stream); diff --git a/protocols/autonat/Cargo.toml b/protocols/autonat/Cargo.toml index 9acad187586..cadddfa91fd 100644 --- a/protocols/autonat/Cargo.toml +++ b/protocols/autonat/Cargo.toml @@ -19,16 +19,16 @@ libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-request-response = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" -rand = "0.8" quick-protobuf = "0.8" +rand = "0.8" +tracing = "0.1.37" quick-protobuf-codec = { workspace = true } asynchronous-codec = "0.6.2" [dev-dependencies] async-std = { version = "1.10", features = ["attributes"] } -env_logger = "0.10" libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/autonat/src/behaviour.rs b/protocols/autonat/src/behaviour.rs index e9a73fd3fcb..06c945eb888 100644 --- a/protocols/autonat/src/behaviour.rs +++ b/protocols/autonat/src/behaviour.rs @@ -435,6 +435,7 @@ impl NetworkBehaviour for Behaviour { as NetworkBehaviour>::ConnectionHandler; type ToSwarm = Event; + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/autonat/src/behaviour/as_client.rs b/protocols/autonat/src/behaviour/as_client.rs index 6f37d32620b..7c9242d47cc 100644 --- a/protocols/autonat/src/behaviour/as_client.rs +++ b/protocols/autonat/src/behaviour/as_client.rs @@ -112,7 +112,7 @@ impl<'a> HandleInnerEvent for AsClient<'a> { response, }, } => { - log::debug!("Outbound dial-back request returned {:?}.", response); + tracing::debug!(?response, "Outbound dial-back request returned response"); let probe_id = self .ongoing_outbound @@ -154,10 +154,10 @@ impl<'a> HandleInnerEvent for AsClient<'a> { error, request_id, } => { - log::debug!( - "Outbound Failure {} when on dial-back request to peer {}.", + tracing::debug!( + %peer, + "Outbound Failure {} when on dial-back request to peer.", error, - peer ); let probe_id = self .ongoing_outbound @@ -275,13 +275,13 @@ impl<'a> AsClient<'a> { ) -> Result { let _ = self.last_probe.insert(Instant::now()); if addresses.is_empty() { - log::debug!("Outbound dial-back request aborted: No dial-back addresses."); + tracing::debug!("Outbound dial-back request aborted: No dial-back addresses"); return Err(OutboundProbeError::NoAddresses); } let server = match self.random_server() { Some(s) => s, None => { - log::debug!("Outbound dial-back request aborted: No qualified server."); + tracing::debug!("Outbound dial-back request aborted: No qualified server"); return Err(OutboundProbeError::NoServer); } }; @@ -293,7 +293,7 @@ impl<'a> AsClient<'a> { }, ); self.throttled_servers.push((server, Instant::now())); - log::debug!("Send dial-back request to peer {}.", server); + tracing::debug!(peer=%server, "Send dial-back request to peer"); self.ongoing_outbound.insert(request_id, probe_id); 
Ok(server) } @@ -344,10 +344,10 @@ impl<'a> AsClient<'a> { return None; } - log::debug!( - "Flipped assumed NAT status from {:?} to {:?}", - self.nat_status, - reported_status + tracing::debug!( + old_status=?self.nat_status, + new_status=?reported_status, + "Flipped assumed NAT status" ); let old_status = self.nat_status.clone(); diff --git a/protocols/autonat/src/behaviour/as_server.rs b/protocols/autonat/src/behaviour/as_server.rs index 65c9738647e..6185ecc50e2 100644 --- a/protocols/autonat/src/behaviour/as_server.rs +++ b/protocols/autonat/src/behaviour/as_server.rs @@ -110,9 +110,9 @@ impl<'a> HandleInnerEvent for AsServer<'a> { let probe_id = self.probe_id.next(); match self.resolve_inbound_request(peer, request) { Ok(addrs) => { - log::debug!( - "Inbound dial request from Peer {} with dial-back addresses {:?}.", - peer, + tracing::debug!( + %peer, + "Inbound dial request from peer with dial-back addresses {:?}", addrs ); @@ -140,10 +140,10 @@ impl<'a> HandleInnerEvent for AsServer<'a> { ]) } Err((status_text, error)) => { - log::debug!( - "Reject inbound dial request from peer {}: {}.", - peer, - status_text + tracing::debug!( + %peer, + status=%status_text, + "Reject inbound dial request from peer" ); let response = DialResponse { @@ -167,10 +167,10 @@ impl<'a> HandleInnerEvent for AsServer<'a> { error, request_id, } => { - log::debug!( - "Inbound Failure {} when on dial-back request from peer {}.", - error, - peer + tracing::debug!( + %peer, + "Inbound Failure {} when on dial-back request from peer", + error ); let probe_id = match self.ongoing_inbound.get(&peer) { @@ -206,10 +206,10 @@ impl<'a> AsServer<'a> { return None; } - log::debug!( - "Dial-back to peer {} succeeded at addr {:?}.", - peer, - address + tracing::debug!( + %peer, + %address, + "Dial-back to peer succeeded" ); let (probe_id, _, _, channel) = self.ongoing_inbound.remove(peer).unwrap(); @@ -232,11 +232,19 @@ impl<'a> AsServer<'a> { error: &DialError, ) -> Option { let (probe_id, _, _, channel) = peer.and_then(|p| self.ongoing_inbound.remove(&p))?; - log::debug!( - "Dial-back to peer {} failed with error {:?}.", - peer.unwrap(), - error - ); + + match peer { + Some(p) => tracing::debug!( + peer=%p, + "Dial-back to peer failed with error {:?}", + error + ), + None => tracing::debug!( + "Dial-back to non existent peer failed with error {:?}", + error + ), + }; + let response_error = ResponseError::DialError; let response = DialResponse { result: Err(response_error.clone()), diff --git a/protocols/autonat/src/protocol.rs b/protocols/autonat/src/protocol.rs index 904af6473e2..b28f70cadf4 100644 --- a/protocols/autonat/src/protocol.rs +++ b/protocols/autonat/src/protocol.rs @@ -129,7 +129,7 @@ impl DialRequest { { (peer_id, addrs) } else { - log::debug!("Received malformed dial message."); + tracing::debug!("Received malformed dial message"); return Err(io::Error::new( io::ErrorKind::InvalidData, "invalid dial message", @@ -146,7 +146,7 @@ impl DialRequest { .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { Ok(a) => Some(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); None } }) @@ -207,7 +207,7 @@ impl TryFrom for ResponseError { proto::ResponseStatus::E_BAD_REQUEST => Ok(ResponseError::BadRequest), proto::ResponseStatus::E_INTERNAL_ERROR => Ok(ResponseError::InternalError), proto::ResponseStatus::OK => { - log::debug!("Received response with status code OK but expected error."); + tracing::debug!("Received response with status code OK but 
expected error"); Err(io::Error::new( io::ErrorKind::InvalidData, "invalid response error type", @@ -251,7 +251,7 @@ impl DialResponse { result: Err(ResponseError::try_from(status)?), }, _ => { - log::debug!("Received malformed response message."); + tracing::debug!("Received malformed response message"); return Err(io::Error::new( io::ErrorKind::InvalidData, "invalid dial response message", diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 0e59585a416..9079f4f8a97 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -19,10 +19,10 @@ instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } thiserror = "1.0" +tracing = "0.1.37" void = "1" lru = "0.11.1" futures-bounded = { workspace = true } @@ -30,7 +30,6 @@ futures-bounded = { workspace = true } [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identify = { workspace = true } libp2p-noise = { workspace = true } @@ -42,6 +41,7 @@ libp2p-swarm-test = { path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } rand = "0.8" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index 6aecc596c71..d0b46abb0b4 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -268,9 +268,7 @@ impl NetworkBehaviour for Behaviour { match handler_event { Either::Left(handler::relayed::Event::InboundConnectNegotiated { remote_addrs }) => { - log::debug!( - "Attempting to hole-punch as dialer to {event_source} using {remote_addrs:?}" - ); + tracing::debug!(target=%event_source, addresses=?remote_addrs, "Attempting to hole-punch as dialer"); let opts = DialOpts::peer_id(event_source) .addresses(remote_addrs) @@ -302,9 +300,7 @@ impl NetworkBehaviour for Behaviour { // Maybe treat these as transient and retry? } Either::Left(handler::relayed::Event::OutboundConnectNegotiated { remote_addrs }) => { - log::debug!( - "Attempting to hole-punch as listener to {event_source} using {remote_addrs:?}" - ); + tracing::debug!(target=%event_source, addresses=?remote_addrs, "Attempting to hole-punch as listener"); let opts = DialOpts::peer_id(event_source) .condition(dial_opts::PeerCondition::Always) diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index 9d600d234e5..b4daefce15f 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -110,7 +110,7 @@ impl Handler { )) .is_err() { - log::warn!( + tracing::warn!( "New inbound connect stream while still upgrading previous one. Replacing previous with new.", ); } @@ -142,7 +142,7 @@ impl Handler { )) .is_err() { - log::warn!( + tracing::warn!( "New outbound connect stream while still upgrading previous one. 
Replacing previous with new.", ); } @@ -224,6 +224,7 @@ impl ConnectionHandler for Handler { false } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/dcutr/src/protocol/inbound.rs b/protocols/dcutr/src/protocol/inbound.rs index 95665843724..b8f90daf3a1 100644 --- a/protocols/dcutr/src/protocol/inbound.rs +++ b/protocols/dcutr/src/protocol/inbound.rs @@ -50,14 +50,14 @@ pub(crate) async fn handshake( .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { Ok(a) => Some(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); None } }) // Filter out relayed addresses. .filter(|a| { if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); + tracing::debug!(address=%a, "Dropping relayed address"); false } else { true diff --git a/protocols/dcutr/src/protocol/outbound.rs b/protocols/dcutr/src/protocol/outbound.rs index 67c7116d706..d9cb60a01f6 100644 --- a/protocols/dcutr/src/protocol/outbound.rs +++ b/protocols/dcutr/src/protocol/outbound.rs @@ -68,14 +68,14 @@ pub(crate) async fn handshake( .filter_map(|a| match Multiaddr::try_from(a.to_vec()) { Ok(a) => Some(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); None } }) // Filter out relayed addresses. .filter(|a| { if a.iter().any(|p| p == Protocol::P2pCircuit) { - log::debug!("Dropping relayed address {a}"); + tracing::debug!(address=%a, "Dropping relayed address"); false } else { true diff --git a/protocols/dcutr/tests/lib.rs b/protocols/dcutr/tests/lib.rs index 1c5ddb5a972..a939fbccd11 100644 --- a/protocols/dcutr/tests/lib.rs +++ b/protocols/dcutr/tests/lib.rs @@ -30,10 +30,13 @@ use libp2p_relay as relay; use libp2p_swarm::{Config, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn connect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut relay = build_relay(); let mut dst = build_client(); diff --git a/protocols/floodsub/Cargo.toml b/protocols/floodsub/Cargo.toml index 7acdd851655..9d5776c56b7 100644 --- a/protocols/floodsub/Cargo.toml +++ b/protocols/floodsub/Cargo.toml @@ -19,12 +19,12 @@ futures = "0.3.29" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" smallvec = "1.11.1" thiserror = "1.0.50" +tracing = "0.1.37" # Passing arguments to the docsrs builder in order to properly document cfg's. 
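The `#[tracing::instrument]` attributes added in this patch (on the yamux `StreamMuxer` methods, the autonat behaviour and the dcutr handler above) wrap each `poll` call in a trace-level span. A minimal sketch of the attribute on a made-up `Handler` type, not the real connection handler:

    struct Handler {
        pending: usize,
    }

    impl Handler {
        // Each call opens a trace-level span named "Handler::poll";
        // `skip(self)` keeps the receiver out of the span's recorded fields.
        #[tracing::instrument(level = "trace", name = "Handler::poll", skip(self))]
        fn poll(&mut self) {
            // Recorded inside the "Handler::poll" span.
            tracing::trace!(pending = self.pending, "polled");
        }
    }

    fn main() {
        let _ = tracing_subscriber::fmt()
            .with_max_level(tracing::Level::TRACE)
            .try_init();

        let mut handler = Handler { pending: 1 };
        handler.poll();
    }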
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 5b6b89fea87..7fa9f3001b1 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -34,7 +34,6 @@ use libp2p_swarm::{ dial_opts::DialOpts, CloseConnection, ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, OneShotHandler, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use log::warn; use smallvec::SmallVec; use std::collections::hash_map::{DefaultHasher, HashMap}; use std::task::{Context, Poll}; @@ -224,7 +223,7 @@ impl Floodsub { .any(|t| message.topics.iter().any(|u| t == u)); if self_subscribed { if let Err(e @ CuckooError::NotEnoughSpace) = self.received.add(&message) { - warn!( + tracing::warn!( "Message was added to 'received' Cuckoofilter but some \ other message was removed as a consequence: {}", e, @@ -363,7 +362,7 @@ impl NetworkBehaviour for Floodsub { Ok(InnerMessage::Rx(event)) => event, Ok(InnerMessage::Sent) => return, Err(e) => { - log::debug!("Failed to send floodsub message: {e}"); + tracing::debug!("Failed to send floodsub message: {e}"); self.events.push_back(ToSwarm::CloseConnection { peer_id: propagation_source, connection: CloseConnection::One(connection_id), @@ -415,7 +414,7 @@ impl NetworkBehaviour for Floodsub { Ok(false) => continue, // Message already existed. Err(e @ CuckooError::NotEnoughSpace) => { // Message added, but some other removed. - warn!( + tracing::warn!( "Message was added to 'received' Cuckoofilter but some \ other message was removed as a consequence: {}", e, diff --git a/protocols/gossipsub/Cargo.toml b/protocols/gossipsub/Cargo.toml index d76f9a3e364..1c2758e44b4 100644 --- a/protocols/gossipsub/Cargo.toml +++ b/protocols/gossipsub/Cargo.toml @@ -28,7 +28,6 @@ instant = "0.1.12" libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["rand"] } libp2p-swarm = { workspace = true } -log = "0.4.20" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" @@ -36,6 +35,7 @@ regex = "1.10.2" serde = { version = "1", optional = true, features = ["derive"] } sha2 = "0.10.8" smallvec = "1.11.1" +tracing = "0.1.37" unsigned-varint = { version = "0.7.2", features = ["asynchronous_codec"] } void = "1.0.2" @@ -44,13 +44,13 @@ prometheus-client = { workspace = true } [dev-dependencies] async-std = { version = "1.6.3", features = ["unstable"] } -env_logger = "0.10.0" hex = "0.4.2" libp2p-core = { workspace = true } libp2p-yamux = { workspace = true } libp2p-noise = { workspace = true } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 2a3a13ea6e7..f1069658b73 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -31,7 +31,6 @@ use std::{ use futures::StreamExt; use futures_ticker::Ticker; -use log::{debug, error, trace, warn}; use prometheus_client::registry::Registry; use rand::{seq::SliceRandom, thread_rng}; @@ -523,14 +522,14 @@ where /// Returns [`Ok(true)`] if the subscription worked. Returns [`Ok(false)`] if we were already /// subscribed. 
     pub fn subscribe<H: Hasher>(&mut self, topic: &Topic<H>) -> Result<bool, SubscriptionError> {
-        debug!("Subscribing to topic: {}", topic);
+        tracing::debug!(%topic, "Subscribing to topic");
         let topic_hash = topic.hash();
         if !self.subscription_filter.can_subscribe(&topic_hash) {
             return Err(SubscriptionError::NotAllowed);
         }
 
         if self.mesh.get(&topic_hash).is_some() {
-            debug!("Topic: {} is already in the mesh.", topic);
+            tracing::debug!(%topic, "Topic is already in the mesh");
             return Ok(false);
         }
 
@@ -548,7 +547,7 @@ where
             .into_protobuf();
 
         for peer in peer_list {
-            debug!("Sending SUBSCRIBE to peer: {:?}", peer);
+            tracing::debug!(%peer, "Sending SUBSCRIBE to peer");
             self.send_message(peer, event.clone())
                 .map_err(SubscriptionError::PublishError)?;
         }
@@ -557,7 +556,7 @@ where
         // call JOIN(topic)
         // this will add new peers to the mesh for the topic
         self.join(&topic_hash);
 
-        debug!("Subscribed to topic: {}", topic);
+        tracing::debug!(%topic, "Subscribed to topic");
         Ok(true)
     }
 
@@ -565,11 +564,11 @@ where
     ///
     /// Returns [`Ok(true)`] if we were subscribed to this topic.
     pub fn unsubscribe<H: Hasher>(&mut self, topic: &Topic<H>) -> Result<bool, PublishError> {
-        debug!("Unsubscribing from topic: {}", topic);
+        tracing::debug!(%topic, "Unsubscribing from topic");
         let topic_hash = topic.hash();
 
         if self.mesh.get(&topic_hash).is_none() {
-            debug!("Already unsubscribed from topic: {:?}", topic_hash);
+            tracing::debug!(topic=%topic_hash, "Already unsubscribed from topic");
             // we are not subscribed
             return Ok(false);
         }
 
@@ -588,7 +587,7 @@ where
                 .into_protobuf();
 
             for peer in peer_list {
-                debug!("Sending UNSUBSCRIBE to peer: {}", peer.to_string());
+                tracing::debug!(%peer, "Sending UNSUBSCRIBE to peer");
                 self.send_message(peer, event.clone())?;
             }
         }
@@ -597,7 +596,7 @@ where
         // this will remove the topic from the mesh
         self.leave(&topic_hash);
 
-        debug!("Unsubscribed from topic: {:?}", topic_hash);
+        tracing::debug!(topic=%topic_hash, "Unsubscribed from topic");
         Ok(true)
     }
 
@@ -641,14 +640,14 @@ where
         if self.duplicate_cache.contains(&msg_id) {
             // This message has already been seen. We don't re-publish messages that have already
             // been published on the network.
-            warn!(
-                "Not publishing a message that has already been published. Msg-id {}",
-                msg_id
+            tracing::warn!(
+                message=%msg_id,
+                "Not publishing a message that has already been published"
             );
             return Err(PublishError::Duplicate);
         }
 
-        trace!("Publishing message: {:?}", msg_id);
+        tracing::trace!(message=%msg_id, "Publishing message");
 
         let topic_hash = raw_message.topic.clone();
 
@@ -689,7 +688,7 @@ where
 
         // Gossipsub peers
         if self.mesh.get(&topic_hash).is_none() {
-            debug!("Topic: {:?} not in the mesh", topic_hash);
+            tracing::debug!(topic=%topic_hash, "Topic not in the mesh");
             // If we have fanout peers add them to the map.
             if self.fanout.contains_key(&topic_hash) {
                 for peer in self.fanout.get(&topic_hash).expect("Topic must exist") {
@@ -715,7 +714,7 @@ where
             // Add the new peers to the fanout and recipient peers
             self.fanout.insert(topic_hash.clone(), new_peers.clone());
             for peer in new_peers {
-                debug!("Peer added to fanout: {:?}", peer);
+                tracing::debug!(%peer, "Peer added to fanout");
                 recipient_peers.insert(peer);
             }
         }
@@ -746,7 +745,7 @@ where
         // Send to peers we know are subscribed to the topic.
let msg_bytes = event.get_size(); for peer_id in recipient_peers.iter() { - trace!("Sending message to peer: {:?}", peer_id); + tracing::trace!(peer=%peer_id, "Sending message to peer"); self.send_message(*peer_id, event.clone())?; if let Some(m) = self.metrics.as_mut() { @@ -754,7 +753,7 @@ where } } - debug!("Published message: {:?}", &msg_id); + tracing::debug!(message=%msg_id, "Published message"); if let Some(metrics) = self.metrics.as_mut() { metrics.register_published_message(&topic_hash); @@ -795,9 +794,9 @@ where (raw_message.clone(), originating_peers) } None => { - warn!( - "Message not in cache. Ignoring forwarding. Message Id: {}", - msg_id + tracing::warn!( + message=%msg_id, + "Message not in cache. Ignoring forwarding" ); if let Some(metrics) = self.metrics.as_mut() { metrics.memcache_miss(); @@ -842,14 +841,14 @@ where } Ok(true) } else { - warn!("Rejected message not in cache. Message Id: {}", msg_id); + tracing::warn!(message=%msg_id, "Rejected message not in cache"); Ok(false) } } /// Adds a new peer to the list of explicitly connected peers. pub fn add_explicit_peer(&mut self, peer_id: &PeerId) { - debug!("Adding explicit peer {}", peer_id); + tracing::debug!(peer=%peer_id, "Adding explicit peer"); self.explicit_peers.insert(*peer_id); @@ -859,7 +858,7 @@ where /// This removes the peer from explicitly connected peers, note that this does not disconnect /// the peer. pub fn remove_explicit_peer(&mut self, peer_id: &PeerId) { - debug!("Removing explicit peer {}", peer_id); + tracing::debug!(peer=%peer_id, "Removing explicit peer"); self.explicit_peers.remove(peer_id); } @@ -867,14 +866,14 @@ where /// created by this peer will be rejected. pub fn blacklist_peer(&mut self, peer_id: &PeerId) { if self.blacklisted_peers.insert(*peer_id) { - debug!("Peer has been blacklisted: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer has been blacklisted"); } } /// Removes a peer from the blacklist if it has previously been blacklisted. pub fn remove_blacklisted_peer(&mut self, peer_id: &PeerId) { if self.blacklisted_peers.remove(peer_id) { - debug!("Peer has been removed from the blacklist: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer has been removed from the blacklist"); } } @@ -943,11 +942,11 @@ where /// Gossipsub JOIN(topic) - adds topic peers to mesh and sends them GRAFT messages. fn join(&mut self, topic_hash: &TopicHash) { - debug!("Running JOIN for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Running JOIN for topic"); // if we are already in the mesh, return if self.mesh.contains_key(topic_hash) { - debug!("JOIN: The topic is already in the mesh, ignoring JOIN"); + tracing::debug!(topic=%topic_hash, "JOIN: The topic is already in the mesh, ignoring JOIN"); return; } @@ -960,9 +959,9 @@ where // check if we have mesh_n peers in fanout[topic] and add them to the mesh if we do, // removing the fanout entry. 
if let Some((_, mut peers)) = self.fanout.remove_entry(topic_hash) { - debug!( - "JOIN: Removing peers from the fanout for topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "JOIN: Removing peers from the fanout for topic" ); // remove explicit peers, peers with negative scores, and backoffed peers @@ -975,9 +974,10 @@ where // Add up to mesh_n of them them to the mesh // NOTE: These aren't randomly added, currently FIFO let add_peers = std::cmp::min(peers.len(), self.config.mesh_n()); - debug!( - "JOIN: Adding {:?} peers from the fanout for topic: {:?}", - add_peers, topic_hash + tracing::debug!( + topic=%topic_hash, + "JOIN: Adding {:?} peers from the fanout for topic", + add_peers ); added_peers.extend(peers.iter().cloned().take(add_peers)); @@ -1012,7 +1012,7 @@ where ); added_peers.extend(new_peers.clone()); // add them to the mesh - debug!( + tracing::debug!( "JOIN: Inserting {:?} random peers into the mesh", new_peers.len() ); @@ -1027,7 +1027,7 @@ where for peer_id in added_peers { // Send a GRAFT control message - debug!("JOIN: Sending Graft message to peer: {:?}", peer_id); + tracing::debug!(peer=%peer_id, "JOIN: Sending Graft message to peer"); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(&peer_id, topic_hash.clone()); } @@ -1055,7 +1055,7 @@ where m.set_mesh_peers(topic_hash, mesh_peers) } - debug!("Completed JOIN for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Completed JOIN for topic"); } /// Creates a PRUNE gossipsub action. @@ -1072,7 +1072,7 @@ where match self.connected_peers.get(peer).map(|v| &v.kind) { Some(PeerKind::Floodsub) => { - error!("Attempted to prune a Floodsub peer"); + tracing::error!("Attempted to prune a Floodsub peer"); } Some(PeerKind::Gossipsub) => { // GossipSub v1.0 -- no peer exchange, the peer won't be able to parse it anyway @@ -1083,7 +1083,7 @@ where }; } None => { - error!("Attempted to Prune an unknown peer"); + tracing::error!("Attempted to Prune an unknown peer"); } _ => {} // Gossipsub 1.1 peer perform the `Prune` } @@ -1122,7 +1122,7 @@ where /// Gossipsub LEAVE(topic) - Notifies mesh\[topic\] peers with PRUNE messages. fn leave(&mut self, topic_hash: &TopicHash) { - debug!("Running LEAVE for topic {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Running LEAVE for topic"); // If our mesh contains the topic, send prune to peers and delete it from the mesh if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) { @@ -1131,7 +1131,7 @@ where } for peer in peers { // Send a PRUNE control message - debug!("LEAVE: Sending PRUNE to peer: {:?}", peer); + tracing::debug!(%peer, "LEAVE: Sending PRUNE to peer"); let on_unsubscribe = true; let control = self.make_prune(topic_hash, &peer, self.config.do_px(), on_unsubscribe); @@ -1148,14 +1148,14 @@ where ); } } - debug!("Completed LEAVE for topic: {:?}", topic_hash); + tracing::debug!(topic=%topic_hash, "Completed LEAVE for topic"); } /// Checks if the given peer is still connected and if not dials the peer again. 
     fn check_explicit_peer_connection(&mut self, peer_id: &PeerId) {
         if !self.peer_topics.contains_key(peer_id) {
             // Connect to peer
-            debug!("Connecting to explicit peer {:?}", peer_id);
+            tracing::debug!(peer=%peer_id, "Connecting to explicit peer");
             self.events.push_back(ToSwarm::Dial {
                 opts: DialOpts::peer_id(*peer_id).build(),
             });
@@ -1193,9 +1193,10 @@ where
     fn handle_ihave(&mut self, peer_id: &PeerId, ihave_msgs: Vec<(TopicHash, Vec<MessageId>)>) {
         // We ignore IHAVE gossip from any peer whose score is below the gossip threshold
         if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) {
-            debug!(
-                "IHAVE: ignoring peer {:?} with score below threshold [score = {}]",
-                peer_id, score
+            tracing::debug!(
+                peer=%peer_id,
+                %score,
+                "IHAVE: ignoring peer with score below threshold"
             );
             return;
         }
@@ -1204,25 +1205,27 @@ where
         let peer_have = self.count_received_ihave.entry(*peer_id).or_insert(0);
         *peer_have += 1;
         if *peer_have > self.config.max_ihave_messages() {
-            debug!(
-                "IHAVE: peer {} has advertised too many times ({}) within this heartbeat \
+            tracing::debug!(
+                peer=%peer_id,
+                "IHAVE: peer has advertised too many times ({}) within this heartbeat \
                 interval; ignoring",
-                peer_id, *peer_have
+                *peer_have
             );
             return;
         }
 
         if let Some(iasked) = self.count_sent_iwant.get(peer_id) {
             if *iasked >= self.config.max_ihave_length() {
-                debug!(
-                    "IHAVE: peer {} has already advertised too many messages ({}); ignoring",
-                    peer_id, *iasked
+                tracing::debug!(
+                    peer=%peer_id,
+                    "IHAVE: peer has already advertised too many messages ({}); ignoring",
+                    *iasked
                 );
                 return;
             }
         }
 
-        trace!("Handling IHAVE for peer: {:?}", peer_id);
+        tracing::trace!(peer=%peer_id, "Handling IHAVE for peer");
 
         let mut iwant_ids = HashSet::new();
 
@@ -1244,9 +1247,9 @@ where
         for (topic, ids) in ihave_msgs {
             // only process the message if we are subscribed
             if !self.mesh.contains_key(&topic) {
-                debug!(
-                    "IHAVE: Ignoring IHAVE - Not subscribed to topic: {:?}",
-                    topic
+                tracing::debug!(
+                    %topic,
+                    "IHAVE: Ignoring IHAVE - Not subscribed to topic"
                 );
                 continue;
             }
@@ -1270,11 +1273,11 @@ where
             }
 
             // Send the list of IWANT control messages
-            debug!(
-                "IHAVE: Asking for {} out of {} messages from {}",
+            tracing::debug!(
+                peer=%peer_id,
+                "IHAVE: Asking for {} out of {} messages from peer",
                 iask,
-                iwant_ids.len(),
-                peer_id
+                iwant_ids.len()
             );
 
             // Ask in random order
@@ -1297,9 +1300,9 @@ where
                     Instant::now() + self.config.iwant_followup_time(),
                 );
             }
-            trace!(
-                "IHAVE: Asking for the following messages from {}: {:?}",
-                peer_id,
+            tracing::trace!(
+                peer=%peer_id,
+                "IHAVE: Asking for the following messages from peer: {:?}",
                 iwant_ids_vec
             );
 
@@ -1311,7 +1314,7 @@ where
                 },
             );
         }
-        trace!("Completed IHAVE handling for peer: {:?}", peer_id);
+        tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer");
     }
 
     /// Handles an IWANT control message. Checks our cache of messages. If the message exists it is
@@ -1319,14 +1322,15 @@ where
     fn handle_iwant(&mut self, peer_id: &PeerId, iwant_msgs: Vec<MessageId>) {
         // We ignore IWANT gossip from any peer whose score is below the gossip threshold
         if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) {
-            debug!(
-                "IWANT: ignoring peer {:?} with score below threshold [score = {}]",
-                peer_id, score
+            tracing::debug!(
+                peer=%peer_id,
+                "IWANT: ignoring peer with score below threshold [score = {}]",
+                score
            );
             return;
         }
 
-        debug!("Handling IWANT for peer: {:?}", peer_id);
+        tracing::debug!(peer=%peer_id, "Handling IWANT for peer");
 
         // build a hashmap of available messages
         let mut cached_messages = HashMap::new();
@@ -1335,10 +1339,10 @@ where
             // cached_messages mapping
             if let Some((msg, count)) = self.mcache.get_with_iwant_counts(&id, peer_id) {
                 if count > self.config.gossip_retransimission() {
-                    debug!(
-                        "IWANT: Peer {} has asked for message {} too many times; ignoring \
-                        request",
-                        peer_id, &id
+                    tracing::debug!(
+                        peer=%peer_id,
+                        message=%id,
+                        "IWANT: Peer has asked for message too many times; ignoring request"
                     );
                 } else {
                     cached_messages.insert(id.clone(), msg.clone());
@@ -1347,7 +1351,7 @@ where
         }
 
         if !cached_messages.is_empty() {
-            debug!("IWANT: Sending cached messages to peer: {:?}", peer_id);
+            tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer");
             // Send the messages to the peer
             let message_list: Vec<_> = cached_messages.into_iter().map(|entry| entry.1).collect();
 
@@ -1366,7 +1370,7 @@ where
             let msg_bytes = message.get_size();
 
             if self.send_message(*peer_id, message).is_err() {
-                error!("Failed to send cached messages. Messages too large");
+                tracing::error!("Failed to send cached messages. Messages too large");
             } else if let Some(m) = self.metrics.as_mut() {
                 // Sending of messages succeeded, register them on the internal metrics.
                 for topic in topics.iter() {
@@ -1374,13 +1378,13 @@ where
                 }
             }
         }
-        debug!("Completed IWANT handling for peer: {}", peer_id);
+        tracing::debug!(peer=%peer_id, "Completed IWANT handling for peer");
     }
 
     /// Handles GRAFT control messages. If subscribed to the topic, adds the peer to mesh, if not,
     /// responds with PRUNE messages.
fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec) { - debug!("Handling GRAFT message for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Handling GRAFT message for peer"); let mut to_prune_topics = HashSet::new(); @@ -1401,7 +1405,7 @@ where // we don't GRAFT to/from explicit peers; complain loudly if this happens if self.explicit_peers.contains(peer_id) { - warn!("GRAFT: ignoring request from direct peer {}", peer_id); + tracing::warn!(peer=%peer_id, "GRAFT: ignoring request from direct peer"); // this is possibly a bug from non-reciprocal configuration; send a PRUNE for all topics to_prune_topics = topics.into_iter().collect(); // but don't PX @@ -1413,9 +1417,10 @@ where if let Some(peers) = self.mesh.get_mut(&topic_hash) { // if the peer is already in the mesh ignore the graft if peers.contains(peer_id) { - debug!( - "GRAFT: Received graft for peer {:?} that is already in topic {:?}", - peer_id, &topic_hash + tracing::debug!( + peer=%peer_id, + topic=%&topic_hash, + "GRAFT: Received graft for peer that is already in topic" ); continue; } @@ -1424,9 +1429,9 @@ where if let Some(backoff_time) = self.backoffs.get_backoff_time(&topic_hash, peer_id) { if backoff_time > now { - warn!( - "[Penalty] Peer attempted graft within backoff time, penalizing {}", - peer_id + tracing::warn!( + peer=%peer_id, + "[Penalty] Peer attempted graft within backoff time, penalizing" ); // add behavioural penalty if let Some((peer_score, ..)) = &mut self.peer_score { @@ -1457,10 +1462,11 @@ where // check the score if below_zero { // we don't GRAFT peers with negative score - debug!( - "GRAFT: ignoring peer {:?} with negative score [score = {}, \ - topic = {}]", - peer_id, score, topic_hash + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "GRAFT: ignoring peer with negative score" ); // we do send them PRUNE however, because it's a matter of protocol correctness to_prune_topics.insert(topic_hash.clone()); @@ -1479,9 +1485,10 @@ where } // add peer to the mesh - debug!( - "GRAFT: Mesh link added for peer: {:?} in topic: {:?}", - peer_id, &topic_hash + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Mesh link added for peer in topic" ); if peers.insert(*peer_id) { @@ -1506,9 +1513,10 @@ where } else { // don't do PX when there is an unknown topic to avoid leaking our peers do_px = false; - debug!( - "GRAFT: Received graft for unknown topic {:?} from peer {:?}", - &topic_hash, peer_id + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "GRAFT: Received graft for unknown topic from peer" ); // spam hardening: ignore GRAFTs for unknown topics continue; @@ -1524,9 +1532,9 @@ where .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe)) .collect(); // Send the prune messages to the peer - debug!( - "GRAFT: Not subscribed to topics - Sending PRUNE to peer: {}", - peer_id + tracing::debug!( + peer=%peer_id, + "GRAFT: Not subscribed to topics - Sending PRUNE to peer" ); if let Err(e) = self.send_message( @@ -1538,10 +1546,10 @@ where } .into_protobuf(), ) { - error!("Failed to send PRUNE: {:?}", e); + tracing::error!("Failed to send PRUNE: {:?}", e); } } - debug!("Completed GRAFT handling for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer"); } fn remove_peer_from_mesh( @@ -1556,10 +1564,10 @@ where if let Some(peers) = self.mesh.get_mut(topic_hash) { // remove the peer if it exists in the mesh if peers.remove(peer_id) { - debug!( - "PRUNE: Removing peer: {} from the mesh for topic: {}", - peer_id.to_string(), - 
topic_hash + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "PRUNE: Removing peer from the mesh for topic" ); if let Some(m) = self.metrics.as_mut() { m.peers_removed(topic_hash, reason, 1) @@ -1599,7 +1607,7 @@ where peer_id: &PeerId, prune_data: Vec<(TopicHash, Vec, Option)>, ) { - debug!("Handling PRUNE message for peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Handling PRUNE message for peer"); let (below_threshold, score) = self.score_below_threshold(peer_id, |pst| pst.accept_px_threshold); for (topic_hash, px, backoff) in prune_data { @@ -1610,10 +1618,11 @@ where if !px.is_empty() { // we ignore PX from peers with insufficient score if below_threshold { - debug!( - "PRUNE: ignoring PX from peer {:?} with insufficient score \ - [score ={} topic = {}]", - peer_id, score, topic_hash + tracing::debug!( + peer=%peer_id, + %score, + topic=%topic_hash, + "PRUNE: ignoring PX from peer with insufficient score" ); continue; } @@ -1630,7 +1639,7 @@ where } } } - debug!("Completed PRUNE handling for peer: {}", peer_id.to_string()); + tracing::debug!(peer=%peer_id, "Completed PRUNE handling for peer"); } fn px_connect(&mut self, mut px: Vec) { @@ -1670,17 +1679,17 @@ where raw_message: &mut RawMessage, propagation_source: &PeerId, ) -> bool { - debug!( - "Handling message: {:?} from peer: {}", - msg_id, - propagation_source.to_string() + tracing::debug!( + peer=%propagation_source, + message=%msg_id, + "Handling message from peer" ); // Reject any message from a blacklisted peer if self.blacklisted_peers.contains(propagation_source) { - debug!( - "Rejecting message from blacklisted peer: {}", - propagation_source + tracing::debug!( + peer=%propagation_source, + "Rejecting message from blacklisted peer" ); if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { peer_score.reject_message( @@ -1697,9 +1706,10 @@ where // Also reject any message that originated from a blacklisted peer if let Some(source) = raw_message.source.as_ref() { if self.blacklisted_peers.contains(source) { - debug!( - "Rejecting message from peer {} because of blacklisted source: {}", - propagation_source, source + tracing::debug!( + peer=%propagation_source, + %source, + "Rejecting message from peer because of blacklisted source" ); self.handle_invalid_message( propagation_source, @@ -1727,9 +1737,10 @@ where }; if self_published { - debug!( - "Dropping message {} claiming to be from self but forwarded from {}", - msg_id, propagation_source + tracing::debug!( + message=%msg_id, + source=%propagation_source, + "Dropping message claiming to be from self but forwarded from source" ); self.handle_invalid_message(propagation_source, raw_message, RejectReason::SelfOrigin); return false; @@ -1755,7 +1766,7 @@ where let message = match self.data_transform.inbound_transform(raw_message.clone()) { Ok(message) => message, Err(e) => { - debug!("Invalid message. Transform error: {:?}", e); + tracing::debug!("Invalid message. Transform error: {:?}", e); // Reject the message and return self.handle_invalid_message( propagation_source, @@ -1777,16 +1788,16 @@ where } if !self.duplicate_cache.insert(msg_id.clone()) { - debug!("Message already received, ignoring. 
Message: {}", msg_id); + tracing::debug!(message=%msg_id, "Message already received, ignoring"); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.duplicated_message(propagation_source, &msg_id, &message.topic); } self.mcache.observe_duplicate(&msg_id, propagation_source); return; } - debug!( - "Put message {:?} in duplicate_cache and resolve promises", - msg_id + tracing::debug!( + message=%msg_id, + "Put message in duplicate_cache and resolve promises" ); // Record the received message with the metrics @@ -1806,7 +1817,7 @@ where // Dispatch the message to the user if we are subscribed to any of the topics if self.mesh.contains_key(&message.topic) { - debug!("Sending received message to user"); + tracing::debug!("Sending received message to user"); self.events .push_back(ToSwarm::GenerateEvent(Event::Message { propagation_source: *propagation_source, @@ -1814,9 +1825,9 @@ where message, })); } else { - debug!( - "Received message on a topic we are not subscribed to: {:?}", - message.topic + tracing::debug!( + topic=%message.topic, + "Received message on a topic we are not subscribed to" ); return; } @@ -1832,9 +1843,9 @@ where ) .is_err() { - error!("Failed to forward message. Too large"); + tracing::error!("Failed to forward message. Too large"); } - debug!("Completed message handling for message: {:?}", msg_id); + tracing::debug!(message=%msg_id, "Completed message handling for message"); } } @@ -1876,10 +1887,10 @@ where subscriptions: &[Subscription], propagation_source: &PeerId, ) { - debug!( - "Handling subscriptions: {:?}, from source: {}", + tracing::debug!( + source=%propagation_source, + "Handling subscriptions: {:?}", subscriptions, - propagation_source.to_string() ); let mut unsubscribed_peers = Vec::new(); @@ -1887,9 +1898,9 @@ where let subscribed_topics = match self.peer_topics.get_mut(propagation_source) { Some(topics) => topics, None => { - error!( - "Subscription by unknown peer: {}", - propagation_source.to_string() + tracing::error!( + peer=%propagation_source, + "Subscription by unknown peer" ); return; } @@ -1907,10 +1918,10 @@ where { Ok(topics) => topics, Err(s) => { - error!( - "Subscription filter error: {}; ignoring RPC from peer {}", - s, - propagation_source.to_string() + tracing::error!( + peer=%propagation_source, + "Subscription filter error: {}; ignoring RPC from peer", + s ); return; } @@ -1924,10 +1935,10 @@ where match subscription.action { SubscriptionAction::Subscribe => { if peer_list.insert(*propagation_source) { - debug!( - "SUBSCRIPTION: Adding gossip peer: {} to topic: {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding gossip peer to topic" ); } @@ -1956,19 +1967,19 @@ where if peers.len() < self.config.mesh_n_low() && peers.insert(*propagation_source) { - debug!( - "SUBSCRIPTION: Adding peer {} to the mesh for topic {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Adding peer to the mesh for topic" ); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Subscribed, 1) } // send graft to the peer - debug!( - "Sending GRAFT to peer {} for topic {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "Sending GRAFT to peer for topic" ); if let Some((peer_score, ..)) = &mut self.peer_score { peer_score.graft(propagation_source, topic_hash.clone()); @@ 
-1985,10 +1996,10 @@ where } SubscriptionAction::Unsubscribe => { if peer_list.remove(propagation_source) { - debug!( - "SUBSCRIPTION: Removing gossip peer: {} from topic: {:?}", - propagation_source.to_string(), - topic_hash + tracing::debug!( + peer=%propagation_source, + topic=%topic_hash, + "SUBSCRIPTION: Removing gossip peer from topic" ); } @@ -2044,7 +2055,7 @@ where ) .is_err() { - error!("Failed sending grafts. Message too large"); + tracing::error!("Failed sending grafts. Message too large"); } // Notify the application of the subscriptions @@ -2052,9 +2063,9 @@ where self.events.push_back(event); } - trace!( - "Completed handling subscriptions from source: {:?}", - propagation_source + tracing::trace!( + source=%propagation_source, + "Completed handling subscriptions from source" ); } @@ -2072,7 +2083,7 @@ where /// Heartbeat function which shifts the memcache and updates the mesh. fn heartbeat(&mut self) { - debug!("Starting heartbeat"); + tracing::debug!("Starting heartbeat"); let start = Instant::now(); self.heartbeat_ticks += 1; @@ -2128,10 +2139,11 @@ where } if peer_score < 0.0 { - debug!( - "HEARTBEAT: Prune peer {:?} with negative score [score = {}, topic = \ - {}]", - peer_id, peer_score, topic_hash + tracing::debug!( + peer=%peer_id, + score=%peer_score, + topic=%topic_hash, + "HEARTBEAT: Prune peer with negative score" ); let current_topic = to_prune.entry(*peer_id).or_insert_with(Vec::new); @@ -2151,9 +2163,9 @@ where // too little peers - add some if peers.len() < self.config.mesh_n_low() { - debug!( - "HEARTBEAT: Mesh low. Topic: {} Contains: {} needs: {}", - topic_hash, + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Mesh low. Topic contains: {} needs: {}", peers.len(), self.config.mesh_n_low() ); @@ -2176,7 +2188,7 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) } @@ -2185,9 +2197,9 @@ where // too many peers - remove some if peers.len() > self.config.mesh_n_high() { - debug!( - "HEARTBEAT: Mesh high. Topic: {} Contains: {} needs: {}", - topic_hash, + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Mesh high. Topic contains: {} needs: {}", peers.len(), self.config.mesh_n_high() ); @@ -2270,7 +2282,7 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!("Updating mesh, new mesh: {:?}", peer_list); + tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Outbound, peer_list.len()) } @@ -2337,9 +2349,10 @@ where current_topic.push(topic_hash.clone()); } // update the mesh - debug!( - "Opportunistically graft in topic {} with peers {:?}", - topic_hash, peer_list + tracing::debug!( + topic=%topic_hash, + "Opportunistically graft in topic with peers {:?}", + peer_list ); if let Some(m) = self.metrics.as_mut() { m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) @@ -2360,9 +2373,9 @@ where let fanout_ttl = self.config.fanout_ttl(); self.fanout_last_pub.retain(|topic_hash, last_pub_time| { if *last_pub_time + fanout_ttl < Instant::now() { - debug!( - "HEARTBEAT: Fanout topic removed due to timeout. 
Topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Fanout topic removed due to timeout" ); fanout.remove(topic_hash); return false; @@ -2385,9 +2398,9 @@ where match self.peer_topics.get(peer) { Some(topics) => { if !topics.contains(topic_hash) || peer_score < publish_threshold { - debug!( - "HEARTBEAT: Peer removed from fanout for topic: {:?}", - topic_hash + tracing::debug!( + topic=%topic_hash, + "HEARTBEAT: Peer removed from fanout for topic" ); to_remove_peers.push(*peer); } @@ -2404,7 +2417,7 @@ where // not enough peers if peers.len() < self.config.mesh_n() { - debug!( + tracing::debug!( "HEARTBEAT: Fanout low. Contains: {:?} needs: {:?}", peers.len(), self.config.mesh_n() @@ -2427,7 +2440,7 @@ where } if self.peer_score.is_some() { - trace!("Mesh message deliveries: {:?}", { + tracing::trace!("Mesh message deliveries: {:?}", { self.mesh .iter() .map(|(t, peers)| { @@ -2466,7 +2479,7 @@ where // shift the memcache self.mcache.shift(); - debug!("Completed Heartbeat"); + tracing::debug!("Completed Heartbeat"); if let Some(metrics) = self.metrics.as_mut() { let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); metrics.observe_heartbeat_duration(duration); @@ -2486,7 +2499,7 @@ where // if we are emitting more than GossipSubMaxIHaveLength message_ids, truncate the list if message_ids.len() > self.config.max_ihave_length() { // we do the truncation (with shuffling) per peer below - debug!( + tracing::debug!( "too many messages for gossip; will truncate IHAVE list ({} messages)", message_ids.len() ); @@ -2515,7 +2528,7 @@ where }, ); - debug!("Gossiping IHAVE to {} peers.", to_msg_peers.len()); + tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len()); for peer in to_msg_peers { let mut peer_message_ids = message_ids.clone(); @@ -2610,7 +2623,7 @@ where ) .is_err() { - error!("Failed to send control messages. Message too large"); + tracing::error!("Failed to send control messages. Message too large"); } } @@ -2650,7 +2663,7 @@ where ) .is_err() { - error!("Failed to send prune messages. Message too large"); + tracing::error!("Failed to send prune messages. Message too large"); } } } @@ -2672,7 +2685,7 @@ where } } - debug!("Forwarding message: {:?}", msg_id); + tracing::debug!(message=%msg_id, "Forwarding message"); let mut recipient_peers = HashSet::new(); { @@ -2717,13 +2730,13 @@ where let msg_bytes = event.get_size(); for peer in recipient_peers.iter() { - debug!("Sending message: {:?} to peer {:?}", msg_id, peer); + tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); self.send_message(*peer, event.clone())?; if let Some(m) = self.metrics.as_mut() { m.msg_sent(&message.topic, msg_bytes); } } - debug!("Completed forwarding message"); + tracing::debug!("Completed forwarding message"); Ok(true) } else { Ok(false) @@ -2846,7 +2859,7 @@ where ) .is_err() { - error!("Failed to flush control pool. Message too large"); + tracing::error!("Failed to flush control pool. Message too large"); } } @@ -2913,7 +2926,7 @@ where if object_size + 2 > self.config.max_transmit_size() { // This should not be possible. All received and published messages have already // been vetted to fit within the size. 
- error!("Individual message too large to fragment"); + tracing::error!("Individual message too large to fragment"); return Err(PublishError::MessageTooLarge); } @@ -3018,9 +3031,9 @@ where if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { peer_score.add_ip(&peer_id, ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint ) } @@ -3043,9 +3056,9 @@ where if other_established == 0 { // Ignore connections from blacklisted peers. if self.blacklisted_peers.contains(&peer_id) { - debug!("Ignoring connection from blacklisted peer: {}", peer_id); + tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer"); } else { - debug!("New peer connected: {}", peer_id); + tracing::debug!(peer=%peer_id, "New peer connected"); // We need to send our subscriptions to the newly-connected node. let mut subscriptions = vec![]; for topic_hash in self.mesh.keys() { @@ -3069,7 +3082,7 @@ where ) .is_err() { - error!("Failed to send subscriptions, message too large"); + tracing::error!("Failed to send subscriptions, message too large"); } } } @@ -3098,9 +3111,9 @@ where if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) { peer_score.remove_ip(&peer_id, &ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint ) } @@ -3137,7 +3150,7 @@ where } } else { // remove from mesh, topic_peers, peer_topic and the fanout - debug!("Peer disconnected: {}", peer_id); + tracing::debug!(peer=%peer_id, "Peer disconnected"); { let topics = match self.peer_topics.get(&peer_id) { Some(topics) => topics, @@ -3167,18 +3180,19 @@ where if let Some(peer_list) = self.topic_peers.get_mut(topic) { if !peer_list.remove(&peer_id) { // debugging purposes - warn!( - "Disconnected node: {} not in topic_peers peer list", - peer_id + tracing::warn!( + peer=%peer_id, + "Disconnected node: peer not in topic_peers" ); } if let Some(m) = self.metrics.as_mut() { m.set_topic_peers(topic, peer_list.len()) } } else { - warn!( - "Disconnected node: {} with topic: {:?} not in topic_peers", - &peer_id, &topic + tracing::warn!( + peer=%peer_id, + topic=%topic, + "Disconnected node: peer with topic not in topic_peers" ); } @@ -3230,18 +3244,18 @@ where if let Some(ip) = get_ip_addr(endpoint_old.get_remote_address()) { peer_score.remove_ip(&peer_id, &ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - &peer_id, + tracing::trace!( + peer=%&peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint_old ) } if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) { peer_score.add_ip(&peer_id, ip); } else { - trace!( - "Couldn't extract ip from endpoint of peer {} with endpoint {:?}", - peer_id, + tracing::trace!( + peer=%peer_id, + "Couldn't extract ip from endpoint of peer with endpoint {:?}", endpoint_new ) } @@ -3300,9 +3314,9 @@ where } if let PeerKind::NotSupported = kind { - debug!( - "Peer does not support gossipsub protocols. 
{}", - propagation_source + tracing::debug!( + peer=%propagation_source, + "Peer does not support gossipsub protocols" ); self.events .push_back(ToSwarm::GenerateEvent(Event::GossipsubNotSupported { @@ -3312,9 +3326,10 @@ where // Only change the value if the old value is Floodsub (the default set in // `NetworkBehaviour::on_event` with FromSwarm::ConnectionEstablished). // All other PeerKind changes are ignored. - debug!( - "New peer type found: {} for peer: {}", - kind, propagation_source + tracing::debug!( + peer=%propagation_source, + peer_type=%kind, + "New peer type found for peer" ); if let PeerKind::Floodsub = conn.kind { conn.kind = kind; @@ -3337,7 +3352,7 @@ where if let (true, _) = self.score_below_threshold(&propagation_source, |pst| pst.graylist_threshold) { - debug!("RPC Dropped from greylisted peer {}", propagation_source); + tracing::debug!(peer=%propagation_source, "RPC Dropped from greylisted peer"); return; } @@ -3353,11 +3368,11 @@ where } else { // log the invalid messages for (message, validation_error) in invalid_messages { - warn!( - "Invalid message. Reason: {:?} propagation_peer {} source {:?}", + tracing::warn!( + peer=%propagation_source, + source=?message.source, + "Invalid message from peer. Reason: {:?}", validation_error, - propagation_source.to_string(), - message.source ); } } @@ -3368,7 +3383,7 @@ where if self.config.max_messages_per_rpc().is_some() && Some(count) >= self.config.max_messages_per_rpc() { - warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count); + tracing::warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count); break; } self.handle_received_message(raw_message, &propagation_source); @@ -3411,6 +3426,7 @@ where } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -3566,7 +3582,7 @@ fn get_random_peers_dynamic( // if we have less than needed, return them let n = n_map(gossip_peers.len()); if gossip_peers.len() <= n { - debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); + tracing::debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); return gossip_peers.into_iter().collect(); } @@ -3574,7 +3590,7 @@ fn get_random_peers_dynamic( let mut rng = thread_rng(); gossip_peers.partial_shuffle(&mut rng, n); - debug!("RANDOM PEERS: Got {:?} peers", n); + tracing::debug!("RANDOM PEERS: Got {:?} peers", n); gossip_peers.into_iter().take(n).collect() } diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index dba5db4c01d..cf24ed8d8dc 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -4656,7 +4656,10 @@ fn test_limit_number_of_message_ids_inside_ihave() { #[test] fn test_iwant_penalties() { - let _ = env_logger::try_init(); + use tracing_subscriber::EnvFilter; + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = ConfigBuilder::default() .iwant_followup_time(Duration::from_secs(4)) diff --git a/protocols/gossipsub/src/gossip_promises.rs b/protocols/gossipsub/src/gossip_promises.rs index 827206afe8d..9538622c0dc 100644 --- a/protocols/gossipsub/src/gossip_promises.rs +++ b/protocols/gossipsub/src/gossip_promises.rs @@ -23,7 +23,6 @@ use crate::MessageId; use crate::ValidationError; use instant::Instant; use libp2p_identity::PeerId; -use log::debug; use std::collections::HashMap; /// Tracks recently sent `IWANT` 
messages and checks if peers respond to them. @@ -85,9 +84,10 @@ impl GossipPromises { if *expires < now { let count = result.entry(*peer_id).or_insert(0); *count += 1; - debug!( - "[Penalty] The peer {} broke the promise to deliver message {} in time!", - peer_id, msg + tracing::debug!( + peer=%peer_id, + message=%msg, + "[Penalty] The peer broke the promise to deliver message in time!" ); false } else { diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 44258bb5394..4f3dd5c9f63 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -187,7 +187,7 @@ impl EnabledHandler { } // new inbound substream. Replace the current one, if it exists. - log::trace!("New inbound substream request"); + tracing::trace!("New inbound substream request"); self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream)); } @@ -258,7 +258,7 @@ impl EnabledHandler { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(message)); } Poll::Ready(Some(Err(error))) => { - log::debug!("Failed to read from inbound stream: {error}"); + tracing::debug!("Failed to read from inbound stream: {error}"); // Close this side of the stream. If the // peer is still around, they will re-establish their // outbound stream i.e. our inbound stream. @@ -267,7 +267,7 @@ impl EnabledHandler { } // peer closed the stream Poll::Ready(None) => { - log::debug!("Inbound stream closed by remote"); + tracing::debug!("Inbound stream closed by remote"); self.inbound_substream = Some(InboundSubstreamState::Closing(substream)); } @@ -285,7 +285,7 @@ impl EnabledHandler { // Don't close the connection but just drop the inbound substream. // In case the remote has more to send, they will open up a new // substream. - log::debug!("Inbound substream error while closing: {e}"); + tracing::debug!("Inbound substream error while closing: {e}"); } self.inbound_substream = None; break; @@ -335,14 +335,16 @@ impl EnabledHandler { Some(OutboundSubstreamState::PendingFlush(substream)) } Err(e) => { - log::debug!("Failed to send message on outbound stream: {e}"); + tracing::debug!( + "Failed to send message on outbound stream: {e}" + ); self.outbound_substream = None; break; } } } Poll::Ready(Err(e)) => { - log::debug!("Failed to send message on outbound stream: {e}"); + tracing::debug!("Failed to send message on outbound stream: {e}"); self.outbound_substream = None; break; } @@ -361,7 +363,7 @@ impl EnabledHandler { Some(OutboundSubstreamState::WaitingOutput(substream)) } Poll::Ready(Err(e)) => { - log::debug!("Failed to flush outbound stream: {e}"); + tracing::debug!("Failed to flush outbound stream: {e}"); self.outbound_substream = None; break; } @@ -418,7 +420,7 @@ impl ConnectionHandler for Handler { } }, Handler::Disabled(_) => { - log::debug!("Handler is disabled. Dropping message {:?}", message); + tracing::debug!(?message, "Handler is disabled. 
Dropping message"); } } } @@ -427,6 +429,7 @@ impl ConnectionHandler for Handler { matches!(self, Handler::Enabled(h) if h.in_mesh) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -469,7 +472,7 @@ impl ConnectionHandler for Handler { handler.inbound_substream_attempts += 1; if handler.inbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { - log::warn!( + tracing::warn!( "The maximum number of inbound substreams attempts has been exceeded" ); *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); @@ -483,7 +486,7 @@ impl ConnectionHandler for Handler { handler.outbound_substream_attempts += 1; if handler.outbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS { - log::warn!( + tracing::warn!( "The maximum number of outbound substream attempts has been exceeded" ); *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts); @@ -506,7 +509,7 @@ impl ConnectionHandler for Handler { error: StreamUpgradeError::Timeout, .. }) => { - log::debug!("Dial upgrade error: Protocol negotiation timeout"); + tracing::debug!("Dial upgrade error: Protocol negotiation timeout"); } ConnectionEvent::DialUpgradeError(DialUpgradeError { error: StreamUpgradeError::Apply(e), @@ -517,7 +520,7 @@ impl ConnectionHandler for Handler { .. }) => { // The protocol is not supported - log::debug!( + tracing::debug!( "The remote peer does not support gossipsub on this connection" ); *self = Handler::Disabled(DisabledHandler::ProtocolUnsupported { @@ -528,7 +531,7 @@ impl ConnectionHandler for Handler { error: StreamUpgradeError::Io(e), .. }) => { - log::debug!("Protocol negotiation failed: {e}") + tracing::debug!("Protocol negotiation failed: {e}") } ConnectionEvent::AddressChange(_) | ConnectionEvent::ListenUpgradeError(_) diff --git a/protocols/gossipsub/src/mcache.rs b/protocols/gossipsub/src/mcache.rs index e85a5bf9c6a..ef4a93bc936 100644 --- a/protocols/gossipsub/src/mcache.rs +++ b/protocols/gossipsub/src/mcache.rs @@ -21,7 +21,6 @@ use crate::topic::TopicHash; use crate::types::{MessageId, RawMessage}; use libp2p_identity::PeerId; -use log::{debug, trace}; use std::collections::hash_map::Entry; use std::fmt::Debug; use std::{ @@ -87,7 +86,7 @@ impl MessageCache { entry.insert((msg, HashSet::default())); self.history[0].push(cache_entry); - trace!("Put message {:?} in mcache", message_id); + tracing::trace!(message=?message_id, "Put message in mcache"); true } } @@ -191,13 +190,13 @@ impl MessageCache { // If GossipsubConfig::validate_messages is true, the implementing // application has to ensure that Gossipsub::validate_message gets called for // each received message within the cache timeout time." - debug!( - "The message with id {} got removed from the cache without being validated.", - &entry.mid + tracing::debug!( + message=%&entry.mid, + "The message got removed from the cache without being validated." 
); } } - trace!("Remove message from the cache: {}", &entry.mid); + tracing::trace!(message=%&entry.mid, "Remove message from the cache"); self.iwant_counts.remove(&entry.mid); } diff --git a/protocols/gossipsub/src/peer_score.rs b/protocols/gossipsub/src/peer_score.rs index c6c918d6b2a..b370d2dfe06 100644 --- a/protocols/gossipsub/src/peer_score.rs +++ b/protocols/gossipsub/src/peer_score.rs @@ -26,7 +26,6 @@ use crate::time_cache::TimeCache; use crate::{MessageId, TopicHash}; use instant::Instant; use libp2p_identity::PeerId; -use log::{debug, trace, warn}; use std::collections::{hash_map, HashMap, HashSet}; use std::net::IpAddr; use std::time::Duration; @@ -274,13 +273,12 @@ impl PeerScore { if let Some(metrics) = metrics.as_mut() { metrics.register_score_penalty(Penalty::MessageDeficit); } - debug!( - "[Penalty] The peer {} has a mesh message deliveries deficit of {} in topic\ - {} and will get penalized by {}", - peer_id, - deficit, - topic, - p3 * topic_params.mesh_message_deliveries_weight + tracing::debug!( + peer=%peer_id, + %topic, + %deficit, + penalty=%topic_score, + "[Penalty] The peer has a mesh deliveries deficit and will be penalized" ); } @@ -326,10 +324,11 @@ impl PeerScore { if let Some(metrics) = metrics.as_mut() { metrics.register_score_penalty(Penalty::IPColocation); } - debug!( - "[Penalty] The peer {} gets penalized because of too many peers with the ip {}. \ - The surplus is {}. ", - peer_id, ip, surplus + tracing::debug!( + peer=%peer_id, + surplus_ip=%ip, + surplus=%surplus, + "[Penalty] The peer gets penalized because of too many peers with the same ip" ); score += p6 * self.params.ip_colocation_factor_weight; } @@ -347,9 +346,10 @@ impl PeerScore { pub(crate) fn add_penalty(&mut self, peer_id: &PeerId, count: usize) { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { - debug!( - "[Penalty] Behavioral penalty for peer {}, count = {}.", - peer_id, count + tracing::debug!( + peer=%peer_id, + %count, + "[Penalty] Behavioral penalty for peer" ); peer_stats.behaviour_penalty += count as f64; } @@ -445,7 +445,7 @@ impl PeerScore { /// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it pub(crate) fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) { - trace!("Add ip for peer {}, ip: {}", peer_id, ip); + tracing::trace!(peer=%peer_id, %ip, "Add ip for peer"); let peer_stats = self.peer_stats.entry(*peer_id).or_default(); // Mark the peer as connected (currently the default is connected, but we don't want to @@ -462,20 +462,20 @@ impl PeerScore { if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) { peer_stats.known_ips.remove(ip); if let Some(peer_ids) = self.peer_ips.get_mut(ip) { - trace!("Remove ip for peer {}, ip: {}", peer_id, ip); + tracing::trace!(peer=%peer_id, %ip, "Remove ip for peer"); peer_ids.remove(peer_id); } else { - trace!( - "No entry in peer_ips for ip {} which should get removed for peer {}", - ip, - peer_id + tracing::trace!( + peer=%peer_id, + %ip, + "No entry in peer_ips for ip which should get removed for peer" ); } } else { - trace!( - "No peer_stats for peer {} which should remove the ip {}", - peer_id, - ip + tracing::trace!( + peer=%peer_id, + %ip, + "No peer_stats for peer which should remove the ip" ); } } @@ -594,7 +594,12 @@ impl PeerScore { // this should be the first delivery trace if record.status != DeliveryStatus::Unknown { - warn!("Unexpected delivery trace: Message from {} was first seen {}s ago and has a delivery status {:?}", from, record.first_seen.elapsed().as_secs(), 
record.status); + tracing::warn!( + peer=%from, + status=?record.status, + first_seen=?record.first_seen.elapsed().as_secs(), + "Unexpected delivery trace" + ); return; } @@ -611,9 +616,9 @@ impl PeerScore { /// Similar to `reject_message` except does not require the message id or reason for an invalid message. pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) { - debug!( - "[Penalty] Message from {} rejected because of ValidationError or SelfOrigin", - from + tracing::debug!( + peer=%from, + "[Penalty] Message from peer rejected because of ValidationError or SelfOrigin" ); self.mark_invalid_message_delivery(from, topic_hash); @@ -778,10 +783,11 @@ impl PeerScore { if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params) { - debug!( - "[Penalty] Peer {} delivered an invalid message in topic {} and gets penalized \ + tracing::debug!( + peer=%peer_id, + topic=%topic_hash, + "[Penalty] Peer delivered an invalid message in topic and gets penalized \ for it", - peer_id, topic_hash ); topic_stats.invalid_message_deliveries += 1f64; } diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 15d2f59755a..dcd509f6aa9 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -34,7 +34,6 @@ use futures::prelude::*; use libp2p_core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use libp2p_identity::{PeerId, PublicKey}; use libp2p_swarm::StreamProtocol; -use log::{debug, warn}; use quick_protobuf::Writer; use std::pin::Pin; use unsigned_varint::codec; @@ -169,7 +168,7 @@ impl GossipsubCodec { let from = match message.from.as_ref() { Some(v) => v, None => { - debug!("Signature verification failed: No source id given"); + tracing::debug!("Signature verification failed: No source id given"); return false; } }; @@ -177,7 +176,7 @@ impl GossipsubCodec { let source = match PeerId::from_bytes(from) { Ok(v) => v, Err(_) => { - debug!("Signature verification failed: Invalid Peer Id"); + tracing::debug!("Signature verification failed: Invalid Peer Id"); return false; } }; @@ -185,7 +184,7 @@ impl GossipsubCodec { let signature = match message.signature.as_ref() { Some(v) => v, None => { - debug!("Signature verification failed: No signature provided"); + tracing::debug!("Signature verification failed: No signature provided"); return false; } }; @@ -197,7 +196,7 @@ impl GossipsubCodec { _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) { Ok(v) => v, Err(_) => { - warn!("Signature verification failed: No valid public key supplied"); + tracing::warn!("Signature verification failed: No valid public key supplied"); return false; } }, @@ -205,7 +204,9 @@ impl GossipsubCodec { // The key must match the peer_id if source != public_key.to_peer_id() { - warn!("Signature verification failed: Public key doesn't match source peer id"); + tracing::warn!( + "Signature verification failed: Public key doesn't match source peer id" + ); return false; } @@ -276,13 +277,17 @@ impl Decoder for GossipsubCodec { } ValidationMode::Anonymous => { if message.signature.is_some() { - warn!("Signature field was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Signature field was non-empty and anonymous validation mode is set" + ); invalid_kind = Some(ValidationError::SignaturePresent); } else if message.seqno.is_some() { - warn!("Sequence number was non-empty and anonymous validation mode is set"); + tracing::warn!( + "Sequence number was non-empty and 
anonymous validation mode is set" + ); invalid_kind = Some(ValidationError::SequenceNumberPresent); } else if message.from.is_some() { - warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); + tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set"); invalid_kind = Some(ValidationError::MessageSourcePresent); } } @@ -308,7 +313,7 @@ impl Decoder for GossipsubCodec { // verify message signatures if required if verify_signature && !GossipsubCodec::verify_signature(&message) { - warn!("Invalid signature for received message"); + tracing::warn!("Invalid signature for received message"); // Build the invalid message (ignoring further validation of sequence number // and source) @@ -332,10 +337,10 @@ impl Decoder for GossipsubCodec { if seq_no.is_empty() { None } else if seq_no.len() != 8 { - debug!( - "Invalid sequence number length for received message. SeqNo: {:?} Size: {}", - seq_no, - seq_no.len() + tracing::debug!( + sequence_number=?seq_no, + sequence_length=%seq_no.len(), + "Invalid sequence number length for received message" ); let message = RawMessage { source: None, // don't bother inform the application @@ -355,7 +360,7 @@ impl Decoder for GossipsubCodec { } } else { // sequence number was not present - debug!("Sequence number not present but expected"); + tracing::debug!("Sequence number not present but expected"); let message = RawMessage { source: None, // don't bother inform the application data: message.data.unwrap_or_default(), @@ -381,7 +386,7 @@ impl Decoder for GossipsubCodec { Ok(peer_id) => Some(peer_id), // valid peer id Err(_) => { // invalid peer id, add to invalid messages - debug!("Message source has an invalid PeerId"); + tracing::debug!("Message source has an invalid PeerId"); let message = RawMessage { source: None, // don't bother inform the application data: message.data.unwrap_or_default(), diff --git a/protocols/gossipsub/src/subscription_filter.rs b/protocols/gossipsub/src/subscription_filter.rs index 9f883f12a1b..09c323d7904 100644 --- a/protocols/gossipsub/src/subscription_filter.rs +++ b/protocols/gossipsub/src/subscription_filter.rs @@ -20,7 +20,6 @@ use crate::types::Subscription; use crate::TopicHash; -use log::debug; use std::collections::{BTreeSet, HashMap, HashSet}; pub trait TopicSubscriptionFilter { @@ -66,7 +65,7 @@ pub trait TopicSubscriptionFilter { if self.allow_incoming_subscription(s) { true } else { - debug!("Filtered incoming subscription {:?}", s); + tracing::debug!(subscription=?s, "Filtered incoming subscription"); false } }); diff --git a/protocols/gossipsub/tests/smoke.rs b/protocols/gossipsub/tests/smoke.rs index e8577bc78cf..c8876428b4e 100644 --- a/protocols/gossipsub/tests/smoke.rs +++ b/protocols/gossipsub/tests/smoke.rs @@ -25,11 +25,10 @@ use libp2p_gossipsub as gossipsub; use libp2p_gossipsub::{MessageAuthenticity, ValidationMode}; use libp2p_swarm::Swarm; use libp2p_swarm_test::SwarmExt as _; -use log::debug; use quickcheck::{QuickCheck, TestResult}; use rand::{seq::SliceRandom, SeedableRng}; use std::{task::Poll, time::Duration}; - +use tracing_subscriber::EnvFilter; struct Graph { nodes: SelectAll>, } @@ -129,14 +128,16 @@ async fn build_node() -> Swarm { #[test] fn multi_hop_propagation() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(num_nodes: u8, seed: u64) -> TestResult { if !(2..=50).contains(&num_nodes) { return TestResult::discard(); } - 
debug!("number nodes: {:?}, seed: {:?}", num_nodes, seed); + tracing::debug!(number_of_nodes=%num_nodes, seed=%seed); async_std::task::block_on(async move { let mut graph = Graph::new_connected(num_nodes as usize, seed).await; diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index 6db132b0189..9b0ef0eb139 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -18,20 +18,20 @@ futures-bounded = { workspace = true } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" lru = "0.12.0" quick-protobuf-codec = { workspace = true } quick-protobuf = "0.8" smallvec = "1.11.1" thiserror = "1.0" +tracing = "0.1.37" void = "1.0" either = "1.9.0" [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10" libp2p-swarm-test = { path = "../../swarm-test" } libp2p-swarm = { workspace = true, features = ["macros"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index 4f017dd1a9e..75ddfc812bf 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -168,7 +168,7 @@ impl Behaviour { { for p in peers { if !self.connected.contains_key(&p) { - log::debug!("Not pushing to {p} because we are not connected"); + tracing::debug!(peer=%p, "Not pushing to peer because we are not connected"); continue; } @@ -286,9 +286,10 @@ impl NetworkBehaviour for Behaviour { // No-op, we already observed this address. } Entry::Occupied(mut already_observed) => { - log::info!( - "Our observed address on connection {id} changed from {} to {observed}", - already_observed.get() + tracing::info!( + old_address=%already_observed.get(), + new_address=%observed, + "Our observed address on connection {id} changed", ); *already_observed.get_mut() = observed.clone(); @@ -312,6 +313,7 @@ impl NetworkBehaviour for Behaviour { } } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(event) = self.events.pop_front() { return Poll::Ready(event); diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index 966c7b378e0..963397e2274 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -36,10 +36,10 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, SupportedProtocols, }; -use log::{warn, Level}; use smallvec::SmallVec; use std::collections::HashSet; use std::{io, task::Context, task::Poll, time::Duration}; +use tracing::Level; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); const MAX_CONCURRENT_STREAMS_PER_CONNECTION: usize = 10; @@ -167,7 +167,7 @@ impl Handler { ) .is_err() { - warn!("Dropping inbound stream because we are at capacity"); + tracing::warn!("Dropping inbound stream because we are at capacity"); } else { self.exchanged_one_periodic_identify = true; } @@ -178,7 +178,9 @@ impl Handler { .try_push(protocol::recv_push(stream).map_ok(Success::ReceivedIdentifyPush)) .is_err() { - warn!("Dropping inbound identify push stream because we are at capacity"); + tracing::warn!( + "Dropping inbound identify push stream because we are at capacity" + ); } } } @@ 
-200,7 +202,7 @@ impl Handler { .try_push(protocol::recv_identify(stream).map_ok(Success::ReceivedIdentify)) .is_err() { - warn!("Dropping outbound identify stream because we are at capacity"); + tracing::warn!("Dropping outbound identify stream because we are at capacity"); } } future::Either::Right(stream) => { @@ -213,7 +215,9 @@ impl Handler { ) .is_err() { - warn!("Dropping outbound identify push stream because we are at capacity"); + tracing::warn!( + "Dropping outbound identify push stream because we are at capacity" + ); } } } @@ -312,6 +316,7 @@ impl ConnectionHandler for Handler { } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -406,18 +411,20 @@ impl ConnectionHandler for Handler { | ConnectionEvent::ListenUpgradeError(_) | ConnectionEvent::RemoteProtocolsChange(_) => {} ConnectionEvent::LocalProtocolsChange(change) => { - let before = log::log_enabled!(Level::Debug) + let before = tracing::enabled!(Level::DEBUG) .then(|| self.local_protocols_to_string()) .unwrap_or_default(); let protocols_changed = self.local_supported_protocols.on_protocols_change(change); - let after = log::log_enabled!(Level::Debug) + let after = tracing::enabled!(Level::DEBUG) .then(|| self.local_protocols_to_string()) .unwrap_or_default(); if protocols_changed && self.exchanged_one_periodic_identify { - log::debug!( - "Supported listen protocols changed from [{before}] to [{after}], pushing to {}", - self.remote_peer_id + tracing::debug!( + peer=%self.remote_peer_id, + %before, + %after, + "Supported listen protocols changed, pushing to peer" ); self.events diff --git a/protocols/identify/src/protocol.rs b/protocols/identify/src/protocol.rs index 803b79bf79c..c6b22b00c0a 100644 --- a/protocols/identify/src/protocol.rs +++ b/protocols/identify/src/protocol.rs @@ -25,7 +25,6 @@ use libp2p_core::{multiaddr, Multiaddr}; use libp2p_identity as identity; use libp2p_identity::PublicKey; use libp2p_swarm::StreamProtocol; -use log::{debug, trace}; use std::convert::TryFrom; use std::io; use thiserror::Error; @@ -94,7 +93,7 @@ pub(crate) async fn send_identify(io: T, info: Info) -> Result>) -> Vec { .filter_map(|bytes| match Multiaddr::try_from(bytes) { Ok(a) => Some(a), Err(e) => { - debug!("Unable to parse multiaddr: {e:?}"); + tracing::debug!("Unable to parse multiaddr: {e:?}"); None } }) @@ -181,7 +180,7 @@ fn parse_protocols(protocols: Vec) -> Vec { .filter_map(|p| match StreamProtocol::try_from_owned(p) { Ok(p) => Some(p), Err(e) => { - debug!("Received invalid protocol from peer: {e}"); + tracing::debug!("Received invalid protocol from peer: {e}"); None } }) @@ -192,7 +191,7 @@ fn parse_public_key(public_key: Option>) -> Option { public_key.and_then(|key| match PublicKey::try_decode_protobuf(&key) { Ok(k) => Some(k), Err(e) => { - debug!("Unable to decode public key: {e:?}"); + tracing::debug!("Unable to decode public key: {e:?}"); None } }) @@ -202,7 +201,7 @@ fn parse_observed_addr(observed_addr: Option>) -> Option { observed_addr.and_then(|bytes| match Multiaddr::try_from(bytes) { Ok(a) => Some(a), Err(e) => { - debug!("Unable to parse observed multiaddr: {e:?}"); + tracing::debug!("Unable to parse observed multiaddr: {e:?}"); None } }) diff --git a/protocols/identify/tests/smoke.rs b/protocols/identify/tests/smoke.rs index 9a61ccccdd4..5cccc09d863 100644 --- a/protocols/identify/tests/smoke.rs +++ b/protocols/identify/tests/smoke.rs @@ -6,10 +6,13 @@ use libp2p_swarm_test::SwarmExt; use std::collections::HashSet; 
use std::iter; use std::time::{Duration, Instant}; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn periodic_identify() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -83,7 +86,9 @@ async fn periodic_identify() { } #[async_std::test] async fn only_emits_address_candidate_once_per_connection() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new( @@ -153,7 +158,9 @@ async fn only_emits_address_candidate_once_per_connection() { #[async_std::test] async fn identify_push() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) @@ -203,7 +210,9 @@ async fn identify_push() { #[async_std::test] async fn discover_peer_after_disconnect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut swarm1 = Swarm::new_ephemeral(|identity| { identify::Behaviour::new(identify::Config::new("a".to_string(), identity.public())) @@ -254,7 +263,9 @@ async fn discover_peer_after_disconnect() { #[async_std::test] async fn configured_interval_starts_after_first_identify() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let identify_interval = Duration::from_secs(5); diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 213cdc9623d..9410ff0eebe 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -17,7 +17,6 @@ either = "1.9" fnv = "1.0" asynchronous-codec = "0.6" futures = "0.3.29" -log = "0.4" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } quick-protobuf = "0.8" @@ -33,10 +32,10 @@ futures-timer = "3.0.2" instant = "0.1.12" serde = { version = "1.0", optional = true, features = ["derive"] } thiserror = "1" +tracing = "0.1.37" [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -env_logger = "0.10.0" futures-timer = "3.0" libp2p-identify = { path = "../identify" } libp2p-noise = { workspace = true } @@ -44,6 +43,7 @@ libp2p-swarm = { path = "../../swarm", features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] serde = ["dep:serde", "bytes/serde"] diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 0b187955e39..cc80b9c1be9 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -47,7 +47,6 @@ use libp2p_swarm::{ ListenAddresses, NetworkBehaviour, NotifyHandler, StreamProtocol, THandler, THandlerInEvent, THandlerOutEvent, ToSwarm, }; -use log::{debug, info, warn}; use smallvec::SmallVec; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::fmt; @@ -56,6 +55,7 @@ use std::task::{Context, Poll, Waker}; use std::time::Duration; use std::vec; use thiserror::Error; +use tracing::Level; pub use crate::query::QueryStats; @@ -561,7 +561,7 @@ where RoutingUpdate::Success 
} kbucket::InsertResult::Full => { - debug!("Bucket full. Peer not added to routing table: {}", peer); + tracing::debug!(%peer, "Bucket full. Peer not added to routing table"); RoutingUpdate::Failed } kbucket::InsertResult::Pending { disconnected } => { @@ -1012,7 +1012,7 @@ where let num_connections = self.connections.len(); - log::debug!( + tracing::debug!( "Re-configuring {} established connection{}", num_connections, if num_connections > 1 { "s" } else { "" } @@ -1037,7 +1037,7 @@ where self.mode = match (self.external_addresses.as_slice(), self.mode) { ([], Mode::Server) => { - log::debug!("Switching to client-mode because we no longer have any confirmed external addresses"); + tracing::debug!("Switching to client-mode because we no longer have any confirmed external addresses"); Mode::Client } @@ -1047,11 +1047,11 @@ where Mode::Client } (confirmed_external_addresses, Mode::Client) => { - if log::log_enabled!(log::Level::Debug) { + if tracing::enabled!(Level::DEBUG) { let confirmed_external_addresses = to_comma_separated_list(confirmed_external_addresses); - log::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable"); + tracing::debug!("Switching to server-mode assuming that one of [{confirmed_external_addresses}] is externally reachable"); } Mode::Server @@ -1086,13 +1086,13 @@ where let local_id = self.kbuckets.local_key().preimage(); let others_iter = peers.filter(|p| &p.node_id != local_id); if let Some(query) = self.queries.get_mut(query_id) { - log::trace!("Request to {:?} in query {:?} succeeded.", source, query_id); + tracing::trace!(peer=%source, query=?query_id, "Request to peer in query succeeded"); for peer in others_iter.clone() { - log::trace!( - "Peer {:?} reported by {:?} in query {:?}.", - peer, - source, - query_id + tracing::trace!( + ?peer, + %source, + query=?query_id, + "Peer reported by source in query" ); let addrs = peer.multiaddrs.iter().cloned().collect(); query.inner.addresses.insert(peer.node_id, addrs); @@ -1282,7 +1282,10 @@ where self.queued_events.push_back(ToSwarm::GenerateEvent(event)); } kbucket::InsertResult::Full => { - debug!("Bucket full. Peer not added to routing table: {}", peer); + tracing::debug!( + %peer, + "Bucket full. Peer not added to routing table" + ); let address = addresses.first().clone(); self.queued_events.push_back(ToSwarm::GenerateEvent( Event::RoutablePeer { peer, address }, @@ -1319,7 +1322,7 @@ where /// Handles a finished (i.e. successful) query. fn query_finished(&mut self, q: Query) -> Option { let query_id = q.id(); - log::trace!("Query {:?} finished.", query_id); + tracing::trace!(query=?query_id, "Query finished"); let result = q.into_result(); match result.inner.info { QueryInfo::Bootstrap { @@ -1546,7 +1549,7 @@ where step: ProgressStep::first_and_last(), }), PutRecordContext::Replicate => { - debug!("Record replicated: {:?}", record.key); + tracing::debug!(record=?record.key, "Record replicated"); None } } @@ -1557,7 +1560,7 @@ where /// Handles a query that timed out. 
fn query_timeout(&mut self, query: Query) -> Option { let query_id = query.id(); - log::trace!("Query {:?} timed out.", query_id); + tracing::trace!(query=?query_id, "Query timed out"); let result = query.into_result(); match result.inner.info { QueryInfo::Bootstrap { @@ -1655,11 +1658,14 @@ where }), PutRecordContext::Replicate => match phase { PutRecordPhase::GetClosestPeers => { - warn!("Locating closest peers for replication failed: {:?}", err); + tracing::warn!( + "Locating closest peers for replication failed: {:?}", + err + ); None } PutRecordPhase::PutRecord { .. } => { - debug!("Replicating record failed: {:?}", err); + tracing::debug!("Replicating record failed: {:?}", err); None } }, @@ -1759,9 +1765,9 @@ where match self.record_filtering { StoreInserts::Unfiltered => match self.store.put(record.clone()) { Ok(()) => { - debug!( - "Record stored: {:?}; {} bytes", - record.key, + tracing::debug!( + record=?record.key, + "Record stored: {} bytes", record.value.len() ); self.queued_events.push_back(ToSwarm::GenerateEvent( @@ -1775,7 +1781,7 @@ where )); } Err(e) => { - info!("Record not stored: {:?}", e); + tracing::info!("Record not stored: {:?}", e); self.queued_events.push_back(ToSwarm::NotifyHandler { peer_id: source, handler: NotifyHandler::One(connection), @@ -1828,7 +1834,7 @@ where match self.record_filtering { StoreInserts::Unfiltered => { if let Err(e) = self.store.add_provider(record) { - info!("Provider record not stored: {:?}", e); + tracing::info!("Provider record not stored: {:?}", e); return; } @@ -1859,9 +1865,10 @@ where // of the error is not possible (and also not truly desirable or ergonomic). // The error passed in should rather be a dedicated enum. if addrs.remove(address).is_ok() { - debug!( - "Address '{}' removed from peer '{}' due to error.", - address, peer_id + tracing::debug!( + peer=%peer_id, + %address, + "Address removed from peer due to error." ); } else { // Despite apparently having no reachable address (any longer), @@ -1873,10 +1880,11 @@ where // into the same bucket. This is handled transparently by the // `KBucketsTable` and takes effect through `KBucketsTable::take_applied_pending` // within `Behaviour::poll`. - debug!( - "Last remaining address '{}' of peer '{}' is unreachable.", - address, peer_id, - ) + tracing::debug!( + peer=%peer_id, + %address, + "Last remaining address of peer is unreachable." + ); } } @@ -1920,22 +1928,27 @@ where // Update routing table. if let Some(addrs) = self.kbuckets.entry(&kbucket::Key::from(peer)).value() { if addrs.replace(old, new) { - debug!( - "Address '{}' replaced with '{}' for peer '{}'.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address replaced with new address for peer." ); } else { - debug!( - "Address '{}' not replaced with '{}' for peer '{}' as old address wasn't \ - present.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address not replaced with new address for peer as old address wasn't present.", ); } } else { - debug!( - "Address '{}' not replaced with '{}' for peer '{}' as peer is not present in the \ - routing table.", - old, new, peer + tracing::debug!( + %peer, + old_address=%old, + new_address=%new, + "Old address not replaced with new address for peer as peer is not present in the \ + routing table." 
); } @@ -2073,7 +2086,6 @@ where connected_point, peer, self.mode, - connection_id, ); self.preload_new_handler(&mut handler, connection_id, peer); @@ -2097,7 +2109,6 @@ where connected_point, peer, self.mode, - connection_id, ); self.preload_new_handler(&mut handler, connection_id, peer); @@ -2253,12 +2264,11 @@ where } } } - HandlerEvent::QueryError { query_id, error } => { - log::debug!( - "Request to {:?} in query {:?} failed with {:?}", - source, - query_id, + tracing::debug!( + peer=%source, + query=?query_id, + "Request to peer in query failed with {:?}", error ); // If the query to which the error relates is still active, @@ -2346,7 +2356,7 @@ where *step = step.next(); } else { - log::trace!("Record with key {:?} not found at {}", key, source); + tracing::trace!(record=?key, %source, "Record not found at source"); if let Caching::Enabled { max_peers } = self.caching { let source_key = kbucket::Key::from(source); let target_key = kbucket::Key::from(key.clone()); @@ -2387,13 +2397,13 @@ where let peers = success.clone(); let finished = query.try_finish(peers.iter()); if !finished { - debug!( - "PutRecord query ({:?}) reached quorum ({}/{}) with response \ - from peer {} but could not yet finish.", - query_id, + tracing::debug!( + peer=%source, + query=?query_id, + "PutRecord query reached quorum ({}/{}) with response \ + from peer but could not yet finish.", peers.len(), quorum, - source, ); } } @@ -2403,6 +2413,7 @@ where }; } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/kad/src/behaviour/test.rs b/protocols/kad/src/behaviour/test.rs index f75c59b64b0..522eebcba92 100644 --- a/protocols/kad/src/behaviour/test.rs +++ b/protocols/kad/src/behaviour/test.rs @@ -321,7 +321,9 @@ fn query_iter() { #[test] fn unresponsive_not_returned_direct() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); // Build one node. It contains fake addresses to non-existing nodes. We ask it to find a // random peer. We make sure that no fake address is returned. diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index fce77bc13e4..0f36800a904 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -33,10 +33,9 @@ use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, ConnectionId, Stream, StreamUpgradeError, - SubstreamProtocol, SupportedProtocols, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol, + SupportedProtocols, }; -use log::trace; use std::collections::VecDeque; use std::task::Waker; use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll}; @@ -84,9 +83,6 @@ pub struct Handler { protocol_status: Option, remote_supported_protocols: SupportedProtocols, - - /// The ID of this connection. - connection_id: ConnectionId, } /// The states of protocol confirmation that a connection @@ -459,17 +455,20 @@ impl Handler { endpoint: ConnectedPoint, remote_peer_id: PeerId, mode: Mode, - connection_id: ConnectionId, ) -> Self { match &endpoint { ConnectedPoint::Dialer { .. 
} => { - log::debug!( - "Operating in {mode}-mode on new outbound connection to {remote_peer_id}" + tracing::debug!( + peer=%remote_peer_id, + mode=%mode, + "New outbound connection" ); } ConnectedPoint::Listener { .. } => { - log::debug!( - "Operating in {mode}-mode on new inbound connection to {remote_peer_id}" + tracing::debug!( + peer=%remote_peer_id, + mode=%mode, + "New inbound connection" ); } } @@ -486,7 +485,6 @@ impl Handler { pending_messages: Default::default(), protocol_status: None, remote_supported_protocols: Default::default(), - connection_id, } } @@ -550,16 +548,16 @@ impl Handler { ) }) { *s = InboundSubstreamState::Cancelled; - log::debug!( - "New inbound substream to {:?} exceeds inbound substream limit. \ - Removed older substream waiting to be reused.", - self.remote_peer_id, + tracing::debug!( + peer=?self.remote_peer_id, + "New inbound substream to peer exceeds inbound substream limit. \ + Removed older substream waiting to be reused." ) } else { - log::warn!( - "New inbound substream to {:?} exceeds inbound substream limit. \ - No older substream waiting to be reused. Dropping new substream.", - self.remote_peer_id, + tracing::warn!( + peer=?self.remote_peer_id, + "New inbound substream to peer exceeds inbound substream limit. \ + No older substream waiting to be reused. Dropping new substream." ); return; } @@ -688,12 +686,18 @@ impl ConnectionHandler for Handler { match &self.endpoint { ConnectedPoint::Dialer { .. } => { - log::debug!( - "Now operating in {new_mode}-mode on outbound connection with {peer}" + tracing::debug!( + %peer, + mode=%new_mode, + "Changed mode on outbound connection" ) } ConnectedPoint::Listener { local_addr, .. } => { - log::debug!("Now operating in {new_mode}-mode on inbound connection with {peer} assuming that one of our external addresses routes to {local_addr}") + tracing::debug!( + %peer, + mode=%new_mode, + local_address=%local_addr, + "Changed mode on inbound connection assuming that one of our external addresses routes to the local address") } } @@ -702,6 +706,7 @@ impl ConnectionHandler for Handler { } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -787,8 +792,6 @@ impl ConnectionHandler for Handler { self.protocol_status = Some(compute_new_protocol_status( remote_supports_our_kademlia_protocols, self.protocol_status, - self.remote_peer_id, - self.connection_id, )) } } @@ -799,8 +802,6 @@ impl ConnectionHandler for Handler { fn compute_new_protocol_status( now_supported: bool, current_status: Option, - remote_peer_id: PeerId, - connection_id: ConnectionId, ) -> ProtocolStatus { let current_status = match current_status { None => { @@ -820,9 +821,9 @@ fn compute_new_protocol_status( } if now_supported { - log::debug!("Remote {remote_peer_id} now supports our kademlia protocol on connection {connection_id}"); + tracing::debug!("Remote now supports our kademlia protocol"); } else { - log::debug!("Remote {remote_peer_id} no longer supports our kademlia protocol on connection {connection_id}"); + tracing::debug!("Remote no longer supports our kademlia protocol"); } ProtocolStatus { @@ -997,7 +998,7 @@ impl futures::Stream for InboundSubstreamState { mut substream, } => match substream.poll_next_unpin(cx) { Poll::Ready(Some(Ok(KadRequestMsg::Ping))) => { - log::warn!("Kademlia PING messages are unsupported"); + tracing::warn!("Kademlia PING messages are unsupported"); *this = InboundSubstreamState::Closing(substream); } @@ -1071,7 +1072,7 @@ 
impl futures::Stream for InboundSubstreamState { return Poll::Ready(None); } Poll::Ready(Some(Err(e))) => { - trace!("Inbound substream error: {:?}", e); + tracing::trace!("Inbound substream error: {:?}", e); return Poll::Ready(None); } }, @@ -1172,6 +1173,7 @@ fn process_kad_response(event: KadResponseMsg, query_id: QueryId) -> HandlerEven mod tests { use super::*; use quickcheck::{Arbitrary, Gen}; + use tracing_subscriber::EnvFilter; impl Arbitrary for ProtocolStatus { fn arbitrary(g: &mut Gen) -> Self { @@ -1184,15 +1186,12 @@ mod tests { #[test] fn compute_next_protocol_status_test() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(now_supported: bool, current: Option) { - let new = compute_new_protocol_status( - now_supported, - current, - PeerId::random(), - ConnectionId::new_unchecked(0), - ); + let new = compute_new_protocol_status(now_supported, current); match current { None => { diff --git a/protocols/kad/src/protocol.rs b/protocols/kad/src/protocol.rs index 1cf14745675..247b12bb4cd 100644 --- a/protocols/kad/src/protocol.rs +++ b/protocols/kad/src/protocol.rs @@ -106,7 +106,7 @@ impl TryFrom for KadPeer { match Multiaddr::try_from(addr) { Ok(a) => addrs.push(a), Err(e) => { - log::debug!("Unable to parse multiaddr: {e}"); + tracing::debug!("Unable to parse multiaddr: {e}"); } }; } diff --git a/protocols/kad/tests/client_mode.rs b/protocols/kad/tests/client_mode.rs index 5324e679ab9..f290a36b727 100644 --- a/protocols/kad/tests/client_mode.rs +++ b/protocols/kad/tests/client_mode.rs @@ -4,12 +4,15 @@ use libp2p_kad::store::MemoryStore; use libp2p_kad::{Behaviour, Config, Event, Mode}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use tracing_subscriber::EnvFilter; use Event::*; use MyBehaviourEvent::*; #[async_std::test] async fn server_gets_added_to_routing_table_by_client() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); @@ -32,7 +35,9 @@ async fn server_gets_added_to_routing_table_by_client() { #[async_std::test] async fn two_servers_add_each_other_to_routing_table() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut server1 = Swarm::new_ephemeral(MyBehaviour::new); let mut server2 = Swarm::new_ephemeral(MyBehaviour::new); @@ -71,7 +76,9 @@ async fn two_servers_add_each_other_to_routing_table() { #[async_std::test] async fn adding_an_external_addresses_activates_server_mode_on_existing_connections() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); let mut server = Swarm::new_ephemeral(MyBehaviour::new); @@ -105,7 +112,9 @@ async fn adding_an_external_addresses_activates_server_mode_on_existing_connecti #[async_std::test] async fn set_client_to_server_mode() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut client = Swarm::new_ephemeral(MyBehaviour::new); client.behaviour_mut().kad.set_mode(Some(Mode::Client)); diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 78e31bc9980..ef67a7e51b1 100644 --- 
a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -19,11 +19,11 @@ if-watch = "3.1.0" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" rand = "0.8.3" smallvec = "1.11.1" socket2 = { version = "0.5.5", features = ["all"] } tokio = { version = "1.33", default-features = false, features = ["net", "time"], optional = true} +tracing = "0.1.37" trust-dns-proto = { version = "0.23.0", default-features = false, features = ["mdns"] } void = "1.0.2" @@ -33,13 +33,13 @@ async-io = ["dep:async-io", "dep:async-std", "if-watch/smol"] [dev-dependencies] async-std = { version = "1.9.0", features = ["attributes"] } -env_logger = "0.10.0" libp2p-noise = { workspace = true } libp2p-swarm = { workspace = true, features = ["tokio", "async-std"] } libp2p-tcp = { workspace = true, features = ["tokio", "async-io"] } libp2p-yamux = { workspace = true } tokio = { version = "1.33", default-features = false, features = ["macros", "rt", "rt-multi-thread", "time"] } libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "use-async-std" diff --git a/protocols/mdns/src/behaviour.rs b/protocols/mdns/src/behaviour.rs index a460d56ad18..e1652db4762 100644 --- a/protocols/mdns/src/behaviour.rs +++ b/protocols/mdns/src/behaviour.rs @@ -282,6 +282,7 @@ where .on_swarm_event(&event); } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -310,18 +311,20 @@ where Ok(iface_state) => { e.insert(P::spawn(iface_state)); } - Err(err) => log::error!("failed to create `InterfaceState`: {}", err), + Err(err) => { + tracing::error!("failed to create `InterfaceState`: {}", err) + } } } } Ok(IfEvent::Down(inet)) => { if let Some(handle) = self.if_tasks.remove(&inet.addr()) { - log::info!("dropping instance {}", inet.addr()); + tracing::info!(instance=%inet.addr(), "dropping instance"); handle.abort(); } } - Err(err) => log::error!("if watch returned an error: {}", err), + Err(err) => tracing::error!("if watch returned an error: {}", err), } } // Emit discovered event. 
@@ -337,7 +340,7 @@ where { *cur_expires = cmp::max(*cur_expires, expiration); } else { - log::info!("discovered: {} {}", peer, addr); + tracing::info!(%peer, address=%addr, "discovered peer on address"); self.discovered_nodes.push((peer, addr.clone(), expiration)); discovered.push((peer, addr)); } @@ -353,7 +356,7 @@ where let mut expired = Vec::new(); self.discovered_nodes.retain(|(peer, addr, expiration)| { if *expiration <= now { - log::info!("expired: {} {}", peer, addr); + tracing::info!(%peer, address=%addr, "expired peer on address"); expired.push((*peer, addr.clone())); return false; } diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index 47601088fdc..7fe97c38381 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -117,7 +117,7 @@ where listen_addresses: Arc>, query_response_sender: mpsc::Sender<(PeerId, Multiaddr, Instant)>, ) -> io::Result { - log::info!("creating instance on iface {}", addr); + tracing::info!(address=%addr, "creating instance on iface address"); let recv_socket = match addr { IpAddr::V4(addr) => { let socket = Socket::new(Domain::IPV4, Type::DGRAM, Some(socket2::Protocol::UDP))?; @@ -184,7 +184,7 @@ where } pub(crate) fn reset_timer(&mut self) { - log::trace!("reset timer on {:#?} {:#?}", self.addr, self.probe_state); + tracing::trace!(address=%self.addr, probe_state=?self.probe_state, "reset timer"); let interval = *self.probe_state.interval(); self.timeout = T::interval(interval); } @@ -207,9 +207,9 @@ where loop { // 1st priority: Low latency: Create packet ASAP after timeout. if this.timeout.poll_next_unpin(cx).is_ready() { - log::trace!("sending query on iface {}", this.addr); + tracing::trace!(address=%this.addr, "sending query on iface"); this.send_buffer.push_back(build_query()); - log::trace!("tick on {:#?} {:#?}", this.addr, this.probe_state); + tracing::trace!(address=%this.addr, probe_state=?this.probe_state, "tick"); // Stop to probe when the initial interval reach the query interval if let ProbeState::Probing(interval) = this.probe_state { @@ -228,11 +228,11 @@ where if let Some(packet) = this.send_buffer.pop_front() { match this.send_socket.poll_write(cx, &packet, this.mdns_socket()) { Poll::Ready(Ok(_)) => { - log::trace!("sent packet on iface {}", this.addr); + tracing::trace!(address=%this.addr, "sent packet on iface address"); continue; } Poll::Ready(Err(err)) => { - log::error!("error sending packet on iface {} {}", this.addr, err); + tracing::error!(address=%this.addr, "error sending packet on iface address {}", err); continue; } Poll::Pending => { @@ -265,10 +265,10 @@ where .map_ok(|(len, from)| MdnsPacket::new_from_bytes(&this.recv_buffer[..len], from)) { Poll::Ready(Ok(Ok(Some(MdnsPacket::Query(query))))) => { - log::trace!( - "received query from {} on {}", - query.remote_addr(), - this.addr + tracing::trace!( + address=%this.addr, + remote_address=%query.remote_addr(), + "received query from remote address on address" ); this.send_buffer.extend(build_query_response( @@ -283,10 +283,10 @@ where continue; } Poll::Ready(Ok(Ok(Some(MdnsPacket::Response(response))))) => { - log::trace!( - "received response from {} on {}", - response.remote_addr(), - this.addr + tracing::trace!( + address=%this.addr, + remote_address=%response.remote_addr(), + "received response from remote address on address" ); this.discovered @@ -300,10 +300,10 @@ where continue; } Poll::Ready(Ok(Ok(Some(MdnsPacket::ServiceDiscovery(disc))))) => { - log::trace!( - "received service 
discovery from {} on {}", - disc.remote_addr(), - this.addr + tracing::trace!( + address=%this.addr, + remote_address=%disc.remote_addr(), + "received service discovery from remote address on address" ); this.send_buffer @@ -314,10 +314,10 @@ where // No more bytes available on the socket to read } Poll::Ready(Err(err)) => { - log::error!("failed reading datagram: {}", err); + tracing::error!("failed reading datagram: {}", err); } Poll::Ready(Ok(Err(err))) => { - log::debug!("Parsing mdns packet failed: {:?}", err); + tracing::debug!("Parsing mdns packet failed: {:?}", err); } Poll::Ready(Ok(Ok(None))) | Poll::Pending => {} } diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 6a10497e69f..61fd5d329b9 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -134,7 +134,7 @@ pub(crate) fn build_query_response<'a>( records.push(txt_record); } Err(e) => { - log::warn!("Excluding address {} from response: {:?}", addr, e); + tracing::warn!(address=%addr, "Excluding address from response: {:?}", e); } } diff --git a/protocols/mdns/tests/use-async-std.rs b/protocols/mdns/tests/use-async-std.rs index 6d45d92cdd9..549f70978af 100644 --- a/protocols/mdns/tests/use-async-std.rs +++ b/protocols/mdns/tests/use-async-std.rs @@ -24,17 +24,22 @@ use libp2p_mdns::{async_io::Behaviour, Config}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn test_discovery_async_std_ipv4() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); run_discovery_test(Config::default()).await } #[async_std::test] async fn test_discovery_async_std_ipv6() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { enable_ipv6: true, @@ -45,7 +50,9 @@ async fn test_discovery_async_std_ipv6() { #[async_std::test] async fn test_expired_async_std() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(1), @@ -78,7 +85,9 @@ async fn test_expired_async_std() { #[async_std::test] async fn test_no_expiration_on_close_async_std() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(120), query_interval: Duration::from_secs(10), diff --git a/protocols/mdns/tests/use-tokio.rs b/protocols/mdns/tests/use-tokio.rs index 50d6be0c00f..cf0d9f4bed4 100644 --- a/protocols/mdns/tests/use-tokio.rs +++ b/protocols/mdns/tests/use-tokio.rs @@ -22,17 +22,22 @@ use libp2p_mdns::{tokio::Behaviour, Config, Event}; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt as _; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn test_discovery_tokio_ipv4() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); run_discovery_test(Config::default()).await } #[tokio::test] async fn test_discovery_tokio_ipv6() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { enable_ipv6: true, @@ -43,7 +48,9 @@ 
async fn test_discovery_tokio_ipv6() { #[tokio::test] async fn test_expired_tokio() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let config = Config { ttl: Duration::from_secs(1), diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index 8180b928691..18ff36a5cff 100644 --- a/protocols/perf/Cargo.toml +++ b/protocols/perf/Cargo.toml @@ -12,8 +12,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] anyhow = "1" -clap = { version = "4.4.7", features = ["derive"] } -env_logger = "0.10.0" +clap = { version = "4.4.6", features = ["derive"] } futures = "0.3.29" futures-bounded = { workspace = true } futures-timer = "3.0" @@ -27,10 +26,11 @@ libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } libp2p-tcp = { workspace = true, features = ["tokio"] } libp2p-tls = { workspace = true } libp2p-yamux = { workspace = true } -log = "0.4" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" +tracing = "0.1.37" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } tokio = { version = "1.33", default-features = false, features = ["macros", "rt", "rt-multi-thread"] } void = "1" diff --git a/protocols/perf/src/bin/perf.rs b/protocols/perf/src/bin/perf.rs index 61371317ed2..9ac8f0a6cde 100644 --- a/protocols/perf/src/bin/perf.rs +++ b/protocols/perf/src/bin/perf.rs @@ -30,8 +30,8 @@ use libp2p::swarm::{NetworkBehaviour, Swarm, SwarmEvent}; use libp2p::SwarmBuilder; use libp2p_perf::{client, server}; use libp2p_perf::{Final, Intermediate, Run, RunParams, RunUpdate}; -use log::{error, info}; use serde::{Deserialize, Serialize}; +use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] #[clap(name = "libp2p perf client")] @@ -71,9 +71,9 @@ impl FromStr for Transport { #[tokio::main] async fn main() -> Result<()> { - env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")) - .format_timestamp_millis() - .init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let opts = Opts::parse(); match opts { @@ -121,20 +121,20 @@ async fn server(server_address: SocketAddr) -> Result<()> { loop { match swarm.next().await.unwrap() { SwarmEvent::NewListenAddr { address, .. } => { - info!("Listening on {address}"); + tracing::info!(%address, "Listening on address"); } SwarmEvent::IncomingConnection { .. } => {} e @ SwarmEvent::IncomingConnectionError { .. } => { - error!("{e:?}"); + tracing::error!("{e:?}"); } SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. } => { - info!("Established connection to {:?} via {:?}", peer_id, endpoint); + tracing::info!(peer=%peer_id, ?endpoint, "Established new connection"); } SwarmEvent::ConnectionClosed { .. } => {} SwarmEvent::Behaviour(server::Event { .. 
}) => { - info!("Finished run",) + tracing::info!("Finished run",) } e => panic!("{e:?}"), } @@ -168,7 +168,7 @@ async fn client( let mut swarm = swarm().await?; tokio::spawn(async move { - info!("start benchmark: custom"); + tracing::info!("start benchmark: custom"); let start = Instant::now(); @@ -241,7 +241,7 @@ async fn connect( let duration = start.elapsed(); let duration_seconds = duration.as_secs_f64(); - info!("established connection in {duration_seconds:.4} s"); + tracing::info!(elapsed_time=%format!("{duration_seconds:.4} s")); Ok(server_peer_id) } @@ -259,7 +259,7 @@ async fn perf( id: _, result: Ok(RunUpdate::Intermediate(progressed)), }) => { - info!("{progressed}"); + tracing::info!("{progressed}"); let Intermediate { duration, @@ -288,7 +288,7 @@ async fn perf( let run = Run { params, duration }; - info!("{run}"); + tracing::info!("{run}"); Ok(run) } diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs index a4dc354fac0..79c73d55102 100644 --- a/protocols/perf/src/client/behaviour.rs +++ b/protocols/perf/src/client/behaviour.rs @@ -145,6 +145,7 @@ impl NetworkBehaviour for Behaviour { .push_back(ToSwarm::GenerateEvent(Event { id, result })); } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, _cx))] fn poll( &mut self, _cx: &mut Context<'_>, diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index a9bb0c7d483..d5d05284a85 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -153,6 +153,7 @@ impl ConnectionHandler for Handler { } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs index c699f706d87..370bc2ae188 100644 --- a/protocols/perf/src/server/behaviour.rs +++ b/protocols/perf/src/server/behaviour.rs @@ -105,6 +105,7 @@ impl NetworkBehaviour for Behaviour { })) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, _cx))] fn poll( &mut self, _cx: &mut Context<'_>, diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index 4e739995b67..7f262ac4820 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -29,7 +29,7 @@ use libp2p_swarm::{ }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use log::error; +use tracing::error; use void::Void; use crate::Run; @@ -96,7 +96,7 @@ impl ConnectionHandler for Handler { .try_push(crate::protocol::receive_send(protocol).boxed()) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity"); + tracing::warn!("Dropping inbound stream because we are at capacity"); } } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { info, .. 
}) => { @@ -115,6 +115,7 @@ impl ConnectionHandler for Handler { } } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/perf/tests/lib.rs b/protocols/perf/tests/lib.rs index 447d8a06110..017d475befd 100644 --- a/protocols/perf/tests/lib.rs +++ b/protocols/perf/tests/lib.rs @@ -24,10 +24,13 @@ use libp2p_perf::{ }; use libp2p_swarm::{Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn perf() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut server = Swarm::new_ephemeral(|_| server::Behaviour::new()); let server_peer_id = *server.local_peer_id(); diff --git a/protocols/ping/Cargo.toml b/protocols/ping/Cargo.toml index e1ec6d75871..28ae5d39b62 100644 --- a/protocols/ping/Cargo.toml +++ b/protocols/ping/Cargo.toml @@ -18,16 +18,16 @@ instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" rand = "0.8" +tracing = "0.1.37" void = "1.0" [dev-dependencies] async-std = "1.6.2" -env_logger = "0.10.0" libp2p-swarm = { workspace = true, features = ["macros"] } libp2p-swarm-test = { path = "../../swarm-test" } quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 3a92ef4b249..71ebcd97261 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -23,7 +23,6 @@ use futures::future::{BoxFuture, Either}; use futures::prelude::*; use futures_timer::Delay; use libp2p_core::upgrade::ReadyUpgrade; -use libp2p_identity::PeerId; use libp2p_swarm::handler::{ ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, }; @@ -147,8 +146,6 @@ pub struct Handler { inbound: Option, /// Tracks the state of our handler. state: State, - /// The peer we are connected to. - peer: PeerId, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -166,9 +163,8 @@ enum State { impl Handler { /// Builds a new [`Handler`] with the given configuration. - pub fn new(config: Config, peer: PeerId) -> Self { + pub fn new(config: Config) -> Self { Handler { - peer, config, interval: Delay::new(Duration::new(0, 0)), pending_errors: VecDeque::with_capacity(2), @@ -225,6 +221,7 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, _: Void) {} + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -254,11 +251,11 @@ impl ConnectionHandler for Handler { match fut.poll_unpin(cx) { Poll::Pending => {} Poll::Ready(Err(e)) => { - log::debug!("Inbound ping error: {:?}", e); + tracing::debug!("Inbound ping error: {:?}", e); self.inbound = None; } Poll::Ready(Ok(stream)) => { - log::trace!("answered inbound ping from {}", self.peer); + tracing::trace!("answered inbound ping from peer"); // A ping from a remote peer has been answered, wait for the next. self.inbound = Some(protocol::recv_ping(stream).boxed()); @@ -269,7 +266,7 @@ impl ConnectionHandler for Handler { loop { // Check for outbound ping failures. 
if let Some(error) = self.pending_errors.pop_back() { - log::debug!("Ping failure: {:?}", error); + tracing::debug!("Ping failure: {:?}", error); self.failures += 1; @@ -291,8 +288,7 @@ impl ConnectionHandler for Handler { break; } Poll::Ready(Ok((stream, rtt))) => { - log::debug!("latency to {} is {}ms", self.peer, rtt.as_millis()); - + tracing::debug!(?rtt, "ping succeeded"); self.failures = 0; self.interval.reset(self.config.interval); self.outbound = Some(OutboundState::Idle(stream)); diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 3e3d14477b5..3e17db300e7 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -111,21 +111,21 @@ impl NetworkBehaviour for Behaviour { fn handle_established_inbound_connection( &mut self, _: ConnectionId, - peer: PeerId, + _: PeerId, _: &Multiaddr, _: &Multiaddr, ) -> Result, ConnectionDenied> { - Ok(Handler::new(self.config.clone(), peer)) + Ok(Handler::new(self.config.clone())) } fn handle_established_outbound_connection( &mut self, _: ConnectionId, - peer: PeerId, + _: PeerId, _: &Multiaddr, _: Endpoint, ) -> Result, ConnectionDenied> { - Ok(Handler::new(self.config.clone(), peer)) + Ok(Handler::new(self.config.clone())) } fn on_connection_handler_event( @@ -141,6 +141,7 @@ impl NetworkBehaviour for Behaviour { }) } + #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self))] fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { if let Some(e) = self.events.pop_back() { Poll::Ready(ToSwarm::GenerateEvent(e)) diff --git a/protocols/relay/Cargo.toml b/protocols/relay/Cargo.toml index bca55217a2a..7ad23af9b0a 100644 --- a/protocols/relay/Cargo.toml +++ b/protocols/relay/Cargo.toml @@ -21,23 +21,24 @@ instant = "0.1.12" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8.4" static_assertions = "1" thiserror = "1.0" +tracing = "0.1.37" void = "1" [dev-dependencies] -env_logger = "0.10.0" libp2p-identity = { workspace = true, features = ["rand"] } libp2p-ping = { workspace = true } libp2p-plaintext = { workspace = true } libp2p-swarm = { workspace = true, features = ["macros", "async-std"] } +libp2p-swarm-test = { workspace = true } libp2p-yamux = { workspace = true } quickcheck = { workspace = true } -libp2p-swarm-test = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 5b9f1fe5843..98e2a5a53bb 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -707,7 +707,11 @@ impl NetworkBehaviour for Behaviour { } } - fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, _cx))] + fn poll( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll>> { if let Some(to_swarm) = self.queued_actions.pop_front() { return Poll::Ready(to_swarm); } diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index a2ba268392f..361fb8ac333 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -409,7 +409,7 @@ impl Handler { )) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity") + tracing::warn!("Dropping inbound stream because we are at capacity") } } @@ -432,7 +432,7 @@ impl Handler { ) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } self.active_connect_requests @@ -505,7 +505,7 @@ impl ConnectionHandler for Handler { )) .is_some() { - log::warn!("Dropping existing deny/accept future in favor of new one.") + tracing::warn!("Dropping existing deny/accept future in favor of new one") } } In::DenyReservationReq { @@ -519,7 +519,7 @@ impl ConnectionHandler for Handler { )) .is_some() { - log::warn!("Dropping existing deny/accept future in favor of new one.") + tracing::warn!("Dropping existing deny/accept future in favor of new one") } } In::NegotiateOutboundConnect { @@ -588,6 +588,7 @@ impl ConnectionHandler for Handler { Instant::now().duration_since(idle_at) <= Duration::from_secs(10) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -651,11 +652,11 @@ impl ConnectionHandler for Handler { )); } Poll::Ready(Err(e)) => { - log::debug!("Inbound stream operation timed out: {e}"); + tracing::debug!("Inbound stream operation timed out: {e}"); continue; } Poll::Ready(Ok(Err(e))) => { - log::debug!("Inbound stream operation failed: {e}"); + tracing::debug!("Inbound stream operation failed: {e}"); continue; } Poll::Pending => { diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 3e79b60ef97..d884f15c7eb 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -35,7 +35,6 @@ use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; -use log::debug; use std::collections::VecDeque; use std::task::{Context, Poll}; use std::time::Duration; @@ -189,7 +188,7 @@ impl Handler { if let Err(e) = to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) { - log::debug!("Unable to send error to listener: {}", e.into_send_error()) + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) } self.reservation.failed(); } @@ -220,8 +219,9 @@ impl Handler { .try_push(circuit.deny(proto::Status::NO_RESERVATION)) .is_err() { - log::warn!( - "Dropping existing inbound circuit request to be denied from {src_peer_id} in favor of new one." 
+ tracing::warn!( + peer=%src_peer_id, + "Dropping existing inbound circuit request to be denied from peer in favor of new one" ) } } @@ -270,6 +270,7 @@ impl ConnectionHandler for Handler { self.reservation.is_some() } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -319,7 +320,7 @@ impl ConnectionHandler for Handler { if let Err(e) = to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) { - log::debug!("Unable to send error to listener: {}", e.into_send_error()) + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) } self.reservation.failed(); continue; @@ -335,7 +336,7 @@ impl ConnectionHandler for Handler { outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()), ))) { - log::debug!("Unable to send error to listener: {}", e.into_send_error()) + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) } self.reservation.failed(); continue; @@ -367,7 +368,7 @@ impl ConnectionHandler for Handler { })) .is_err() { - log::debug!( + tracing::debug!( "Dropping newly established circuit because the listener is gone" ); continue; @@ -397,7 +398,7 @@ impl ConnectionHandler for Handler { outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()), ))) { - log::debug!("Unable to send error to listener: {}", e.into_send_error()) + tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) } self.reservation.failed(); continue; @@ -437,11 +438,11 @@ impl ConnectionHandler for Handler { } }, Poll::Ready(Ok(Err(e))) => { - log::debug!("An inbound circuit request failed: {e}"); + tracing::debug!("An inbound circuit request failed: {e}"); continue; } Poll::Ready(Err(e)) => { - log::debug!("An inbound circuit request timed out: {e}"); + tracing::debug!("An inbound circuit request timed out: {e}"); continue; } Poll::Pending => {} @@ -460,11 +461,11 @@ impl ConnectionHandler for Handler { match self.inflight_outbound_circuit_deny_requests.poll_unpin(cx) { Poll::Ready(Ok(Ok(()))) => continue, Poll::Ready(Ok(Err(error))) => { - log::debug!("Denying inbound circuit failed: {error}"); + tracing::debug!("Denying inbound circuit failed: {error}"); continue; } Poll::Ready(Err(futures_bounded::Timeout { .. 
})) => { - log::debug!("Denying inbound circuit timed out"); + tracing::debug!("Denying inbound circuit timed out"); continue; } Poll::Pending => {} @@ -493,7 +494,7 @@ impl ConnectionHandler for Handler { .try_push(inbound_stop::handle_open_circuit(stream)) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity") + tracing::warn!("Dropping inbound stream because we are at capacity") } } ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { @@ -511,7 +512,7 @@ impl ConnectionHandler for Handler { .try_push(outbound_hop::make_reservation(stream)) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } } PendingRequest::Connect { @@ -525,7 +526,7 @@ impl ConnectionHandler for Handler { .try_push(outbound_hop::open_circuit(stream, dst_peer_id)) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } } } @@ -617,12 +618,12 @@ impl Reservation { if let Err(e) = to_listener .start_send(pending_msgs.pop_front().expect("Called !is_empty().")) { - debug!("Failed to sent pending message to listener: {:?}", e); + tracing::debug!("Failed to sent pending message to listener: {:?}", e); *self = Reservation::None; } } Poll::Ready(Err(e)) => { - debug!("Channel to listener failed: {:?}", e); + tracing::debug!("Channel to listener failed: {:?}", e); *self = Reservation::None; } Poll::Pending => {} diff --git a/protocols/relay/src/protocol/inbound_hop.rs b/protocols/relay/src/protocol/inbound_hop.rs index 69ec495261f..951ae579a2d 100644 --- a/protocols/relay/src/protocol/inbound_hop.rs +++ b/protocols/relay/src/protocol/inbound_hop.rs @@ -58,7 +58,7 @@ pub struct ReservationReq { impl ReservationReq { pub async fn accept(self, addrs: Vec) -> Result<(), Error> { if addrs.is_empty() { - log::debug!( + tracing::debug!( "Accepting relay reservation without providing external addresses of local node. \ Thus the remote node might not be able to advertise its relayed address." 
) diff --git a/protocols/relay/tests/lib.rs b/protocols/relay/tests/lib.rs index 39fc2b1f6dc..d57ab144e9f 100644 --- a/protocols/relay/tests/lib.rs +++ b/protocols/relay/tests/lib.rs @@ -38,10 +38,13 @@ use libp2p_swarm::{Config, DialError, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use std::error::Error; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[test] fn reservation() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -82,7 +85,9 @@ fn reservation() { #[test] fn new_reservation_to_same_relay_replaces_old() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -173,7 +178,9 @@ fn new_reservation_to_same_relay_replaces_old() { #[test] fn connect() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -257,7 +264,9 @@ async fn connection_established_to( #[test] fn handle_dial_failure() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -276,7 +285,9 @@ fn handle_dial_failure() { #[test] fn propagate_reservation_error_to_listener() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -323,7 +334,9 @@ fn propagate_reservation_error_to_listener() { #[test] fn propagate_connect_error_to_unknown_peer_to_dialer() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); @@ -377,7 +390,9 @@ fn propagate_connect_error_to_unknown_peer_to_dialer() { #[test] fn reuse_connection() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let mut pool = LocalPool::new(); let relay_addr = Multiaddr::empty().with(Protocol::Memory(rand::random::())); diff --git a/protocols/rendezvous/Cargo.toml b/protocols/rendezvous/Cargo.toml index a56cf737656..c5f1c6e5729 100644 --- a/protocols/rendezvous/Cargo.toml +++ b/protocols/rendezvous/Cargo.toml @@ -21,24 +21,24 @@ libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } libp2p-request-response = { workspace = true } -log = "0.4" quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } rand = "0.8" thiserror = "1" +tracing = "0.1.37" void = "1" [dev-dependencies] -env_logger = "0.10.0" libp2p-swarm = { workspace = true, features = ["macros", "tokio"] } libp2p-noise = { workspace = true } libp2p-ping = { workspace = true } libp2p-identify = { workspace = true } -libp2p-yamux = { workspace = true } +libp2p-swarm-test = 
{ path = "../../swarm-test" } libp2p-tcp = { workspace = true, features = ["tokio"] } +libp2p-yamux = { workspace = true } rand = "0.8" tokio = { version = "1.33", features = [ "rt-multi-thread", "time", "macros", "sync", "process", "fs", "net" ] } -libp2p-swarm-test = { path = "../../swarm-test" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs index e4aedd9da7a..c6072533194 100644 --- a/protocols/rendezvous/src/client.rs +++ b/protocols/rendezvous/src/client.rs @@ -232,7 +232,7 @@ impl NetworkBehaviour for Behaviour { let registered = self.registered_namespaces.clone(); for ((rz_node, ns), ttl) in registered { if let Err(e) = self.register(ns, rz_node, Some(ttl)) { - log::warn!("refreshing registration failed: {e}") + tracing::warn!("refreshing registration failed: {e}") } } } diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 8911f2cea01..886b64cc829 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -194,7 +194,11 @@ impl NetworkBehaviour for Behaviour { request_id, error, }) => { - log::warn!("Inbound request {request_id} with peer {peer} failed: {error}"); + tracing::warn!( + %peer, + request=%request_id, + "Inbound request with peer failed: {error}" + ); continue; } diff --git a/protocols/rendezvous/tests/rendezvous.rs b/protocols/rendezvous/tests/rendezvous.rs index fec56365768..c2de88fd615 100644 --- a/protocols/rendezvous/tests/rendezvous.rs +++ b/protocols/rendezvous/tests/rendezvous.rs @@ -29,10 +29,13 @@ use libp2p_swarm::{DialError, Swarm, SwarmEvent}; use libp2p_swarm_test::SwarmExt; use std::convert::TryInto; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn given_successful_registration_then_successful_discovery() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -85,7 +88,9 @@ async fn given_successful_registration_then_successful_discovery() { #[tokio::test] async fn should_return_error_when_no_external_addresses() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let server = new_server(rendezvous::server::Config::default()).await; let mut client = Swarm::new_ephemeral(rendezvous::client::Behaviour::new); @@ -100,7 +105,9 @@ async fn should_return_error_when_no_external_addresses() { #[tokio::test] async fn given_successful_registration_then_refresh_ttl() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -166,7 +173,9 @@ async fn given_successful_registration_then_refresh_ttl() { #[tokio::test] async fn given_successful_registration_then_refresh_external_addrs() { - let _ = env_logger::try_init(); + let _ 
= tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -217,7 +226,9 @@ async fn given_successful_registration_then_refresh_external_addrs() { #[tokio::test] async fn given_invalid_ttl_then_unsuccessful_registration() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -244,7 +255,9 @@ async fn given_invalid_ttl_then_unsuccessful_registration() { #[tokio::test] async fn discover_allows_for_dial_by_peer_id() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -299,7 +312,9 @@ async fn discover_allows_for_dial_by_peer_id() { #[tokio::test] async fn eve_cannot_register() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let mut robert = new_server(rendezvous::server::Config::default()).await; let mut eve = new_impersonating_client().await; @@ -325,7 +340,9 @@ async fn eve_cannot_register() { // test if charlie can operate as client and server simultaneously #[tokio::test] async fn can_combine_client_and_server() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice], mut robert) = new_server_with_connected_clients(rendezvous::server::Config::default()).await; @@ -361,7 +378,9 @@ async fn can_combine_client_and_server() { #[tokio::test] async fn registration_on_clients_expire() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let namespace = rendezvous::Namespace::from_static("some-namespace"); let ([mut alice, mut bob], robert) = new_server_with_connected_clients(rendezvous::server::Config::default().with_min_ttl(1)) diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index f0ed6ea5961..26a2d0ecc81 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -22,8 +22,8 @@ rand = "0.8" serde = { version = "1.0", optional = true} serde_json = { version = "1.0.107", optional = true } smallvec = "1.11.1" +tracing = "0.1.37" void = "1.0.2" -log = "0.4.20" futures-timer = "3.0.2" futures-bounded = { workspace = true } @@ -34,7 +34,6 @@ cbor = ["dep:serde", "dep:cbor4ii", "libp2p-swarm/macros"] [dev-dependencies] anyhow = "1.0.75" async-std = { version = "1.6.2", features = ["attributes"] } -env_logger = "0.10.0" libp2p-noise = { workspace = true } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } @@ -42,6 +41,7 @@ rand = "0.8" libp2p-swarm-test = { path = "../../swarm-test" } futures_ringbuf = "0.4.0" serde = { version 
= "1.0", features = ["derive"]} +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index f4f5bf96c6c..ef4b5b44fe0 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -164,7 +164,7 @@ where .try_push(RequestId::Inbound(request_id), recv.boxed()) .is_err() { - log::warn!("Dropping inbound stream because we are at capacity") + tracing::warn!("Dropping inbound stream because we are at capacity") } } @@ -204,7 +204,7 @@ where .try_push(RequestId::Outbound(request_id), send.boxed()) .is_err() { - log::warn!("Dropping outbound stream because we are at capacity") + tracing::warn!("Dropping outbound stream because we are at capacity") } } @@ -236,7 +236,7 @@ where } StreamUpgradeError::Apply(e) => void::unreachable(e), StreamUpgradeError::Io(e) => { - log::debug!( + tracing::debug!( "outbound stream for request {} failed: {e}, retrying", message.request_id ); @@ -386,6 +386,7 @@ where self.pending_outbound.push_back(request); } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index f036fb85956..68a6b689fe5 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -859,7 +859,7 @@ where .push_back(ToSwarm::GenerateEvent(Event::Message { peer, message })); } None => { - log::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); + tracing::debug!("Connection ({connection}) closed after `Event::Request` ({request_id}) has been emitted."); } }, handler::Event::ResponseSent(request_id) => { @@ -940,7 +940,9 @@ where })); } else { // This happens when timeout is emitted before `read_request` finishes. - log::debug!("Inbound request timeout for an unknown request_id ({request_id})"); + tracing::debug!( + "Inbound request timeout for an unknown request_id ({request_id})" + ); } } handler::Event::InboundStreamFailed { request_id, error } => { @@ -955,7 +957,7 @@ where })); } else { // This happens when `read_request` fails. 
- log::debug!("Inbound failure is reported for an unknown request_id ({request_id}): {error}"); + tracing::debug!("Inbound failure is reported for an unknown request_id ({request_id}): {error}"); } } } diff --git a/protocols/request-response/tests/error_reporting.rs b/protocols/request-response/tests/error_reporting.rs index 2256403c0e4..2dc82b2e0c5 100644 --- a/protocols/request-response/tests/error_reporting.rs +++ b/protocols/request-response/tests/error_reporting.rs @@ -13,10 +13,13 @@ use request_response::{ use std::pin::pin; use std::time::Duration; use std::{io, iter}; +use tracing_subscriber::EnvFilter; #[async_std::test] async fn report_outbound_failure_on_read_response() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (peer1_id, mut swarm1) = new_swarm(); let (peer2_id, mut swarm2) = new_swarm(); @@ -70,7 +73,9 @@ async fn report_outbound_failure_on_read_response() { #[async_std::test] async fn report_outbound_failure_on_write_request() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (peer1_id, mut swarm1) = new_swarm(); let (_peer2_id, mut swarm2) = new_swarm(); @@ -111,7 +116,9 @@ async fn report_outbound_failure_on_write_request() { #[async_std::test] async fn report_outbound_timeout_on_read_response() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // `swarm1` needs to have a bigger timeout to avoid racing let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); @@ -156,7 +163,9 @@ async fn report_outbound_timeout_on_read_response() { #[async_std::test] async fn report_inbound_failure_on_read_request() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (peer1_id, mut swarm1) = new_swarm(); let (_peer2_id, mut swarm2) = new_swarm(); @@ -191,7 +200,9 @@ async fn report_inbound_failure_on_read_request() { #[async_std::test] async fn report_inbound_failure_on_write_response() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (peer1_id, mut swarm1) = new_swarm(); let (peer2_id, mut swarm2) = new_swarm(); @@ -255,7 +266,9 @@ async fn report_inbound_failure_on_write_response() { #[async_std::test] async fn report_inbound_timeout_on_write_response() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); // `swarm2` needs to have a bigger timeout to avoid racing let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(100)); diff --git a/protocols/request-response/tests/ping.rs b/protocols/request-response/tests/ping.rs index c751dc2b3dd..b9e7878a78b 100644 --- a/protocols/request-response/tests/ping.rs +++ b/protocols/request-response/tests/ping.rs @@ -29,11 +29,14 @@ use libp2p_swarm_test::SwarmExt; use rand::{self, Rng}; use serde::{Deserialize, Serialize}; use std::{io, iter}; +use tracing_subscriber::EnvFilter; #[async_std::test] #[cfg(feature = "cbor")] async fn is_response_outbound() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let ping = Ping("ping".to_string().into_bytes()); let offline_peer = PeerId::random(); diff 
--git a/protocols/upnp/Cargo.toml b/protocols/upnp/Cargo.toml index 7ada98e2d19..30d50923009 100644 --- a/protocols/upnp/Cargo.toml +++ b/protocols/upnp/Cargo.toml @@ -16,9 +16,9 @@ futures-timer = "3.0.2" igd-next = "0.14.2" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } -log = "0.4.19" -void = "1.0.2" tokio = { version = "1.33", default-features = false, features = ["rt"], optional = true } +tracing = "0.1.37" +void = "1.0.2" [features] tokio = ["igd-next/aio_tokio", "dep:tokio"] diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs index 3d83545b952..5410b8dd13f 100644 --- a/protocols/upnp/src/behaviour.rs +++ b/protocols/upnp/src/behaviour.rs @@ -175,9 +175,9 @@ impl MappingList { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -190,9 +190,9 @@ impl MappingList { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -261,7 +261,7 @@ impl NetworkBehaviour for Behaviour { let (addr, protocol) = match multiaddr_to_socketaddr_protocol(multiaddr.clone()) { Ok(addr_port) => addr_port, Err(()) => { - log::debug!("multiaddress not supported for UPnP {multiaddr}"); + tracing::debug!("multiaddress not supported for UPnP {multiaddr}"); return; } }; @@ -271,7 +271,11 @@ impl NetworkBehaviour for Behaviour { .iter() .find(|(mapping, _state)| mapping.internal_addr.port() == addr.port()) { - log::debug!("port from multiaddress {multiaddr} is already being mapped to another multiaddr: {}", mapping.multiaddr); + tracing::debug!( + multiaddress=%multiaddr, + mapped_multiaddress=%mapping.multiaddr, + "port from multiaddress is already being mapped" + ); return; } @@ -302,9 +306,9 @@ impl NetworkBehaviour for Behaviour { mapping: mapping.clone(), duration, }) { - log::debug!( - "could not request port mapping for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port mapping for multiaddress on the gateway: {}", err ); } @@ -312,14 +316,17 @@ impl NetworkBehaviour for Behaviour { self.mappings.insert(mapping, MappingState::Pending); } GatewayState::GatewayNotFound => { - log::debug!( - "network gateway not found, UPnP port mapping of {multiaddr} discarded" + tracing::debug!( + multiaddres=%multiaddr, + "network gateway not found, UPnP port mapping of multiaddres discarded" ); } GatewayState::NonRoutableGateway(addr) => { - log::debug!( - "the network gateway is not exposed to the public network, \ - it's ip is {addr}. UPnP port mapping of {multiaddr} discarded" + tracing::debug!( + multiaddress=%multiaddr, + network_gateway_ip=%addr, + "the network gateway is not exposed to the public network. 
/ + UPnP port mapping of multiaddress discarded" ); } }; @@ -334,9 +341,9 @@ impl NetworkBehaviour for Behaviour { .sender .try_send(GatewayRequest::RemoveMapping(mapping.clone())) { - log::debug!( - "could not request port removal for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port removal for multiaddress on the gateway: {}", err ); } @@ -367,6 +374,7 @@ impl NetworkBehaviour for Behaviour { void::unreachable(event) } + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] fn poll( &mut self, cx: &mut Context<'_>, @@ -387,9 +395,9 @@ impl NetworkBehaviour for Behaviour { if !is_addr_global(gateway.external_addr) { self.state = GatewayState::NonRoutableGateway(gateway.external_addr); - log::debug!( - "the gateway is not routable, its address is {}", - gateway.external_addr + tracing::debug!( + gateway_address=%gateway.external_addr, + "the gateway is not routable" ); return Poll::Ready(ToSwarm::GenerateEvent( Event::NonRoutableGateway, @@ -398,7 +406,7 @@ impl NetworkBehaviour for Behaviour { self.state = GatewayState::Available(gateway); } Err(err) => { - log::debug!("could not find gateway: {err}"); + tracing::debug!("could not find gateway: {err}"); self.state = GatewayState::GatewayNotFound; return Poll::Ready(ToSwarm::GenerateEvent(Event::GatewayNotFound)); } @@ -426,20 +434,20 @@ impl NetworkBehaviour for Behaviour { self.pending_events.push_back(Event::NewExternalAddr( external_multiaddr.clone(), )); - log::debug!( - "succcessfully mapped UPnP {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully mapped UPnP for protocol" ); return Poll::Ready(ToSwarm::ExternalAddrConfirmed( external_multiaddr, )); } MappingState::Active(_) => { - log::debug!( - "succcessfully renewed UPnP mapping {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully renewed UPnP mapping for protocol" ); } _ => unreachable!(), @@ -452,10 +460,10 @@ impl NetworkBehaviour for Behaviour { .expect("mapping should exist") { MappingState::Active(_) => { - log::debug!( - "failed to remap UPnP mapped {} for {} protocol: {err}", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "failed to remap UPnP mapped for protocol: {err}" ); let external_multiaddr = mapping.external_addr(gateway.external_addr); @@ -467,10 +475,10 @@ impl NetworkBehaviour for Behaviour { )); } MappingState::Pending => { - log::debug!( - "failed to map upnp mapped {} for {} protocol: {err}", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "failed to map UPnP mapped for protocol: {err}" ); } _ => { @@ -479,28 +487,28 @@ impl NetworkBehaviour for Behaviour { } } GatewayEvent::Removed(mapping) => { - log::debug!( - "succcessfully removed UPnP mapping {} for {} protocol", - mapping.internal_addr, - mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "successfully removed UPnP mapping for protocol" ); self.mappings .remove(&mapping) .expect("mapping should exist"); } GatewayEvent::RemovalFailure(mapping, err) => { - log::debug!( - "could not remove UPnP mapping {} for {} protocol: {err}", - mapping.internal_addr, - 
mapping.protocol + tracing::debug!( + address=%mapping.internal_addr, + protocol=%mapping.protocol, + "could not remove UPnP mapping for protocol: {err}" ); if let Err(err) = gateway .sender .try_send(GatewayRequest::RemoveMapping(mapping.clone())) { - log::debug!( - "could not request port removal for {} on the gateway: {}", - mapping.multiaddr, + tracing::debug!( + multiaddress=%mapping.multiaddr, + "could not request port removal for multiaddress on the gateway: {}", err ); } diff --git a/swarm-test/Cargo.toml b/swarm-test/Cargo.toml index cee783983f7..12f8be2a1d8 100644 --- a/swarm-test/Cargo.toml +++ b/swarm-test/Cargo.toml @@ -20,8 +20,8 @@ libp2p-swarm = { workspace = true, features = ["async-std"] } libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } futures = "0.3.29" -log = "0.4.20" rand = "0.8.5" +tracing = "0.1.37" futures-timer = "3.0.2" [lints] diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index 85bd9c22e9a..ee4058d530d 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -256,10 +256,16 @@ where listener_done = true; } Either::Left((other, _)) => { - log::debug!("Ignoring event from dialer {:?}", other); + tracing::debug!( + dialer=?other, + "Ignoring event from dialer" + ); } Either::Right((other, _)) => { - log::debug!("Ignoring event from listener {:?}", other); + tracing::debug!( + listener=?other, + "Ignoring event from listener" + ); } } @@ -277,7 +283,10 @@ where endpoint, peer_id, .. } => (endpoint.get_remote_address() == &addr).then_some(peer_id), other => { - log::debug!("Ignoring event from dialer {:?}", other); + tracing::debug!( + dialer=?other, + "Ignoring event from dialer" + ); None } }) @@ -316,7 +325,7 @@ where { Either::Left(((), _)) => panic!("Swarm did not emit an event within 10s"), Either::Right((event, _)) => { - log::trace!("Swarm produced: {:?}", event); + tracing::trace!(?event); event } @@ -333,7 +342,7 @@ where async fn loop_on_next(mut self) { while let Some(event) = self.next().await { - log::trace!("Swarm produced: {:?}", event); + tracing::trace!(?event); } } } diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index 116604987f8..fb28ff34d12 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -15,18 +15,18 @@ either = "1.9.0" fnv = "1.0" futures = "0.3.29" futures-timer = "3.0.2" +getrandom = { version = "0.2.9", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature instant = "0.1.12" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-swarm-derive = { workspace = true, optional = true } -log = "0.4" +multistream-select = { workspace = true } +once_cell = "1.18.0" rand = "0.8" smallvec = "1.11.1" +tracing = "0.1.37" void = "1" wasm-bindgen-futures = { version = "0.4.37", optional = true } -getrandom = { version = "0.2.9", features = ["js"], optional = true } # Explicit dependency to be used in `wasm-bindgen` feature -once_cell = "1.18.0" -multistream-select = { workspace = true } [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] async-std = { version = "1.6.2", optional = true } @@ -41,7 +41,6 @@ wasm-bindgen = ["dep:wasm-bindgen-futures", "dep:getrandom"] [dev-dependencies] async-std = { version = "1.6.2", features = ["attributes"] } either = "1.9.0" -env_logger = "0.10" futures = "0.3.29" libp2p-identify = { path = "../protocols/identify" } # Using `path` here because this is a cyclic dev-dependency which otherwise breaks releasing. 
libp2p-identity = { workspace = true, features = ["ed25519"] } @@ -56,6 +55,7 @@ void = "1" once_cell = "1.18.0" trybuild = "1.0.85" tokio = { version = "1.33.0", features = ["time", "rt", "macros", "rt-multi-thread"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "swarm_derive" diff --git a/swarm/src/behaviour/external_addresses.rs b/swarm/src/behaviour/external_addresses.rs index 14cdb301fbd..579f46fe486 100644 --- a/swarm/src/behaviour/external_addresses.rs +++ b/swarm/src/behaviour/external_addresses.rs @@ -37,7 +37,7 @@ impl ExternalAddresses { self.addresses.remove(pos); self.push_front(addr); - log::debug!("Refreshed external address {addr}"); + tracing::debug!(address=%addr, "Refreshed external address"); return false; // No changes to our external addresses. } @@ -47,7 +47,11 @@ impl ExternalAddresses { if self.addresses.len() > MAX_LOCAL_EXTERNAL_ADDRS { let expired = self.addresses.pop().expect("list to be not empty"); - log::debug!("Removing previously confirmed external address {expired} because we reached the limit of {MAX_LOCAL_EXTERNAL_ADDRS} addresses"); + tracing::debug!( + external_address=%expired, + address_limit=%MAX_LOCAL_EXTERNAL_ADDRS, + "Removing previously confirmed external address because we reached the address limit" + ); } return true; diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index ee2729e0c82..35cc71d5354 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -192,7 +192,6 @@ where ProtocolsChange::Added(ProtocolsAdded::from_set(&initial_protocols)), )); } - Connection { muxing: muxer, handler, @@ -235,6 +234,7 @@ where /// Polls the handler and the substream, forwarding events from the former to the latter and /// vice versa. + #[tracing::instrument(level = "debug", name = "Connection::poll", skip(self, cx))] pub(crate) fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -252,6 +252,7 @@ where remote_supported_protocols, idle_timeout, stream_counter, + .. } = self.get_mut(); loop { @@ -346,15 +347,15 @@ where continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::Io(e))))) => { - log::debug!("failed to upgrade inbound stream: {e}"); + tracing::debug!("failed to upgrade inbound stream: {e}"); continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::NegotiationFailed)))) => { - log::debug!("no protocol could be agreed upon for inbound stream"); + tracing::debug!("no protocol could be agreed upon for inbound stream"); continue; } Poll::Ready(Some((_, Err(StreamUpgradeError::Timeout)))) => { - log::debug!("inbound stream upgrade timed out"); + tracing::debug!("inbound stream upgrade timed out"); continue; } } @@ -494,7 +495,7 @@ fn compute_new_shutdown( /// The [`Duration`] computed by the this function may not be the longest possible that we can add to `now` but it will work. 
fn checked_add_fraction(start: Instant, mut duration: Duration) -> Duration { while start.checked_add(duration).is_none() { - log::debug!("{start:?} + {duration:?} cannot be presented, halving duration"); + tracing::debug!(start=?start, duration=?duration, "start + duration cannot be presented, halving duration"); duration /= 2; } @@ -541,7 +542,7 @@ impl StreamUpgrade { { let effective_version = match version_override { Some(version_override) if version_override != upgrade::Version::default() => { - log::debug!( + tracing::debug!( "Substream upgrade protocol override: {:?} -> {:?}", upgrade::Version::default(), version_override @@ -753,11 +754,14 @@ mod tests { use quickcheck::*; use std::sync::{Arc, Weak}; use std::time::Instant; + use tracing_subscriber::EnvFilter; use void::Void; #[test] fn max_negotiating_inbound_streams() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(max_negotiating_inbound_streams: u8) { let max_negotiating_inbound_streams: usize = max_negotiating_inbound_streams.into(); @@ -924,7 +928,9 @@ mod tests { #[test] fn checked_add_fraction_can_add_u64_max() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); let start = Instant::now(); let duration = checked_add_fraction(start, Duration::from_secs(u64::MAX)); @@ -934,7 +940,9 @@ mod tests { #[test] fn compute_new_shutdown_does_not_panic() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); #[derive(Debug)] struct ArbitraryShutdown(Shutdown); diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index 8a2f1cb6b20..cfa3fb7ea3c 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -49,6 +49,7 @@ use std::{ task::Context, task::Poll, }; +use tracing::Instrument; use void::Void; mod concurrent_dial; @@ -425,20 +426,22 @@ where dial_concurrency_factor_override: Option, connection_id: ConnectionId, ) { - let dial = ConcurrentDial::new( - dials, - dial_concurrency_factor_override.unwrap_or(self.dial_concurrency_factor), - ); + let concurrency_factor = + dial_concurrency_factor_override.unwrap_or(self.dial_concurrency_factor); + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_outgoing_connection", %concurrency_factor, num_dials=%dials.len(), id = %connection_id); + span.follows_from(tracing::Span::current()); let (abort_notifier, abort_receiver) = oneshot::channel(); - self.executor - .spawn(task::new_for_pending_outgoing_connection( + self.executor.spawn( + task::new_for_pending_outgoing_connection( connection_id, - dial, + ConcurrentDial::new(dials, concurrency_factor), abort_receiver, self.pending_connection_events_tx.clone(), - )); + ) + .instrument(span), + ); let endpoint = PendingPoint::Dialer { role_override }; @@ -468,13 +471,18 @@ where let (abort_notifier, abort_receiver) = oneshot::channel(); - self.executor - .spawn(task::new_for_pending_incoming_connection( + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_incoming_connection", remote_addr = %info.send_back_addr, id = %connection_id); + span.follows_from(tracing::Span::current()); + + self.executor.spawn( + task::new_for_pending_incoming_connection( connection_id, future, abort_receiver, self.pending_connection_events_tx.clone(), - )); + ) + .instrument(span), + ); 
self.counters.inc_pending_incoming(); self.pending.insert( @@ -497,7 +505,6 @@ where handler: THandler, ) { let connection = connection.extract(); - let conns = self.established.entry(obtained_peer_id).or_default(); self.counters.inc_established(endpoint); @@ -524,16 +531,23 @@ where self.idle_connection_timeout, ); - self.executor.spawn(task::new_for_established_connection( - id, - obtained_peer_id, - connection, - command_receiver, - event_sender, - )) + let span = tracing::debug_span!(parent: tracing::Span::none(), "new_established_connection", remote_addr = %endpoint.get_remote_address(), %id, peer = %obtained_peer_id); + span.follows_from(tracing::Span::current()); + + self.executor.spawn( + task::new_for_established_connection( + id, + obtained_peer_id, + connection, + command_receiver, + event_sender, + ) + .instrument(span), + ) } /// Polls the connection pool for events. + #[tracing::instrument(level = "debug", name = "Pool::poll", skip(self, cx))] pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> where THandler: ConnectionHandler + 'static, @@ -685,10 +699,10 @@ where if let Err(error) = check_peer_id() { self.executor.spawn(poll_fn(move |cx| { if let Err(e) = ready!(muxer.poll_close_unpin(cx)) { - log::debug!( - "Failed to close connection {:?} to peer {}: {:?}", - id, - obtained_peer_id, + tracing::debug!( + peer=%obtained_peer_id, + connection=%id, + "Failed to close connection to peer: {:?}", e ); } diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index 41e0cf42df9..89d4d36fadc 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -161,7 +161,7 @@ where }, )); } else { - log::error!("FullyNegotiatedOutbound: no handler for key") + tracing::error!("FullyNegotiatedOutbound: no handler for key") } } ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { @@ -178,7 +178,7 @@ where )); } } else { - log::error!("FullyNegotiatedInbound: no handler for key") + tracing::error!("FullyNegotiatedInbound: no handler for key") } } ConnectionEvent::AddressChange(AddressChange { new_address }) => { @@ -198,7 +198,7 @@ where error, })); } else { - log::error!("DialUpgradeError: no handler for protocol") + tracing::error!("DialUpgradeError: no handler for protocol") } } ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { @@ -225,7 +225,7 @@ where if let Some(h) = self.handlers.get_mut(&key) { h.on_behaviour_event(event) } else { - log::error!("on_behaviour_event: no handler for key") + tracing::error!("on_behaviour_event: no handler for key") } } diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 228c8281a70..7bbc1c68924 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -151,6 +151,7 @@ use std::{ pin::Pin, task::{Context, Poll}, }; +use tracing::Instrument; /// Event generated by the [`NetworkBehaviour`] that the swarm will report back. 
type TBehaviourOutEvent = ::ToSwarm; @@ -364,7 +365,7 @@ where local_peer_id: PeerId, config: Config, ) -> Self { - log::info!("Local peer id: {local_peer_id}"); + tracing::info!(%local_peer_id); Swarm { local_peer_id, @@ -482,7 +483,11 @@ where let num_addresses = addresses.len(); if num_addresses > 0 { - log::debug!("discarding {num_addresses} addresses from `NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection {connection_id:?}") + tracing::debug!( + connection=%connection_id, + discarded_addresses_count=%num_addresses, + "discarding addresses from `NetworkBehaviour` because `DialOpts::extend_addresses_through_behaviour is `false` for connection" + ) } } } @@ -524,13 +529,22 @@ where .into_iter() .map(|a| match p2p_addr(peer_id, a) { Ok(address) => { - let dial = match dial_opts.role_override() { - Endpoint::Dialer => self.transport.dial(address.clone()), - Endpoint::Listener => self.transport.dial_as_listener(address.clone()), + let (dial, span) = match dial_opts.role_override() { + Endpoint::Dialer => ( + self.transport.dial(address.clone()), + tracing::debug_span!(parent: tracing::Span::none(), "Transport::dial", %address), + ), + Endpoint::Listener => ( + self.transport.dial_as_listener(address.clone()), + tracing::debug_span!(parent: tracing::Span::none(), "Transport::dial_as_listener", %address), + ), }; + span.follows_from(tracing::Span::current()); + match dial { Ok(fut) => fut .map(|r| (address, r.map_err(TransportError::Other))) + .instrument(span) .boxed(), Err(err) => futures::future::ready((address, Err(err))).boxed(), } @@ -769,11 +783,11 @@ where self.pool .spawn_connection(id, peer_id, &endpoint, connection, handler); - log::debug!( - "Connection established: {:?} {:?}; Total (peer): {}.", - peer_id, - endpoint, - num_established, + tracing::debug!( + peer=%peer_id, + ?endpoint, + total_peers=%num_established, + "Connection established" ); let failed_addresses = concurrent_dial_errors .as_ref() @@ -820,9 +834,9 @@ where })); if let Some(peer) = peer { - log::debug!("Connection attempt to {:?} failed with {:?}.", peer, error,); + tracing::debug!(%peer, "Connection attempt to peer failed with {:?}.", error,); } else { - log::debug!("Connection attempt to unknown peer failed with {:?}", error); + tracing::debug!("Connection attempt to unknown peer failed with {:?}", error); } self.pending_swarm_events @@ -840,7 +854,7 @@ where } => { let error = error.into(); - log::debug!("Incoming connection failed: {:?}", error); + tracing::debug!("Incoming connection failed: {:?}", error); self.behaviour .on_swarm_event(FromSwarm::ListenFailure(ListenFailure { local_addr: &local_addr, @@ -864,17 +878,17 @@ where .. 
} => { if let Some(error) = error.as_ref() { - log::debug!( - "Connection closed with error {:?}: {:?}; Total (peer): {}.", + tracing::debug!( + total_peers=%remaining_established_connection_ids.len(), + "Connection closed with error {:?}: {:?}", error, connected, - remaining_established_connection_ids.len() ); } else { - log::debug!( - "Connection closed: {:?}; Total (peer): {}.", - connected, - remaining_established_connection_ids.len() + tracing::debug!( + total_peers=%remaining_established_connection_ids.len(), + "Connection closed: {:?}", + connected ); } let peer_id = connected.peer_id; @@ -983,7 +997,11 @@ where listener_id, listen_addr, } => { - log::debug!("Listener {:?}; New address: {:?}", listener_id, listen_addr); + tracing::debug!( + listener=?listener_id, + address=%listen_addr, + "New listener address" + ); let addrs = self.listened_addrs.entry(listener_id).or_default(); if !addrs.contains(&listen_addr) { addrs.push(listen_addr.clone()) @@ -1003,10 +1021,10 @@ where listener_id, listen_addr, } => { - log::debug!( - "Listener {:?}; Expired address {:?}.", - listener_id, - listen_addr + tracing::debug!( + listener=?listener_id, + address=%listen_addr, + "Expired listener address" ); if let Some(addrs) = self.listened_addrs.get_mut(&listener_id) { addrs.retain(|a| a != &listen_addr); @@ -1026,7 +1044,11 @@ where listener_id, reason, } => { - log::debug!("Listener {:?}; Closed by {:?}.", listener_id, reason); + tracing::debug!( + listener=?listener_id, + ?reason, + "Listener closed" + ); let addrs = self.listened_addrs.remove(&listener_id).unwrap_or_default(); for addr in addrs.iter() { self.behaviour.on_swarm_event(FromSwarm::ExpiredListenAddr( @@ -1167,6 +1189,7 @@ where /// Internal function used by everything event-related. /// /// Polls the `Swarm` for the next event. + #[tracing::instrument(level = "debug", name = "Swarm::poll", skip(self, cx))] fn poll_next_event( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -1232,7 +1255,7 @@ where this.handle_pool_event(pool_event); continue; } - }; + } // Poll the listener(s) for new connections. 
match Pin::new(&mut this.transport).poll(cx) { @@ -2294,7 +2317,9 @@ mod tests { #[tokio::test] async fn aborting_pending_connection_surfaces_error() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); let mut dialer = new_test_swarm(Config::with_tokio_executor()); let mut listener = new_test_swarm(Config::with_tokio_executor()); diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index 6d217d8be31..df769161c55 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -11,21 +11,21 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] +async-std-resolver = { version = "0.23", optional = true } async-trait = "0.1.74" +futures = "0.3.28" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" -futures = "0.3.29" -async-std-resolver = { version = "0.23", optional = true } parking_lot = "0.12.0" -trust-dns-resolver = { version = "0.23", default-features = false, features = ["system-config"] } smallvec = "1.11.1" +tracing = "0.1.37" +trust-dns-resolver = { version = "0.23", default-features = false, features = ["system-config"] } [dev-dependencies] -env_logger = "0.10" libp2p-identity = { workspace = true, features = ["rand"] } tokio-crate = { package = "tokio", version = "1.0", default-features = false, features = ["rt", "time"] } async-std-crate = { package = "async-std", version = "1.6" } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] async-std = ["async-std-resolver"] diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index a68a2c53d2d..13ad93952c9 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -277,7 +277,7 @@ where ) }) { if dns_lookups == MAX_DNS_LOOKUPS { - log::debug!("Too many DNS lookups. Dropping unresolved {}.", addr); + tracing::debug!(address=%addr, "Too many DNS lookups, dropping unresolved address"); last_err = Some(Error::TooManyLookups); // There may still be fully resolved addresses in `unresolved`, // so keep going until `unresolved` is empty. @@ -294,13 +294,13 @@ where last_err = Some(e); } Ok(Resolved::One(ip)) => { - log::trace!("Resolved {} -> {}", name, ip); + tracing::trace!(protocol=%name, resolved=%ip); let addr = addr.replace(i, |_| Some(ip)).expect("`i` is a valid index"); unresolved.push(addr); } Ok(Resolved::Many(ips)) => { for ip in ips { - log::trace!("Resolved {} -> {}", name, ip); + tracing::trace!(protocol=%name, resolved=%ip); let addr = addr.replace(i, |_| Some(ip)).expect("`i` is a valid index"); unresolved.push(addr); @@ -314,14 +314,14 @@ where if a.ends_with(&suffix) { if n < MAX_TXT_RECORDS { n += 1; - log::trace!("Resolved {} -> {}", name, a); + tracing::trace!(protocol=%name, resolved=%a); let addr = prefix.iter().chain(a.iter()).collect::(); unresolved.push(addr); } else { - log::debug!( - "Too many TXT records. Dropping resolved {}.", - a + tracing::debug!( + resolved=%a, + "Too many TXT records, dropping resolved" ); } } @@ -330,7 +330,7 @@ where } } else { // We have a fully resolved address, so try to dial it. 
- log::debug!("Dialing {}", addr); + tracing::debug!(address=%addr, "Dialing address"); let transport = inner.clone(); let dial = match role_override { @@ -354,12 +354,12 @@ where match result { Ok(out) => return Ok(out), Err(err) => { - log::debug!("Dial error: {:?}.", err); + tracing::debug!("Dial error: {:?}.", err); if unresolved.is_empty() { return Err(err); } if dial_attempts == MAX_DIAL_ATTEMPTS { - log::debug!( + tracing::debug!( "Aborting dialing after {} attempts.", MAX_DIAL_ATTEMPTS ); @@ -537,7 +537,7 @@ fn resolve<'a, E: 'a + Send, R: Resolver>( match parse_dnsaddr_txt(chars) { Err(e) => { // Skip over seemingly invalid entries. - log::debug!("Invalid TXT record: {:?}", e); + tracing::debug!("Invalid TXT record: {:?}", e); } Ok(a) => { addrs.push(a); @@ -612,7 +612,9 @@ mod tests { #[test] fn basic_resolve() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); #[derive(Clone)] struct CustomTransport; diff --git a/transports/noise/Cargo.toml b/transports/noise/Cargo.toml index c3d5a779b00..502cfdd99d9 100644 --- a/transports/noise/Cargo.toml +++ b/transports/noise/Cargo.toml @@ -15,7 +15,6 @@ curve25519-dalek = "4.1.1" futures = "0.3.29" libp2p-core = { workspace = true } libp2p-identity = { workspace = true, features = ["ed25519"] } -log = "0.4" multiaddr = { workspace = true } multihash = { workspace = true } once_cell = "1.18.0" @@ -24,6 +23,7 @@ rand = "0.8.3" sha2 = "0.10.8" static_assertions = "1" thiserror = "1.0.50" +tracing = "0.1.37" x25519-dalek = "2" zeroize = "1" @@ -34,9 +34,9 @@ snow = { version = "0.9.2", features = ["ring-resolver"], default-features = fal snow = { version = "0.9.2", features = ["default-resolver"], default-features = false } [dev-dependencies] -env_logger = "0.10.0" futures_ringbuf = "0.4.0" quickcheck = { workspace = true } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } libp2p-identity = { workspace = true, features = ["rand"] } # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/transports/noise/src/io.rs b/transports/noise/src/io.rs index c43e1dd67a1..9cd4cfed52a 100644 --- a/transports/noise/src/io.rs +++ b/transports/noise/src/io.rs @@ -27,7 +27,6 @@ use bytes::Bytes; use framed::{Codec, MAX_FRAME_LEN}; use futures::prelude::*; use futures::ready; -use log::trace; use std::{ cmp::min, fmt, io, @@ -76,10 +75,10 @@ impl AsyncRead for Output { if len > 0 { let n = min(len - off, buf.len()); buf[..n].copy_from_slice(&self.recv_buffer[off..off + n]); - trace!("read: copied {}/{} bytes", off + n, len); + tracing::trace!(copied_bytes=%(off + n), total_bytes=%len, "read: copied"); self.recv_offset += n; if len == self.recv_offset { - trace!("read: frame consumed"); + tracing::trace!("read: frame consumed"); // Drop the existing view so `NoiseFramed` can reuse // the buffer when polling for the next frame below. self.recv_buffer = Bytes::new(); @@ -112,7 +111,7 @@ impl AsyncWrite for Output { // The MAX_FRAME_LEN is the maximum buffer size before a frame must be sent. 
if this.send_offset == MAX_FRAME_LEN { - trace!("write: sending {} bytes", MAX_FRAME_LEN); + tracing::trace!(bytes=%MAX_FRAME_LEN, "write: sending"); ready!(io.as_mut().poll_ready(cx))?; io.as_mut().start_send(frame_buf)?; this.send_offset = 0; @@ -124,7 +123,7 @@ impl AsyncWrite for Output { let n = min(MAX_FRAME_LEN - off, buf.len()); this.send_buffer[off..off + n].copy_from_slice(&buf[..n]); this.send_offset += n; - trace!("write: buffered {} bytes", this.send_offset); + tracing::trace!(bytes=%this.send_offset, "write: buffered"); Poll::Ready(Ok(n)) } @@ -137,7 +136,7 @@ impl AsyncWrite for Output { // Check if there is still one more frame to send. if this.send_offset > 0 { ready!(io.as_mut().poll_ready(cx))?; - trace!("flush: sending {} bytes", this.send_offset); + tracing::trace!(bytes= %this.send_offset, "flush: sending"); io.as_mut().start_send(frame_buf)?; this.send_offset = 0; } diff --git a/transports/noise/src/io/framed.rs b/transports/noise/src/io/framed.rs index 739b0eea426..b7504f2e37a 100644 --- a/transports/noise/src/io/framed.rs +++ b/transports/noise/src/io/framed.rs @@ -177,12 +177,12 @@ fn encrypt( encrypt_buffer: &mut BytesMut, encrypt_fn: impl FnOnce(&[u8], &mut [u8]) -> Result, ) -> io::Result<()> { - log::trace!("Encrypting {} bytes", cleartext.len()); + tracing::trace!("Encrypting {} bytes", cleartext.len()); encrypt_buffer.resize(cleartext.len() + EXTRA_ENCRYPT_SPACE, 0); let n = encrypt_fn(cleartext, encrypt_buffer).map_err(into_io_error)?; - log::trace!("Outgoing ciphertext has {n} bytes"); + tracing::trace!("Outgoing ciphertext has {n} bytes"); encode_length_prefixed(&encrypt_buffer[..n], dst); @@ -202,12 +202,12 @@ fn decrypt( None => return Ok(None), }; - log::trace!("Incoming ciphertext has {} bytes", ciphertext.len()); + tracing::trace!("Incoming ciphertext has {} bytes", ciphertext.len()); let mut decrypt_buffer = BytesMut::zeroed(ciphertext.len()); let n = decrypt_fn(&ciphertext, &mut decrypt_buffer).map_err(into_io_error)?; - log::trace!("Decrypted cleartext has {n} bytes"); + tracing::trace!("Decrypted cleartext has {n} bytes"); Ok(Some(decrypt_buffer.split_to(n).freeze())) } diff --git a/transports/noise/tests/smoke.rs b/transports/noise/tests/smoke.rs index ffcf7934ac0..0afebc0cbea 100644 --- a/transports/noise/tests/smoke.rs +++ b/transports/noise/tests/smoke.rs @@ -24,9 +24,9 @@ use libp2p_core::upgrade; use libp2p_core::upgrade::{InboundConnectionUpgrade, OutboundConnectionUpgrade}; use libp2p_identity as identity; use libp2p_noise as noise; -use log::info; use quickcheck::*; use std::{convert::TryInto, io}; +use tracing_subscriber::EnvFilter; #[allow(dead_code)] fn core_upgrade_compat() { @@ -41,7 +41,9 @@ fn core_upgrade_compat() { #[test] fn xx() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn prop(mut messages: Vec) -> bool { messages.truncate(5); let server_id = identity::Keypair::generate_ed25519(); @@ -86,7 +88,7 @@ fn xx() { Err(e) => panic!("error reading len: {e}"), } }; - info!("server: reading message ({} bytes)", len); + tracing::info!(bytes=%len, "server: reading message"); let mut server_buffer = vec![0; len.try_into().unwrap()]; server_session .read_exact(&mut server_buffer) diff --git a/transports/plaintext/Cargo.toml b/transports/plaintext/Cargo.toml index 33c2eeab7ae..a64f6ce8e9f 100644 --- a/transports/plaintext/Cargo.toml +++ b/transports/plaintext/Cargo.toml @@ -16,16 +16,16 @@ bytes = "1" futures = "0.3.29" libp2p-core = { workspace = 
true } libp2p-identity = { workspace = true } -log = "0.4.20" quick-protobuf = "0.8" +tracing = "0.1.37" quick-protobuf-codec = { workspace = true } [dev-dependencies] -env_logger = "0.10.0" libp2p-identity = { workspace = true, features = ["ed25519", "rand"] } quickcheck = { workspace = true } rand = "0.8" futures_ringbuf = "0.4.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/plaintext/src/handshake.rs b/transports/plaintext/src/handshake.rs index 51dd5501ea0..ddd5f7f8a9b 100644 --- a/transports/plaintext/src/handshake.rs +++ b/transports/plaintext/src/handshake.rs @@ -25,7 +25,6 @@ use asynchronous_codec::{Framed, FramedParts}; use bytes::Bytes; use futures::prelude::*; use libp2p_identity::{PeerId, PublicKey}; -use log::{debug, trace}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; pub(crate) async fn handshake(socket: S, config: Config) -> Result<(S, PublicKey, Bytes), Error> @@ -35,7 +34,7 @@ where // The handshake messages all start with a variable-length integer indicating the size. let mut framed_socket = Framed::new(socket, quick_protobuf_codec::Codec::::new(100)); - trace!("sending exchange to remote"); + tracing::trace!("sending exchange to remote"); framed_socket .send(Exchange { id: Some(config.local_public_key.to_peer_id().to_bytes()), @@ -44,7 +43,7 @@ where .await .map_err(DecodeError)?; - trace!("receiving the remote's exchange"); + tracing::trace!("receiving the remote's exchange"); let public_key = match framed_socket .next() .await @@ -62,13 +61,13 @@ where public_key } None => { - debug!("unexpected eof while waiting for remote's exchange"); + tracing::debug!("unexpected eof while waiting for remote's exchange"); let err = IoError::new(IoErrorKind::BrokenPipe, "unexpected eof"); return Err(err.into()); } }; - trace!("received exchange from remote; pubkey = {:?}", public_key); + tracing::trace!(?public_key, "received exchange from remote"); let FramedParts { io, diff --git a/transports/plaintext/src/lib.rs b/transports/plaintext/src/lib.rs index bdca271a68e..4a322d63fab 100644 --- a/transports/plaintext/src/lib.rs +++ b/transports/plaintext/src/lib.rs @@ -32,7 +32,6 @@ use libp2p_core::UpgradeInfo; use libp2p_identity as identity; use libp2p_identity::PeerId; use libp2p_identity::PublicKey; -use log::debug; use std::{ io, iter, pin::Pin, @@ -101,9 +100,9 @@ impl Config { where T: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - debug!("Starting plaintext handshake."); + tracing::debug!("Starting plaintext handshake."); let (socket, remote_key, read_buffer) = handshake::handshake(socket, self).await?; - debug!("Finished plaintext handshake."); + tracing::debug!("Finished plaintext handshake."); Ok(( remote_key.to_peer_id(), diff --git a/transports/plaintext/tests/smoke.rs b/transports/plaintext/tests/smoke.rs index fd3350fb5aa..f77f23d3ad3 100644 --- a/transports/plaintext/tests/smoke.rs +++ b/transports/plaintext/tests/smoke.rs @@ -22,12 +22,14 @@ use futures::io::{AsyncReadExt, AsyncWriteExt}; use libp2p_core::upgrade::InboundConnectionUpgrade; use libp2p_identity as identity; use libp2p_plaintext as plaintext; -use log::debug; use quickcheck::QuickCheck; +use tracing_subscriber::EnvFilter; #[test] fn variable_msg_length() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); fn 
prop(msg: Vec) { let msg_to_send = msg.clone(); @@ -53,18 +55,18 @@ fn variable_msg_length() { assert_eq!(received_client_id, client_id.public().to_peer_id()); let client_fut = async { - debug!("Client: writing message."); + tracing::debug!("Client: writing message."); client_channel .write_all(&msg_to_send) .await .expect("no error"); - debug!("Client: flushing channel."); + tracing::debug!("Client: flushing channel."); client_channel.flush().await.expect("no error"); }; let server_fut = async { let mut server_buffer = vec![0; msg_to_receive.len()]; - debug!("Server: reading message."); + tracing::debug!("Server: reading message."); server_channel .read_exact(&mut server_buffer) .await diff --git a/transports/pnet/Cargo.toml b/transports/pnet/Cargo.toml index 16e34a4a9ee..000cf0eb203 100644 --- a/transports/pnet/Cargo.toml +++ b/transports/pnet/Cargo.toml @@ -12,9 +12,9 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.29" -log = "0.4.20" salsa20 = "0.10" sha3 = "0.10" +tracing = "0.1.37" rand = "0.8" pin-project = "1.1.3" diff --git a/transports/pnet/src/crypt_writer.rs b/transports/pnet/src/crypt_writer.rs index c5993548239..06f932fbe71 100644 --- a/transports/pnet/src/crypt_writer.rs +++ b/transports/pnet/src/crypt_writer.rs @@ -23,7 +23,6 @@ use futures::{ ready, task::{Context, Poll}, }; -use log::trace; use pin_project::pin_project; use salsa20::{cipher::StreamCipher, XSalsa20}; use std::{fmt, pin::Pin}; @@ -120,7 +119,7 @@ impl AsyncWrite for CryptWriter { let res = Pin::new(&mut *this.buf).poll_write(cx, buf); if let Poll::Ready(Ok(count)) = res { this.cipher.apply_keystream(&mut this.buf[0..count]); - trace!("encrypted {} bytes", count); + tracing::trace!(bytes=%count, "encrypted bytes"); } else { debug_assert!(false); }; diff --git a/transports/pnet/src/lib.rs b/transports/pnet/src/lib.rs index d8aac22eecd..083ffff36a3 100644 --- a/transports/pnet/src/lib.rs +++ b/transports/pnet/src/lib.rs @@ -29,7 +29,6 @@ mod crypt_writer; use crypt_writer::CryptWriter; use futures::prelude::*; -use log::trace; use pin_project::pin_project; use rand::RngCore; use salsa20::{ @@ -210,7 +209,7 @@ impl PnetConfig { where TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, { - trace!("exchanging nonces"); + tracing::trace!("exchanging nonces"); let mut local_nonce = [0u8; NONCE_SIZE]; let mut remote_nonce = [0u8; NONCE_SIZE]; rand::thread_rng().fill_bytes(&mut local_nonce); @@ -223,7 +222,7 @@ impl PnetConfig { .read_exact(&mut remote_nonce) .await .map_err(PnetError::HandshakeError)?; - trace!("setting up ciphers"); + tracing::trace!("setting up ciphers"); let write_cipher = XSalsa20::new(&self.key.0.into(), &local_nonce.into()); let read_cipher = XSalsa20::new(&self.key.0.into(), &remote_nonce.into()); Ok(PnetOutput::new(socket, write_cipher, read_cipher)) @@ -257,9 +256,9 @@ impl AsyncRead for PnetOutput { let this = self.project(); let result = this.inner.get_pin_mut().poll_read(cx, buf); if let Poll::Ready(Ok(size)) = &result { - trace!("read {} bytes", size); + tracing::trace!(bytes=%size, "read bytes"); this.read_cipher.apply_keystream(&mut buf[..*size]); - trace!("decrypted {} bytes", size); + tracing::trace!(bytes=%size, "decrypted bytes"); } result } diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index afee991c76a..4ce23bf1207 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -17,13 +17,13 @@ if-watch = "3.1.0" libp2p-core = { workspace = true } libp2p-tls = { workspace = true } libp2p-identity = { 
workspace = true } -log = "0.4" parking_lot = "0.12.0" quinn = { version = "0.10.2", default-features = false, features = ["tls-rustls", "futures-io"] } rand = "0.8.5" rustls = { version = "0.21.8", default-features = false } thiserror = "1.0.50" tokio = { version = "1.33.0", default-features = false, features = ["net", "rt", "time"], optional = true } +tracing = "0.1.37" socket2 = "0.5.5" ring = "0.16.20" @@ -40,7 +40,6 @@ rustc-args = ["--cfg", "docsrs"] [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -env_logger = "0.10.0" libp2p-identity = { workspace = true, features = ["rand"] } libp2p-muxer-test-harness = { path = "../../muxers/test-harness" } libp2p-noise = { workspace = true } @@ -48,6 +47,7 @@ libp2p-tcp = { workspace = true, features = ["async-io"] } libp2p-yamux = { workspace = true } quickcheck = "1" tokio = { version = "1.33.0", features = ["macros", "rt-multi-thread", "time"] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } [[test]] name = "stream_compliance" diff --git a/transports/quic/src/hole_punching.rs b/transports/quic/src/hole_punching.rs index 41f55c5cada..605799af5e1 100644 --- a/transports/quic/src/hole_punching.rs +++ b/transports/quic/src/hole_punching.rs @@ -34,7 +34,7 @@ async fn punch_holes( .take(64) .collect(); - log::trace!("Sending random UDP packet to {remote_addr}"); + tracing::trace!("Sending random UDP packet to {remote_addr}"); P::send_to(&socket, &contents, remote_addr).await?; diff --git a/transports/quic/src/transport.rs b/transports/quic/src/transport.rs index 24527649edf..feda501464f 100644 --- a/transports/quic/src/transport.rs +++ b/transports/quic/src/transport.rs @@ -318,7 +318,7 @@ impl Transport for GenTransport
<P>
{ .try_clone_socket() .map_err(Self::Error::from)?; - log::debug!("Preparing for hole-punch from {addr}"); + tracing::debug!("Preparing for hole-punch from {addr}"); let hole_puncher = hole_puncher::
<P>
(socket, socket_addr, self.handshake_timeout); @@ -348,7 +348,12 @@ impl Transport for GenTransport
<P>
{ .expect("hole punch connection sender is never dropped before receiver") .await?; if inbound_peer_id != peer_id { - log::warn!("expected inbound connection from {socket_addr} to resolve to {peer_id} but got {inbound_peer_id}"); + tracing::warn!( + peer=%peer_id, + inbound_peer=%inbound_peer_id, + socket_address=%socket_addr, + "expected inbound connection from socket_address to resolve to peer but got inbound peer" + ); } Ok((inbound_peer_id, connection)) } @@ -527,7 +532,10 @@ impl Listener
<P>
{ if let Some(listen_addr) = ip_to_listenaddr(&endpoint_addr, inet.addr(), self.version) { - log::debug!("New listen address: {listen_addr}"); + tracing::debug!( + address=%listen_addr, + "New listen address" + ); self.listening_addresses.insert(inet.addr()); return Poll::Ready(TransportEvent::NewAddress { listener_id: self.listener_id, @@ -539,7 +547,10 @@ impl Listener
<P>
{ if let Some(listen_addr) = ip_to_listenaddr(&endpoint_addr, inet.addr(), self.version) { - log::debug!("Expired listen address: {listen_addr}"); + tracing::debug!( + address=%listen_addr, + "Expired listen address" + ); self.listening_addresses.remove(&inet.addr()); return Poll::Ready(TransportEvent::AddressExpired { listener_id: self.listener_id, diff --git a/transports/quic/tests/smoke.rs b/transports/quic/tests/smoke.rs index f72a6494e64..77dfac6bb44 100644 --- a/transports/quic/tests/smoke.rs +++ b/transports/quic/tests/smoke.rs @@ -26,6 +26,7 @@ use std::{ pin::Pin, sync::{Arc, Mutex}, }; +use tracing_subscriber::EnvFilter; #[cfg(feature = "tokio")] #[tokio::test] @@ -42,7 +43,9 @@ async fn async_std_smoke() { #[cfg(feature = "tokio")] #[tokio::test] async fn endpoint_reuse() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (_, mut a_transport) = create_default_transport::(); let (_, mut b_transport) = create_default_transport::(); @@ -67,7 +70,9 @@ async fn endpoint_reuse() { #[cfg(feature = "async-std")] #[async_std::test] async fn ipv4_dial_ipv6() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_default_transport::(); let (b_peer_id, mut b_transport) = create_default_transport::(); @@ -85,7 +90,9 @@ async fn ipv4_dial_ipv6() { #[cfg(feature = "async-std")] #[async_std::test] async fn wrapped_with_delay() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); struct DialDelay(Arc>>); @@ -253,7 +260,9 @@ async fn tcp_and_quic() { #[cfg(feature = "async-std")] #[test] fn concurrent_connections_and_streams_async_std() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); quickcheck::QuickCheck::new() .min_tests_passed(1) @@ -264,7 +273,9 @@ fn concurrent_connections_and_streams_async_std() { #[cfg(feature = "tokio")] #[test] fn concurrent_connections_and_streams_tokio() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -281,7 +292,9 @@ async fn draft_29_support() { use futures::{future::poll_fn, select}; use libp2p_core::transport::TransportError; - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (_, mut a_transport) = create_transport::(|cfg| cfg.support_draft_29 = true); @@ -342,7 +355,9 @@ async fn draft_29_support() { #[cfg(feature = "async-std")] #[async_std::test] async fn backpressure() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let max_stream_data = quic::Config::new(&generate_tls_keypair()).max_stream_data; let (mut stream_a, mut stream_b) = build_streams::().await; @@ -366,7 +381,9 @@ async fn backpressure() { #[cfg(feature = "async-std")] #[async_std::test] async fn read_after_peer_dropped_stream() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (mut stream_a, mut stream_b) = build_streams::().await; let data = vec![0; 10]; @@ -386,7 +403,9 @@ async fn 
read_after_peer_dropped_stream() { #[async_std::test] #[should_panic] async fn write_after_peer_dropped_stream() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (stream_a, mut stream_b) = build_streams::().await; drop(stream_a); futures_timer::Delay::new(Duration::from_millis(1)).await; @@ -440,7 +459,9 @@ async fn test_local_listener_reuse() { } async fn smoke() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_default_transport::

(); let (b_peer_id, mut b_transport) = create_default_transport::

(); @@ -562,7 +583,11 @@ fn prop( let (listeners_tx, mut listeners_rx) = mpsc::channel(number_listeners); - log::info!("Creating {number_streams} streams on {number_listeners} connections"); + tracing::info!( + stream_count=%number_streams, + connection_count=%number_listeners, + "Creating streams on connections" + ); // Spawn the listener nodes. for _ in 0..number_listeners { @@ -703,7 +728,10 @@ async fn open_outbound_streams( }); } - log::info!("Created {number_streams} streams"); + tracing::info!( + stream_count=%number_streams, + "Created streams" + ); while future::poll_fn(|cx| connection.poll_unpin(cx)) .await diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index cb54c98e7ba..37e85d04ded 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -18,9 +18,9 @@ if-watch = "3.1.0" libc = "0.2.149" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" socket2 = { version = "0.5.5", features = ["all"] } tokio = { version = "1.33.0", default-features = false, features = ["net"], optional = true } +tracing = "0.1.37" [features] tokio = ["dep:tokio", "if-watch/tokio"] @@ -30,7 +30,7 @@ async-io = ["dep:async-io", "if-watch/smol"] async-std = { version = "1.6.5", features = ["attributes"] } libp2p-identity = { workspace = true, features = ["rand"] } tokio = { version = "1.33.0", default-features = false, features = ["full"] } -env_logger = "0.10.0" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/tcp/src/lib.rs b/transports/tcp/src/lib.rs index 5efdf16fff5..b466f387ba4 100644 --- a/transports/tcp/src/lib.rs +++ b/transports/tcp/src/lib.rs @@ -98,7 +98,7 @@ impl PortReuse { /// Has no effect if port reuse is disabled. fn register(&mut self, ip: IpAddr, port: Port) { if let PortReuse::Enabled { listen_addrs } = self { - log::trace!("Registering for port reuse: {}:{}", ip, port); + tracing::trace!(%ip, %port, "Registering for port reuse"); listen_addrs .write() .expect("`register()` and `unregister()` never panic while holding the lock") @@ -111,7 +111,7 @@ impl PortReuse { /// Has no effect if port reuse is disabled. fn unregister(&mut self, ip: IpAddr, port: Port) { if let PortReuse::Enabled { listen_addrs } = self { - log::trace!("Unregistering for port reuse: {}:{}", ip, port); + tracing::trace!(%ip, %port, "Unregistering for port reuse"); listen_addrs .write() .expect("`register()` and `unregister()` never panic while holding the lock") @@ -446,7 +446,7 @@ where } else { return Err(TransportError::MultiaddrNotSupported(addr)); }; - log::debug!("listening on {}", socket_addr); + tracing::debug!(address=%socket_addr, "listening on address"); let listener = self .do_listen(id, socket_addr) .map_err(TransportError::Other)?; @@ -472,14 +472,14 @@ where } else { return Err(TransportError::MultiaddrNotSupported(addr)); }; - log::debug!("dialing {}", socket_addr); + tracing::debug!(address=%socket_addr, "dialing address"); let socket = self .create_socket(socket_addr) .map_err(TransportError::Other)?; if let Some(addr) = self.port_reuse.local_dial_addr(&socket_addr.ip()) { - log::trace!("Binding dial socket to listen socket {}", addr); + tracing::trace!(address=%addr, "Binding dial socket to listen socket address"); socket.bind(&addr.into()).map_err(TransportError::Other)?; } @@ -538,6 +538,7 @@ where } /// Poll all listeners. 
+ #[tracing::instrument(level = "trace", name = "Transport::poll", skip(self, cx))] fn poll( mut self: Pin<&mut Self>, cx: &mut Context<'_>, @@ -677,7 +678,7 @@ where let ip = inet.addr(); if self.listen_addr.is_ipv4() == ip.is_ipv4() { let ma = ip_to_multiaddr(ip, my_listen_addr_port); - log::debug!("New listen address: {}", ma); + tracing::debug!(address=%ma, "New listen address"); self.port_reuse.register(ip, my_listen_addr_port); return Poll::Ready(TransportEvent::NewAddress { listener_id: self.listener_id, @@ -689,7 +690,7 @@ where let ip = inet.addr(); if self.listen_addr.is_ipv4() == ip.is_ipv4() { let ma = ip_to_multiaddr(ip, my_listen_addr_port); - log::debug!("Expired listen address: {}", ma); + tracing::debug!(address=%ma, "Expired listen address"); self.port_reuse.unregister(ip, my_listen_addr_port); return Poll::Ready(TransportEvent::AddressExpired { listener_id: self.listener_id, @@ -762,7 +763,11 @@ where let local_addr = ip_to_multiaddr(local_addr.ip(), local_addr.port()); let remote_addr = ip_to_multiaddr(remote_addr.ip(), remote_addr.port()); - log::debug!("Incoming connection from {} at {}", remote_addr, local_addr); + tracing::debug!( + remote_address=%remote_addr, + local_address=%local_addr, + "Incoming connection from remote at local" + ); return Poll::Ready(Some(TransportEvent::Incoming { listener_id: self.listener_id, @@ -900,7 +905,9 @@ mod tests { #[test] fn communicating_between_dialer_and_listener() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); @@ -969,7 +976,9 @@ mod tests { #[test] fn wildcard_expansion() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener(addr: Multiaddr, mut ready_tx: mpsc::Sender) { let mut tcp = Transport::::default().boxed(); @@ -1038,7 +1047,9 @@ mod tests { #[test] fn port_reuse_dialing() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listener( addr: Multiaddr, @@ -1145,7 +1156,9 @@ mod tests { #[test] fn port_reuse_listening() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listen_twice(addr: Multiaddr) { let mut tcp = Transport::::new(Config::new().port_reuse(true)); @@ -1199,7 +1212,9 @@ mod tests { #[test] fn listen_port_0() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn listen(addr: Multiaddr) -> Multiaddr { let mut tcp = Transport::::default().boxed(); @@ -1234,7 +1249,9 @@ mod tests { #[test] fn listen_invalid_addr() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); fn test(addr: Multiaddr) { #[cfg(feature = "async-io")] @@ -1304,7 +1321,9 @@ mod tests { #[test] fn test_remove_listener() { - env_logger::try_init().ok(); + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); async fn cycle_listeners() -> bool { let mut tcp = Transport::::default().boxed(); diff --git 
a/transports/uds/Cargo.toml b/transports/uds/Cargo.toml index f27f0647681..9d480fd1dbe 100644 --- a/transports/uds/Cargo.toml +++ b/transports/uds/Cargo.toml @@ -13,9 +13,9 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-std = { version = "1.6.2", optional = true } libp2p-core = { workspace = true } -log = "0.4.20" futures = "0.3.29" tokio = { version = "1.33", default-features = false, features = ["net"], optional = true } +tracing = "0.1.37" [dev-dependencies] tempfile = "3.8" diff --git a/transports/uds/src/lib.rs b/transports/uds/src/lib.rs index 3cd71552d18..075cbadb80a 100644 --- a/transports/uds/src/lib.rs +++ b/transports/uds/src/lib.rs @@ -49,7 +49,6 @@ use libp2p_core::{ transport::{TransportError, TransportEvent}, Transport, }; -use log::debug; use std::collections::VecDeque; use std::pin::Pin; use std::task::{Context, Poll}; @@ -104,7 +103,7 @@ macro_rules! codegen { stream::once({ let addr = addr.clone(); async move { - debug!("Now listening on {}", addr); + tracing::debug!(address=%addr, "Now listening on address"); Ok(TransportEvent::NewAddress { listener_id: id, listen_addr: addr, @@ -118,7 +117,7 @@ macro_rules! codegen { async move { let event = match listener.accept().await { Ok((stream, _)) => { - debug!("incoming connection on {}", addr); + tracing::debug!(address=%addr, "incoming connection on address"); TransportEvent::Incoming { upgrade: future::ok(stream), local_addr: addr.clone(), @@ -163,7 +162,7 @@ macro_rules! codegen { fn dial(&mut self, addr: Multiaddr) -> Result> { // TODO: Should we dial at all? if let Ok(path) = multiaddr_to_path(&addr) { - debug!("Dialing {}", addr); + tracing::debug!(address=%addr, "Dialing address"); Ok(async move { <$unix_stream>::connect(&path).await }.boxed()) } else { Err(TransportError::MultiaddrNotSupported(addr)) diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 847e54abbd5..3e2659c71c8 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -22,10 +22,10 @@ libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } libp2p-webrtc-utils = { workspace = true } -log = "0.4.19" send_wrapper = { version = "0.6.0", features = ["futures"] } serde = { version = "1.0", features = ["derive"] } thiserror = "1" +tracing = "0.1.37" wasm-bindgen = { version = "0.2.87" } wasm-bindgen-futures = { version = "0.4.37" } web-sys = { version = "0.3.64", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } diff --git a/transports/webrtc-websys/src/connection.rs b/transports/webrtc-websys/src/connection.rs index b026aec0b40..d0c8968f62e 100644 --- a/transports/webrtc-websys/src/connection.rs +++ b/transports/webrtc-websys/src/connection.rs @@ -47,16 +47,16 @@ impl Connection { let (mut tx_ondatachannel, rx_ondatachannel) = mpsc::channel(4); // we may get more than one data channel opened on a single peer connection let ondatachannel_closure = Closure::new(move |ev: RtcDataChannelEvent| { - log::trace!("New data channel"); + tracing::trace!("New data channel"); if let Err(e) = tx_ondatachannel.try_send(ev.channel()) { if e.is_full() { - log::warn!("Remote is opening too many data channels, we can't keep up!"); + tracing::warn!("Remote is 
opening too many data channels, we can't keep up!"); return; } if e.is_disconnected() { - log::warn!("Receiver is gone, are we shutting down?"); + tracing::warn!("Receiver is gone, are we shutting down?"); } } }); @@ -90,7 +90,7 @@ impl Connection { /// if they are used. fn close_connection(&mut self) { if !self.closed { - log::trace!("connection::close_connection"); + tracing::trace!("connection::close_connection"); self.inner.inner.close(); self.closed = true; } @@ -121,7 +121,7 @@ impl StreamMuxer for Connection { } None => { // This only happens if the [`RtcPeerConnection::ondatachannel`] closure gets freed which means we are most likely shutting down the connection. - log::debug!("`Sender` for inbound data channels has been dropped"); + tracing::debug!("`Sender` for inbound data channels has been dropped"); Poll::Ready(Err(Error::Connection("connection closed".to_owned()))) } } @@ -131,7 +131,7 @@ impl StreamMuxer for Connection { mut self: Pin<&mut Self>, _: &mut Context<'_>, ) -> Poll> { - log::trace!("Creating outbound data channel"); + tracing::trace!("Creating outbound data channel"); let data_channel = self.inner.new_regular_data_channel(); let stream = self.new_stream_from_data_channel(data_channel); @@ -144,7 +144,7 @@ impl StreamMuxer for Connection { mut self: Pin<&mut Self>, _cx: &mut Context<'_>, ) -> Poll> { - log::trace!("connection::poll_close"); + tracing::trace!("connection::poll_close"); self.close_connection(); Poll::Ready(Ok(())) @@ -158,7 +158,7 @@ impl StreamMuxer for Connection { match ready!(self.drop_listeners.poll_next_unpin(cx)) { Some(Ok(())) => {} Some(Err(e)) => { - log::debug!("a DropListener failed: {e}") + tracing::debug!("a DropListener failed: {e}") } None => { self.no_drop_listeners_waker = Some(cx.waker().clone()); diff --git a/transports/webrtc-websys/src/sdp.rs b/transports/webrtc-websys/src/sdp.rs index 6f50262b988..439182ea4db 100644 --- a/transports/webrtc-websys/src/sdp.rs +++ b/transports/webrtc-websys/src/sdp.rs @@ -46,7 +46,7 @@ pub(crate) fn offer(offer: String, client_ufrag: &str) -> RtcSessionDescriptionI // remove any double \r\n let munged_sdp_offer = munged_sdp_offer.replace("\r\n\r\n", "\r\n"); - log::trace!("Created SDP offer: {munged_sdp_offer}"); + tracing::trace!(offer=%munged_sdp_offer, "Created SDP offer"); let mut offer_obj = RtcSessionDescriptionInit::new(RtcSdpType::Offer); offer_obj.sdp(&munged_sdp_offer); diff --git a/transports/webrtc-websys/src/stream/poll_data_channel.rs b/transports/webrtc-websys/src/stream/poll_data_channel.rs index 9c9b19cdb32..0ee4f7920c9 100644 --- a/transports/webrtc-websys/src/stream/poll_data_channel.rs +++ b/transports/webrtc-websys/src/stream/poll_data_channel.rs @@ -53,7 +53,7 @@ impl PollDataChannel { let open_waker = open_waker.clone(); move |_: RtcDataChannelEvent| { - log::trace!("DataChannel opened"); + tracing::trace!("DataChannel opened"); open_waker.wake(); } }); @@ -65,7 +65,7 @@ impl PollDataChannel { let write_waker = write_waker.clone(); move |_: Event| { - log::trace!("DataChannel available for writing (again)"); + tracing::trace!("DataChannel available for writing (again)"); write_waker.wake(); } }); @@ -76,7 +76,7 @@ impl PollDataChannel { let close_waker = close_waker.clone(); move |_: Event| { - log::trace!("DataChannel closed"); + tracing::trace!("DataChannel closed"); close_waker.wake(); } }); @@ -98,7 +98,7 @@ impl PollDataChannel { if read_buffer.len() + data.length() as usize > MAX_MSG_LEN { overloaded.store(true, Ordering::SeqCst); - log::warn!("Remote is overloading 
us with messages, resetting stream",); + tracing::warn!("Remote is overloading us with messages, resetting stream",); return; } diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs index 092baed50c4..cc053835041 100644 --- a/transports/webrtc-websys/src/upgrade.rs +++ b/transports/webrtc-websys/src/upgrade.rs @@ -45,12 +45,12 @@ async fn outbound_inner( let local_fingerprint = rtc_peer_connection.local_fingerprint()?; - log::trace!("local_fingerprint: {:?}", local_fingerprint); - log::trace!("remote_fingerprint: {:?}", remote_fingerprint); + tracing::trace!(?local_fingerprint); + tracing::trace!(?remote_fingerprint); let peer_id = noise::outbound(id_keys, channel, remote_fingerprint, local_fingerprint).await?; - log::debug!("Remote peer identified as {peer_id}"); + tracing::debug!(peer=%peer_id, "Remote peer identified"); Ok((peer_id, Connection::new(rtc_peer_connection))) } diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index 2379c299527..e8306f7a8ba 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -21,7 +21,6 @@ libp2p-core = { workspace = true } libp2p-noise = { workspace = true } libp2p-identity = { workspace = true } libp2p-webrtc-utils = { workspace = true } -log = "0.4" multihash = { workspace = true } rand = "0.8" rcgen = "0.11.3" @@ -31,6 +30,7 @@ thiserror = "1" tinytemplate = "1.2" tokio = { version = "1.33", features = ["net"], optional = true } tokio-util = { version = "0.7", features = ["compat"], optional = true } +tracing = "0.1.37" webrtc = { version = "0.9.0", optional = true } [features] @@ -38,10 +38,11 @@ tokio = ["dep:tokio", "dep:tokio-util", "dep:webrtc", "if-watch/tokio"] pem = ["webrtc?/pem"] [dev-dependencies] -env_logger = "0.10" libp2p-identity = { workspace = true, features = ["rand"] } tokio = { version = "1.33", features = ["full"] } quickcheck = "1.0.3" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + [[test]] name = "smoke" diff --git a/transports/webrtc/src/tokio/connection.rs b/transports/webrtc/src/tokio/connection.rs index 29983d720b5..3bcc4c3193e 100644 --- a/transports/webrtc/src/tokio/connection.rs +++ b/transports/webrtc/src/tokio/connection.rs @@ -101,7 +101,7 @@ impl Connection { tx: Arc>>>, ) { rtc_conn.on_data_channel(Box::new(move |data_channel: Arc| { - log::debug!("Incoming data channel {}", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Incoming data channel"); let tx = tx.clone(); @@ -109,7 +109,7 @@ impl Connection { data_channel.on_open({ let data_channel = data_channel.clone(); Box::new(move || { - log::debug!("Data channel {} open", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Data channel open"); Box::pin(async move { let data_channel = data_channel.clone(); @@ -118,7 +118,7 @@ impl Connection { Ok(detached) => { let mut tx = tx.lock().await; if let Err(e) = tx.try_send(detached.clone()) { - log::error!("Can't send data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't send data channel: {}", e); // We're not accepting data channels fast enough => // close this channel. // @@ -126,16 +126,16 @@ impl Connection { // during the negotiation process, but it's not // possible with the current API. 
if let Err(e) = detached.close().await { - log::error!( - "Failed to close data channel {}: {}", - id, + tracing::error!( + channel=%id, + "Failed to close data channel: {}", e ); } } } Err(e) => { - log::error!("Can't detach data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't detach data channel: {}", e); } }; }) @@ -156,7 +156,7 @@ impl StreamMuxer for Connection { ) -> Poll> { match ready!(self.incoming_data_channels_rx.poll_next_unpin(cx)) { Some(detached) => { - log::trace!("Incoming stream {}", detached.stream_identifier()); + tracing::trace!(stream=%detached.stream_identifier(), "Incoming stream"); let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); @@ -185,7 +185,7 @@ impl StreamMuxer for Connection { match ready!(self.drop_listeners.poll_next_unpin(cx)) { Some(Ok(())) => {} Some(Err(e)) => { - log::debug!("a DropListener failed: {e}") + tracing::debug!("a DropListener failed: {e}") } None => { self.no_drop_listeners_waker = Some(cx.waker().clone()); @@ -208,7 +208,7 @@ impl StreamMuxer for Connection { // No need to hold the lock during the DTLS handshake. drop(peer_conn); - log::trace!("Opening data channel {}", data_channel.id()); + tracing::trace!(channel=%data_channel.id(), "Opening data channel"); let (tx, rx) = oneshot::channel::>(); @@ -226,7 +226,7 @@ impl StreamMuxer for Connection { Ok(detached) => { self.outbound_fut = None; - log::trace!("Outbound stream {}", detached.stream_identifier()); + tracing::trace!(stream=%detached.stream_identifier(), "Outbound stream"); let (stream, drop_listener) = Stream::new(detached); self.drop_listeners.push(drop_listener); @@ -244,7 +244,7 @@ impl StreamMuxer for Connection { } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - log::debug!("Closing connection"); + tracing::debug!("Closing connection"); let peer_conn = self.peer_conn.clone(); let fut = self.close_fut.get_or_insert(Box::pin(async move { @@ -275,7 +275,7 @@ pub(crate) async fn register_data_channel_open_handler( data_channel.on_open({ let data_channel = data_channel.clone(); Box::new(move || { - log::debug!("Data channel {} open", data_channel.id()); + tracing::debug!(channel=%data_channel.id(), "Data channel open"); Box::pin(async move { let data_channel = data_channel.clone(); @@ -283,14 +283,14 @@ pub(crate) async fn register_data_channel_open_handler( match data_channel.detach().await { Ok(detached) => { if let Err(e) = data_channel_tx.send(detached.clone()) { - log::error!("Can't send data channel {}: {:?}", id, e); + tracing::error!(channel=%id, "Can't send data channel: {:?}", e); if let Err(e) = detached.close().await { - log::error!("Failed to close data channel {}: {}", id, e); + tracing::error!(channel=%id, "Failed to close data channel: {}", e); } } } Err(e) => { - log::error!("Can't detach data channel {}: {}", id, e); + tracing::error!(channel=%id, "Can't detach data channel: {}", e); } }; }) diff --git a/transports/webrtc/src/tokio/sdp.rs b/transports/webrtc/src/tokio/sdp.rs index e49345a01b2..8549a864dcc 100644 --- a/transports/webrtc/src/tokio/sdp.rs +++ b/transports/webrtc/src/tokio/sdp.rs @@ -49,7 +49,7 @@ pub(crate) fn offer(addr: SocketAddr, client_ufrag: &str) -> RTCSessionDescripti client_ufrag, ); - log::trace!("Created SDP offer: {offer}"); + tracing::trace!(offer=%offer, "Created SDP offer"); RTCSessionDescription::offer(offer).unwrap() } diff --git a/transports/webrtc/src/tokio/transport.rs b/transports/webrtc/src/tokio/transport.rs index 4b3f15d5978..b50e44fe4ba 
100644 --- a/transports/webrtc/src/tokio/transport.rs +++ b/transports/webrtc/src/tokio/transport.rs @@ -238,7 +238,7 @@ impl ListenStream { /// terminate the stream. fn close(&mut self, reason: Result<(), Error>) { match self.report_closed { - Some(_) => log::debug!("Listener was already closed."), + Some(_) => tracing::debug!("Listener was already closed"), None => { // Report the listener event as closed. let _ = self diff --git a/transports/webrtc/src/tokio/udp_mux.rs b/transports/webrtc/src/tokio/udp_mux.rs index f978121d01c..20e04edaf72 100644 --- a/transports/webrtc/src/tokio/udp_mux.rs +++ b/transports/webrtc/src/tokio/udp_mux.rs @@ -175,7 +175,7 @@ impl UDPMuxNewAddr { None } Err(e) => { - log::debug!("{} (addr={})", e, addr); + tracing::debug!(address=%addr, "{}", e); None } } @@ -342,7 +342,7 @@ impl UDPMuxNewAddr { match self.conn_from_stun_message(read.filled(), &addr) { Some(Ok(s)) => Some(s), Some(Err(e)) => { - log::debug!("addr={}: Error when querying existing connections: {}", &addr, e); + tracing::debug!(address=%&addr, "Error when querying existing connections: {}", e); continue; } None => None, @@ -357,20 +357,20 @@ impl UDPMuxNewAddr { if !self.new_addrs.contains(&addr) { match ufrag_from_stun_message(read.filled(), false) { Ok(ufrag) => { - log::trace!( - "Notifying about new address addr={} from ufrag={}", - &addr, - ufrag - ); + tracing::trace!( + address=%&addr, + %ufrag, + "Notifying about new address from ufrag", + ); self.new_addrs.insert(addr); return Poll::Ready(UDPMuxEvent::NewAddr( NewAddr { addr, ufrag }, )); } Err(e) => { - log::debug!( - "Unknown address addr={} (non STUN packet: {})", - &addr, + tracing::debug!( + address=%&addr, + "Unknown address (non STUN packet: {})", e ); } @@ -384,10 +384,10 @@ impl UDPMuxNewAddr { async move { if let Err(err) = conn.write_packet(&packet, addr).await { - log::error!( - "Failed to write packet: {} (addr={})", + tracing::error!( + address=%addr, + "Failed to write packet: {}", err, - addr ); } } @@ -401,10 +401,10 @@ impl UDPMuxNewAddr { Poll::Pending => {} Poll::Ready(Err(err)) if err.kind() == ErrorKind::TimedOut => {} Poll::Ready(Err(err)) if err.kind() == ErrorKind::ConnectionReset => { - log::debug!("ConnectionReset by remote client {err:?}") + tracing::debug!("ConnectionReset by remote client {err:?}") } Poll::Ready(Err(err)) => { - log::error!("Could not read udp packet: {}", err); + tracing::error!("Could not read udp packet: {}", err); return Poll::Ready(UDPMuxEvent::Error(err)); } } @@ -470,7 +470,7 @@ impl UDPMux for UdpMuxHandle { async fn remove_conn_by_ufrag(&self, ufrag: &str) { if let Err(e) = self.remove_sender.send(ufrag.to_owned()).await { - log::debug!("Failed to send message through channel: {:?}", e); + tracing::debug!("Failed to send message through channel: {:?}", e); } } } @@ -511,12 +511,12 @@ impl UDPMuxWriter for UdpMuxWriterHandle { { Ok(()) => {} Err(e) => { - log::debug!("Failed to send message through channel: {:?}", e); + tracing::debug!("Failed to send message through channel: {:?}", e); return; } } - log::debug!("Registered {} for {}", addr, conn.key()); + tracing::debug!(address=%addr, connection=%conn.key(), "Registered address for connection"); } async fn send_to(&self, buf: &[u8], target: &SocketAddr) -> Result { diff --git a/transports/webrtc/src/tokio/upgrade.rs b/transports/webrtc/src/tokio/upgrade.rs index 414fc2721d0..4145a5e7510 100644 --- a/transports/webrtc/src/tokio/upgrade.rs +++ b/transports/webrtc/src/tokio/upgrade.rs @@ -49,19 +49,16 @@ pub(crate) async fn 
outbound( server_fingerprint: Fingerprint, id_keys: identity::Keypair, ) -> Result<(PeerId, Connection), Error> { - log::debug!("new outbound connection to {addr})"); + tracing::debug!(address=%addr, "new outbound connection to address"); let (peer_connection, ufrag) = new_outbound_connection(addr, config, udp_mux).await?; let offer = peer_connection.create_offer(None).await?; - log::debug!("created SDP offer for outbound connection: {:?}", offer.sdp); + tracing::debug!(offer=%offer.sdp, "created SDP offer for outbound connection"); peer_connection.set_local_description(offer).await?; let answer = sdp::answer(addr, server_fingerprint, &ufrag); - log::debug!( - "calculated SDP answer for outbound connection: {:?}", - answer - ); + tracing::debug!(?answer, "calculated SDP answer for outbound connection"); peer_connection.set_remote_description(answer).await?; // This will start the gathering of ICE candidates. let data_channel = create_substream_for_noise_handshake(&peer_connection).await?; @@ -85,16 +82,16 @@ pub(crate) async fn inbound( remote_ufrag: String, id_keys: identity::Keypair, ) -> Result<(PeerId, Connection), Error> { - log::debug!("new inbound connection from {addr} (ufrag: {remote_ufrag})"); + tracing::debug!(address=%addr, ufrag=%remote_ufrag, "new inbound connection from address"); let peer_connection = new_inbound_connection(addr, config, udp_mux, &remote_ufrag).await?; let offer = sdp::offer(addr, &remote_ufrag); - log::debug!("calculated SDP offer for inbound connection: {:?}", offer); + tracing::debug!(?offer, "calculated SDP offer for inbound connection"); peer_connection.set_remote_description(offer).await?; let answer = peer_connection.create_answer(None).await?; - log::debug!("created SDP answer for inbound connection: {:?}", answer); + tracing::debug!(?answer, "created SDP answer for inbound connection"); peer_connection.set_local_description(answer).await?; // This will start the gathering of ICE candidates. let data_channel = create_substream_for_noise_handshake(&peer_connection).await?; diff --git a/transports/webrtc/tests/smoke.rs b/transports/webrtc/tests/smoke.rs index ce94da0aea8..6e83f75f0d4 100644 --- a/transports/webrtc/tests/smoke.rs +++ b/transports/webrtc/tests/smoke.rs @@ -33,10 +33,13 @@ use std::num::NonZeroU8; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; +use tracing_subscriber::EnvFilter; #[tokio::test] async fn smoke() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let (a_peer_id, mut a_transport) = create_transport(); let (b_peer_id, mut b_transport) = create_transport(); @@ -53,7 +56,9 @@ async fn smoke() { // Note: This test should likely be ported to the muxer compliance test suite. #[test] fn concurrent_connections_and_streams_tokio() { - let _ = env_logger::try_init(); + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); let rt = tokio::runtime::Runtime::new().unwrap(); let _guard = rt.enter(); @@ -102,7 +107,11 @@ fn prop(number_listeners: NonZeroU8, number_streams: NonZeroU8) -> quickcheck::T let (listeners_tx, mut listeners_rx) = mpsc::channel(number_listeners); - log::info!("Creating {number_streams} streams on {number_listeners} connections"); + tracing::info!( + stream_count=%number_streams, + connection_count=%number_listeners, + "Creating streams on connections" + ); // Spawn the listener nodes. 
for _ in 0..number_listeners { @@ -244,7 +253,7 @@ async fn open_outbound_streams( }); } - log::info!("Created {number_streams} streams"); + tracing::info!(stream_count=%number_streams, "Created streams"); while future::poll_fn(|cx| connection.poll_unpin(cx)) .await diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 0e9c5796b97..779cc4d8602 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -15,7 +15,7 @@ bytes = "1.4.0" futures = "0.3.29" js-sys = "0.3.61" libp2p-core = { workspace = true } -log = "0.4.19" +tracing = "0.1.37" parking_lot = "0.12.1" send_wrapper = "0.6.0" thiserror = "1.0.50" diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs index 24ca4fdce5d..b4f7566f95e 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -278,7 +278,7 @@ impl Connection { let mut read_buffer = read_buffer.lock().unwrap(); if read_buffer.len() + data.length() as usize > MAX_BUFFER { - log::warn!("Remote is overloading us with messages, closing connection"); + tracing::warn!("Remote is overloading us with messages, closing connection"); errored.store(true, Ordering::SeqCst); return; diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index 77616e1cefd..b4c56539139 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -16,11 +16,11 @@ either = "1.9.0" futures = "0.3.29" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -log = "0.4.20" parking_lot = "0.12.0" pin-project-lite = "0.2.13" rw-stream-sink = { workspace = true } soketto = "0.7.0" +tracing = "0.1.37" url = "2.4" webpki-roots = "0.25" diff --git a/transports/websocket/src/framed.rs b/transports/websocket/src/framed.rs index 07013973fdc..3593e1eaff2 100644 --- a/transports/websocket/src/framed.rs +++ b/transports/websocket/src/framed.rs @@ -28,7 +28,6 @@ use libp2p_core::{ transport::{ListenerId, TransportError, TransportEvent}, Transport, }; -use log::{debug, trace}; use parking_lot::Mutex; use soketto::{ connection::{self, CloseReason}, @@ -127,13 +126,13 @@ where if self.tls_config.server.is_some() { p } else { - debug!("/wss address but TLS server support is not configured"); + tracing::debug!("/wss address but TLS server support is not configured"); return Err(TransportError::MultiaddrNotSupported(addr)); } } Some(p @ Protocol::Ws(_)) => p, _ => { - debug!("{} is not a websocket multiaddr", addr); + tracing::debug!(address=%addr, "Address is not a websocket multiaddr"); return Err(TransportError::MultiaddrNotSupported(addr)); } }; @@ -187,7 +186,7 @@ where .get(&listener_id) .expect("Protocol was inserted in Transport::listen_on."); listen_addr.push(proto.clone()); - debug!("Listening on {}", listen_addr); + tracing::debug!(address=%listen_addr, "Listening on address"); TransportEvent::NewAddress { listener_id, listen_addr, @@ -288,7 +287,7 @@ where { Ok(Either::Left(redirect)) => { if remaining_redirects == 0 { - debug!("Too many redirects (> {})", max_redirects); + tracing::debug!(%max_redirects, "Too many redirects"); return Err(Error::TooManyRedirects); } remaining_redirects -= 1; @@ -310,7 +309,7 @@ where tls_config: tls::Config, role_override: Endpoint, ) -> Result>, Error> { - trace!("Dialing websocket address: {:?}", addr); + tracing::trace!(address=?addr, "Dialing websocket address"); let dial = match role_override { Endpoint::Dialer => 
transport.lock().dial(addr.tcp_addr), @@ -322,19 +321,19 @@ where })?; let stream = dial.map_err(Error::Transport).await?; - trace!("TCP connection to {} established.", addr.host_port); + tracing::trace!(port=%addr.host_port, "TCP connection established"); let stream = if addr.use_tls { // begin TLS session let dns_name = addr .dns_name .expect("for use_tls we have checked that dns_name is some"); - trace!("Starting TLS handshake with {:?}", dns_name); + tracing::trace!(?dns_name, "Starting TLS handshake"); let stream = tls_config .client .connect(dns_name.clone(), stream) .map_err(|e| { - debug!("TLS handshake with {:?} failed: {}", dns_name, e); + tracing::debug!(?dns_name, "TLS handshake failed: {}", e); Error::Tls(tls::Error::from(e)) }) .await?; @@ -346,7 +345,7 @@ where future::Either::Right(stream) }; - trace!("Sending websocket handshake to {}", addr.host_port); + tracing::trace!(port=%addr.host_port, "Sending websocket handshake"); let mut client = handshake::Client::new(stream, &addr.host_port, addr.path.as_ref()); @@ -359,9 +358,10 @@ where status_code, location, } => { - debug!( - "received redirect ({}); location: {}", - status_code, location + tracing::debug!( + %status_code, + %location, + "received redirect" ); Ok(Either::Left(location)) } @@ -370,7 +370,7 @@ where Err(Error::Handshake(msg.into())) } handshake::ServerResponse::Accepted { .. } => { - trace!("websocket handshake with {} successful", addr.host_port); + tracing::trace!(port=%addr.host_port, "websocket handshake successful"); Ok(Either::Right(Connection::new(client.into_builder()))) } } @@ -388,7 +388,7 @@ where async move { let stream = upgrade.map_err(Error::Transport).await?; - trace!("incoming connection from {}", remote_addr); + tracing::trace!(address=%remote_addr, "incoming connection from address"); let stream = if use_tls { // begin TLS session let server = tls_config .server .expect("for use_tls we checked server is not none"); - trace!("awaiting TLS handshake with {}", remote_addr); + tracing::trace!(address=%remote_addr, "awaiting TLS handshake with address"); let stream = server .accept(stream) .map_err(move |e| { - debug!("TLS handshake with {} failed: {}", remote_addr, e); + tracing::debug!(address=%remote_addr, "TLS handshake with address failed: {}", e); Error::Tls(tls::Error::from(e)) }) .await?; @@ -414,9 +414,9 @@ where future::Either::Right(stream) }; - trace!( - "receiving websocket handshake request from {}", - remote_addr2 + tracing::trace!( + address=%remote_addr2, + "receiving websocket handshake request from address" ); let mut server = handshake::Server::new(stream); @@ -429,9 +429,9 @@ where request.key() }; - trace!( - "accepting websocket handshake request from {}", - remote_addr2 + tracing::trace!( + address=%remote_addr2, + "accepting websocket handshake request from address" ); let response = handshake::server::Response::Accept { @@ -511,7 +511,7 @@ fn parse_ws_dial_addr(addr: Multiaddr) -> Result> { Some(Protocol::Ws(path)) => break (false, path.into_owned()), Some(Protocol::Wss(path)) => { if dns_name.is_none() { - debug!("Missing DNS name in WSS address: {}", addr); + tracing::debug!(address=%addr, "Missing DNS name in WSS address"); return Err(Error::InvalidMultiaddr(addr)); } break (true, path.into_owned()); @@ -556,13 +556,13 @@ fn location_to_multiaddr(location: &str) -> Result> { } else if s.eq_ignore_ascii_case("http") | s.eq_ignore_ascii_case("ws") { a.push(Protocol::Ws(url.path().into())) } else { - debug!("unsupported scheme: {}", s); +
tracing::debug!(scheme=%s, "unsupported scheme"); return Err(Error::InvalidRedirectLocation); } Ok(a) } Err(e) => { - debug!("failed to parse url as multi-address: {:?}", e); + tracing::debug!("failed to parse url as multi-address: {:?}", e); Err(Error::InvalidRedirectLocation) } } diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index cbc340e9244..49053349298 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -19,11 +19,11 @@ js-sys = "0.3.64" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } -log = "0.4.20" multiaddr = { workspace = true } multihash = { workspace = true } send_wrapper = { version = "0.6.0", features = ["futures"] } thiserror = "1.0.50" +tracing = "0.1.37" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" web-sys = { version = "0.3.64", features = [ diff --git a/transports/webtransport-websys/src/transport.rs b/transports/webtransport-websys/src/transport.rs index dcb3639a194..cb556ffef99 100644 --- a/transports/webtransport-websys/src/transport.rs +++ b/transports/webtransport-websys/src/transport.rs @@ -65,7 +65,7 @@ impl libp2p_core::Transport for Transport { fn dial(&mut self, addr: Multiaddr) -> Result> { let endpoint = Endpoint::from_multiaddr(&addr).map_err(|e| match e { e @ Error::InvalidMultiaddr(_) => { - log::warn!("{}", e); + tracing::warn!("{}", e); TransportError::MultiaddrNotSupported(addr) } e => TransportError::Other(e), From a428ffdb7df519801e601cdf7a895333b8dfe0da Mon Sep 17 00:00:00 2001 From: Dave Huseby Date: Thu, 2 Nov 2023 10:28:49 -0600 Subject: [PATCH 21/33] feat(swarm): add `#[non_exhaustive]` to key enums Add `#[non_exhaustive]` to the following enums so that future additions don't cause breaking changes in downstream client code: `FromSwarm`, `ToSwarm`, `SwarmEvent`, `ConnectionHandlerEvent`, `ConnectionEvent`. Related: #4543. Pull-Request: #4581. 
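For context, a minimal sketch of what `#[non_exhaustive]` buys us (using an illustrative `Event` enum and `handle` function, not the real `libp2p-swarm` types): once an enum carries the attribute, crates outside the defining crate can no longer match on it exhaustively and must keep a wildcard arm, which is why the hunks below collapse the long variant lists into `_ => {}`.

    // Illustrative sketch only; `Event` and `handle` are made-up names.
    #[non_exhaustive]
    pub enum Event {
        ConnectionEstablished,
        ConnectionClosed,
    }

    fn handle(event: Event) {
        match event {
            Event::ConnectionEstablished => println!("connected"),
            // In a crate that merely *uses* `Event`, this wildcard arm is mandatory:
            // it keeps the match compiling when a new variant is added upstream.
            _ => println!("something else"),
        }
    }

    fn main() {
        handle(Event::ConnectionClosed);
    }

The trade-off is that downstream matches now silently ignore new variants instead of failing to compile, so behaviours that want to react to every event need to revisit their wildcard arms on upgrade.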
--- .github/workflows/ci.yml | 2 +- Cargo.toml | 2 +- misc/allow-block-list/src/lib.rs | 18 +---- misc/connection-limits/src/lib.rs | 10 +-- misc/metrics/src/swarm.rs | 51 +++++------- protocols/autonat/src/behaviour.rs | 1 + protocols/dcutr/src/behaviour.rs | 11 +-- protocols/dcutr/src/handler/relayed.rs | 4 +- protocols/floodsub/src/layer.rs | 12 +-- protocols/gossipsub/src/behaviour.rs | 11 +-- protocols/gossipsub/src/handler.rs | 5 +- protocols/identify/src/behaviour.rs | 11 +-- protocols/identify/src/handler.rs | 4 +- protocols/kad/src/behaviour.rs | 10 +-- protocols/kad/src/handler.rs | 4 +- protocols/perf/src/client/behaviour.rs | 12 +-- protocols/perf/src/client/handler.rs | 1 + protocols/perf/src/server/behaviour.rs | 18 +---- protocols/perf/src/server/handler.rs | 1 + protocols/ping/src/handler.rs | 5 +- protocols/ping/src/lib.rs | 18 +---- protocols/relay/src/behaviour.rs | 18 +---- protocols/relay/src/behaviour/handler.rs | 5 +- protocols/relay/src/priv_client.rs | 11 +-- protocols/relay/src/priv_client/handler.rs | 4 +- protocols/rendezvous/src/client.rs | 11 +-- protocols/rendezvous/src/server.rs | 1 + protocols/request-response/src/handler.rs | 4 +- protocols/request-response/src/lib.rs | 10 +-- protocols/upnp/src/behaviour.rs | 12 +-- swarm-derive/CHANGELOG.md | 2 +- swarm-derive/src/lib.rs | 93 +++++++--------------- swarm/CHANGELOG.md | 2 + swarm/src/behaviour.rs | 2 + swarm/src/dummy.rs | 18 +---- swarm/src/handler.rs | 2 + swarm/src/lib.rs | 1 + swarm/src/test.rs | 18 +---- swarm/tests/swarm_derive.rs | 18 +---- 39 files changed, 93 insertions(+), 350 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8802871dc9d..3ccaa53f381 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -70,7 +70,7 @@ jobs: echo "Package version: $PACKAGE_VERSION"; echo "Specified version: $SPECIFIED_VERSION"; - test "$PACKAGE_VERSION" = "$SPECIFIED_VERSION" + test "$PACKAGE_VERSION" = "$SPECIFIED_VERSION" || test "=$PACKAGE_VERSION" = "$SPECIFIED_VERSION" - name: Ensure manifest and CHANGELOG are properly updated if: > diff --git a/Cargo.toml b/Cargo.toml index 4be55edb103..d676d69a57a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -99,7 +99,7 @@ libp2p-rendezvous = { version = "0.14.0", path = "protocols/rendezvous" } libp2p-request-response = { version = "0.26.0", path = "protocols/request-response" } libp2p-server = { version = "0.12.3", path = "misc/server" } libp2p-swarm = { version = "0.44.0", path = "swarm" } -libp2p-swarm-derive = { version = "0.34.0", path = "swarm-derive" } +libp2p-swarm-derive = { version = "=0.34.0", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. 
libp2p-swarm-test = { version = "0.3.0", path = "swarm-test" } libp2p-tcp = { version = "0.41.0", path = "transports/tcp" } libp2p-tls = { version = "0.3.0", path = "transports/tls" } diff --git a/misc/allow-block-list/src/lib.rs b/misc/allow-block-list/src/lib.rs index 9f2524733e6..c1d31433db1 100644 --- a/misc/allow-block-list/src/lib.rs +++ b/misc/allow-block-list/src/lib.rs @@ -231,23 +231,7 @@ where Ok(dummy::ConnectionHandler) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionClosed(_) => {} - FromSwarm::ConnectionEstablished(_) => {} - FromSwarm::AddressChange(_) => {} - FromSwarm::DialFailure(_) => {} - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddrCandidate(_) => {} - FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index d0ea3436177..af76e9a57d9 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -340,18 +340,10 @@ impl NetworkBehaviour for Behaviour { FromSwarm::DialFailure(DialFailure { connection_id, .. }) => { self.pending_outbound_connections.remove(&connection_id); } - FromSwarm::AddressChange(_) => {} FromSwarm::ListenFailure(ListenFailure { connection_id, .. }) => { self.pending_inbound_connections.remove(&connection_id); } - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddrCandidate(_) => {} - FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index 20d3ce2eff3..fff28e5f639 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -23,7 +23,7 @@ use std::sync::{Arc, Mutex}; use crate::protocol_stack; use instant::Instant; -use libp2p_swarm::{ConnectionId, SwarmEvent}; +use libp2p_swarm::{ConnectionId, DialError, SwarmEvent}; use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; use prometheus_client::metrics::counter::Counter; use prometheus_client::metrics::family::Family; @@ -185,11 +185,11 @@ impl Metrics { } } -impl super::Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl super::Recorder> for Metrics { + fn record(&self, event: &SwarmEvent) { match event { - libp2p_swarm::SwarmEvent::Behaviour(_) => {} - libp2p_swarm::SwarmEvent::ConnectionEstablished { + SwarmEvent::Behaviour(_) => {} + SwarmEvent::ConnectionEstablished { endpoint, established_in: time_taken, connection_id, @@ -208,7 +208,7 @@ impl super::Recorder super::Recorder { + SwarmEvent::IncomingConnection { send_back_addr, .. } => { self.connections_incoming .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(send_back_addr), }) .inc(); } - libp2p_swarm::SwarmEvent::IncomingConnectionError { + SwarmEvent::IncomingConnectionError { error, send_back_addr, .. @@ -250,7 +250,7 @@ impl super::Recorder { + SwarmEvent::OutgoingConnectionError { error, peer_id, .. 
} => { let peer = match peer_id { Some(_) => PeerStatus::Known, None => PeerStatus::Unknown, @@ -263,7 +263,7 @@ impl super::Recorder { + DialError::Transport(errors) => { for (_multiaddr, error) in errors { match error { libp2p_core::transport::TransportError::MultiaddrNotSupported( @@ -277,39 +277,31 @@ impl super::Recorder { - record(OutgoingConnectionError::LocalPeerId) - } - libp2p_swarm::DialError::NoAddresses => { - record(OutgoingConnectionError::NoAddresses) - } - libp2p_swarm::DialError::DialPeerConditionFalse(_) => { + DialError::LocalPeerId { .. } => record(OutgoingConnectionError::LocalPeerId), + DialError::NoAddresses => record(OutgoingConnectionError::NoAddresses), + DialError::DialPeerConditionFalse(_) => { record(OutgoingConnectionError::DialPeerConditionFalse) } - libp2p_swarm::DialError::Aborted => record(OutgoingConnectionError::Aborted), - libp2p_swarm::DialError::WrongPeerId { .. } => { - record(OutgoingConnectionError::WrongPeerId) - } - libp2p_swarm::DialError::Denied { .. } => { - record(OutgoingConnectionError::Denied) - } + DialError::Aborted => record(OutgoingConnectionError::Aborted), + DialError::WrongPeerId { .. } => record(OutgoingConnectionError::WrongPeerId), + DialError::Denied { .. } => record(OutgoingConnectionError::Denied), }; } - libp2p_swarm::SwarmEvent::NewListenAddr { address, .. } => { + SwarmEvent::NewListenAddr { address, .. } => { self.new_listen_addr .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(address), }) .inc(); } - libp2p_swarm::SwarmEvent::ExpiredListenAddr { address, .. } => { + SwarmEvent::ExpiredListenAddr { address, .. } => { self.expired_listen_addr .get_or_create(&AddressLabels { protocols: protocol_stack::as_string(address), }) .inc(); } - libp2p_swarm::SwarmEvent::ListenerClosed { addresses, .. } => { + SwarmEvent::ListenerClosed { addresses, .. } => { for address in addresses { self.listener_closed .get_or_create(&AddressLabels { @@ -318,10 +310,10 @@ impl super::Recorder { + SwarmEvent::ListenerError { .. } => { self.listener_error.inc(); } - libp2p_swarm::SwarmEvent::Dialing { .. } => { + SwarmEvent::Dialing { .. 
} => { self.dial_attempt.inc(); } SwarmEvent::NewExternalAddrCandidate { address } => { @@ -345,6 +337,7 @@ impl super::Recorder {} } } } diff --git a/protocols/autonat/src/behaviour.rs b/protocols/autonat/src/behaviour.rs index 06c945eb888..27f5474c924 100644 --- a/protocols/autonat/src/behaviour.rs +++ b/protocols/autonat/src/behaviour.rs @@ -593,6 +593,7 @@ impl NetworkBehaviour for Behaviour { self.inner.on_swarm_event(listener_closed) } confirmed @ FromSwarm::ExternalAddrConfirmed(_) => self.inner.on_swarm_event(confirmed), + _ => {} } } diff --git a/protocols/dcutr/src/behaviour.rs b/protocols/dcutr/src/behaviour.rs index d0b46abb0b4..b644d90b6f2 100644 --- a/protocols/dcutr/src/behaviour.rs +++ b/protocols/dcutr/src/behaviour.rs @@ -339,16 +339,7 @@ impl NetworkBehaviour for Behaviour { FromSwarm::NewExternalAddrCandidate(NewExternalAddrCandidate { addr }) => { self.address_candidates.add(addr.clone()); } - FromSwarm::AddressChange(_) - | FromSwarm::ConnectionEstablished(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index b4daefce15f..4d9bf3c910d 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -312,9 +312,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } diff --git a/protocols/floodsub/src/layer.rs b/protocols/floodsub/src/layer.rs index 7fa9f3001b1..cfd10024ca3 100644 --- a/protocols/floodsub/src/layer.rs +++ b/protocols/floodsub/src/layer.rs @@ -490,17 +490,7 @@ impl NetworkBehaviour for Floodsub { FromSwarm::ConnectionClosed(connection_closed) => { self.on_connection_closed(connection_closed) } - FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index f1069658b73..4ad0784f807 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -3458,16 +3458,7 @@ where self.on_connection_closed(connection_closed) } FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), - FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 4f3dd5c9f63..55480384ffa 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -533,10 +533,7 @@ impl 
ConnectionHandler for Handler { }) => { tracing::debug!("Protocol negotiation failed: {e}") } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } Handler::Disabled(_) => {} diff --git a/protocols/identify/src/behaviour.rs b/protocols/identify/src/behaviour.rs index 75ddfc812bf..25e2b14bdcd 100644 --- a/protocols/identify/src/behaviour.rs +++ b/protocols/identify/src/behaviour.rs @@ -395,16 +395,7 @@ impl NetworkBehaviour for Behaviour { } } } - FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index 963397e2274..5012868c17a 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -407,9 +407,6 @@ impl ConnectionHandler for Handler { )); self.trigger_next_identify.reset(self.interval); } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} ConnectionEvent::LocalProtocolsChange(change) => { let before = tracing::enabled!(Level::DEBUG) .then(|| self.local_protocols_to_string()) @@ -436,6 +433,7 @@ impl ConnectionHandler for Handler { }); } } + _ => {} } } } diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index cc80b9c1be9..fc942cf635a 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -2554,15 +2554,7 @@ where } FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), - FromSwarm::ExpiredListenAddr(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } } diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 0f36800a904..21dad8a82b9 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -777,9 +777,6 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::LocalProtocolsChange(_) => {} ConnectionEvent::RemoteProtocolsChange(change) => { let dirty = self.remote_supported_protocols.on_protocols_change(change); @@ -795,6 +792,7 @@ impl ConnectionHandler for Handler { )) } } + _ => {} } } } diff --git a/protocols/perf/src/client/behaviour.rs b/protocols/perf/src/client/behaviour.rs index 79c73d55102..216c10be9c0 100644 --- a/protocols/perf/src/client/behaviour.rs +++ b/protocols/perf/src/client/behaviour.rs @@ -121,17 +121,7 @@ impl NetworkBehaviour for Behaviour { assert!(self.connected.remove(&peer_id)); } } - FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | 
FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index d5d05284a85..0cc2dd23abe 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -150,6 +150,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { void::unreachable(error) } + _ => {} } } diff --git a/protocols/perf/src/server/behaviour.rs b/protocols/perf/src/server/behaviour.rs index 370bc2ae188..8f5ac93e1e0 100644 --- a/protocols/perf/src/server/behaviour.rs +++ b/protocols/perf/src/server/behaviour.rs @@ -75,23 +75,7 @@ impl NetworkBehaviour for Behaviour { Ok(Handler::default()) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) => {} - FromSwarm::ConnectionClosed(_) => {} - FromSwarm::AddressChange(_) => {} - FromSwarm::DialFailure(_) => {} - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddrCandidate(_) => {} - FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index 7f262ac4820..ed42162cb7e 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -112,6 +112,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::ListenUpgradeError(ListenUpgradeError { info: (), error }) => { void::unreachable(error) } + _ => {} } } diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 71ebcd97261..3ee6bfdf5d6 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -359,10 +359,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } diff --git a/protocols/ping/src/lib.rs b/protocols/ping/src/lib.rs index 3e17db300e7..5eaa6d4952a 100644 --- a/protocols/ping/src/lib.rs +++ b/protocols/ping/src/lib.rs @@ -150,21 +150,5 @@ impl NetworkBehaviour for Behaviour { } } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } diff --git a/protocols/relay/src/behaviour.rs b/protocols/relay/src/behaviour.rs index 98e2a5a53bb..2c2870f5618 100644 --- a/protocols/relay/src/behaviour.rs +++ b/protocols/relay/src/behaviour.rs @@ -350,22 +350,8 @@ impl 
NetworkBehaviour for Behaviour { fn on_swarm_event(&mut self, event: FromSwarm) { self.external_addresses.on_swarm_event(&event); - match event { - FromSwarm::ConnectionClosed(connection_closed) => { - self.on_connection_closed(connection_closed) - } - FromSwarm::ConnectionEstablished(_) - | FromSwarm::DialFailure(_) - | FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + if let FromSwarm::ConnectionClosed(connection_closed) = event { + self.on_connection_closed(connection_closed) } } diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 361fb8ac333..4e729b1993e 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -898,10 +898,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error); } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::ListenUpgradeError(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } diff --git a/protocols/relay/src/priv_client.rs b/protocols/relay/src/priv_client.rs index ae2ceb2e97d..53d75364e9d 100644 --- a/protocols/relay/src/priv_client.rs +++ b/protocols/relay/src/priv_client.rs @@ -202,16 +202,7 @@ impl NetworkBehaviour for Behaviour { FromSwarm::DialFailure(DialFailure { connection_id, .. }) => { self.pending_handler_commands.remove(&connection_id); } - FromSwarm::AddressChange(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index d884f15c7eb..f30f24a949b 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -537,9 +537,7 @@ impl ConnectionHandler for Handler { ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { self.on_dial_upgrade_error(dial_upgrade_error) } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } diff --git a/protocols/rendezvous/src/client.rs b/protocols/rendezvous/src/client.rs index c6072533194..beef7496274 100644 --- a/protocols/rendezvous/src/client.rs +++ b/protocols/rendezvous/src/client.rs @@ -280,16 +280,7 @@ impl NetworkBehaviour for Behaviour { )) => { unreachable!("rendezvous clients never receive requests") } - Poll::Ready( - other @ (ToSwarm::ExternalAddrConfirmed(_) - | ToSwarm::ExternalAddrExpired(_) - | ToSwarm::NewExternalAddrCandidate(_) - | ToSwarm::NotifyHandler { .. } - | ToSwarm::Dial { .. } - | ToSwarm::CloseConnection { .. } - | ToSwarm::ListenOn { .. } - | ToSwarm::RemoveListener { .. 
}), - ) => { + Poll::Ready(other) => { let new_to_swarm = other.map_out(|_| unreachable!("we manually map `GenerateEvent` variants")); diff --git a/protocols/rendezvous/src/server.rs b/protocols/rendezvous/src/server.rs index 886b64cc829..e1e03f41375 100644 --- a/protocols/rendezvous/src/server.rs +++ b/protocols/rendezvous/src/server.rs @@ -227,6 +227,7 @@ impl NetworkBehaviour for Behaviour { return Poll::Ready(new_to_swarm); } + _ => {} }; } diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index ef4b5b44fe0..9ccbc49fc4b 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -484,9 +484,7 @@ where ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { self.on_listen_upgrade_error(listen_upgrade_error) } - ConnectionEvent::AddressChange(_) - | ConnectionEvent::LocalProtocolsChange(_) - | ConnectionEvent::RemoteProtocolsChange(_) => {} + _ => {} } } } diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index 68a6b689fe5..9737663a876 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -804,15 +804,7 @@ where } FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), - FromSwarm::ListenFailure(_) => {} - FromSwarm::NewListener(_) => {} - FromSwarm::NewListenAddr(_) => {} - FromSwarm::ExpiredListenAddr(_) => {} - FromSwarm::ListenerError(_) => {} - FromSwarm::ListenerClosed(_) => {} - FromSwarm::NewExternalAddrCandidate(_) => {} - FromSwarm::ExternalAddrExpired(_) => {} - FromSwarm::ExternalAddrConfirmed(_) => {} + _ => {} } } diff --git a/protocols/upnp/src/behaviour.rs b/protocols/upnp/src/behaviour.rs index 5410b8dd13f..648b28e1c82 100644 --- a/protocols/upnp/src/behaviour.rs +++ b/protocols/upnp/src/behaviour.rs @@ -351,17 +351,7 @@ impl NetworkBehaviour for Behaviour { } } } - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrConfirmed(_) - | FromSwarm::ExternalAddrExpired(_) => {} + _ => {} } } diff --git a/swarm-derive/CHANGELOG.md b/swarm-derive/CHANGELOG.md index 269c2f1af7f..3ed7b9931df 100644 --- a/swarm-derive/CHANGELOG.md +++ b/swarm-derive/CHANGELOG.md @@ -6,7 +6,7 @@ To same functionality is available using `#[behaviour(to_swarm = "...")]` See [PR 4737](https://github.com/libp2p/rust-libp2p/pull/4737). -## 0.33.0 +## 0.33.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/swarm-derive/src/lib.rs b/swarm-derive/src/lib.rs index e8aa79d8470..514975390b0 100644 --- a/swarm-derive/src/lib.rs +++ b/swarm-derive/src/lib.rs @@ -672,79 +672,47 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> syn::Result { - return std::task::Poll::Ready(#network_behaviour_action::GenerateEvent(#into_out_event)) - } - } - }; + let map_in_event = quote! 
{ |event| #wrapped_event }; - quote!{ - match #trait_to_impl::poll(&mut self.#field, cx) { - #generate_event_match_arm - std::task::Poll::Ready(#network_behaviour_action::Dial { opts }) => { - return std::task::Poll::Ready(#network_behaviour_action::Dial { opts }); - } - std::task::Poll::Ready(#network_behaviour_action::ListenOn { opts }) => { - return std::task::Poll::Ready(#network_behaviour_action::ListenOn { opts }); - } - std::task::Poll::Ready(#network_behaviour_action::RemoveListener { id }) => { - return std::task::Poll::Ready(#network_behaviour_action::RemoveListener { id }); - } - std::task::Poll::Ready(#network_behaviour_action::NotifyHandler { peer_id, handler, event }) => { - return std::task::Poll::Ready(#network_behaviour_action::NotifyHandler { - peer_id, - handler, - event: #wrapped_event, - }); - } - std::task::Poll::Ready(#network_behaviour_action::NewExternalAddrCandidate(addr)) => { - return std::task::Poll::Ready(#network_behaviour_action::NewExternalAddrCandidate(addr)); - } - std::task::Poll::Ready(#network_behaviour_action::ExternalAddrConfirmed(addr)) => { - return std::task::Poll::Ready(#network_behaviour_action::ExternalAddrConfirmed(addr)); - } - std::task::Poll::Ready(#network_behaviour_action::ExternalAddrExpired(addr)) => { - return std::task::Poll::Ready(#network_behaviour_action::ExternalAddrExpired(addr)); - } - std::task::Poll::Ready(#network_behaviour_action::CloseConnection { peer_id, connection }) => { - return std::task::Poll::Ready(#network_behaviour_action::CloseConnection { peer_id, connection }); + quote! { + match #trait_to_impl::poll(&mut self.#field, cx) { + std::task::Poll::Ready(e) => return std::task::Poll::Ready(e.map_out(#map_out_event).map_in(#map_in_event)), + std::task::Poll::Pending => {}, } - std::task::Poll::Pending => {}, } - } - }); + }); let out_event_reference = if out_event_definition.is_some() { quote! { #out_event_name #ty_generics } @@ -819,7 +787,6 @@ fn build_struct(ast: &DeriveInput, data_struct: &DataStruct) -> syn::Result std::task::Poll<#network_behaviour_action>> { - use #prelude_path::futures::*; #(#poll_stmts)* std::task::Poll::Pending } diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 6e2e9bb1c0c..717674c9dd1 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,5 +1,7 @@ ## 0.44.0 - unreleased +- Add `#[non_exhaustive]` to `FromSwarm`, `ToSwarm`, `SwarmEvent`, `ConnectionHandlerEvent`, `ConnectionEvent`. + See [PR 4581](https://github.com/libp2p/rust-libp2p/pull/4581). - Remove `handler` field from `ConnectionClosed`. If you need to transfer state from a `ConnectionHandler` to its `NetworkBehaviour` when a connection closes, use `ConnectionHandler::poll_close`. See [PR 4076](https://github.com/libp2p/rust-libp2p/pull/4076). diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 27e62f71831..7fbb72b9260 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -217,6 +217,7 @@ pub trait NetworkBehaviour: 'static { /// /// [`Swarm`]: super::Swarm #[derive(Debug)] +#[non_exhaustive] pub enum ToSwarm { /// Instructs the `Swarm` to return an event when it is being polled. GenerateEvent(TOutEvent), @@ -394,6 +395,7 @@ pub enum CloseConnection { /// Enumeration with the list of the possible events /// to pass to [`on_swarm_event`](NetworkBehaviour::on_swarm_event). #[derive(Debug)] +#[non_exhaustive] pub enum FromSwarm<'a> { /// Informs the behaviour about a newly established connection to a peer. 
ConnectionEstablished(ConnectionEstablished<'a>), diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index 1005c4be035..c3e9c22a422 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -54,23 +54,7 @@ impl NetworkBehaviour for Behaviour { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } /// An implementation of [`ConnectionHandler`] that neither handles any protocols nor does it keep the connection alive. diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index ea42f64cca9..f0a46129250 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -214,6 +214,7 @@ pub trait ConnectionHandler: Send + 'static { /// Enumeration with the list of the possible stream events /// to pass to [`on_connection_event`](ConnectionHandler::on_connection_event). +#[non_exhaustive] pub enum ConnectionEvent<'a, IP: InboundUpgradeSend, OP: OutboundUpgradeSend, IOI, OOI> { /// Informs the handler about the output of a successful upgrade on a new inbound substream. FullyNegotiatedInbound(FullyNegotiatedInbound), @@ -539,6 +540,7 @@ impl SubstreamProtocol { /// Event produced by a handler. #[derive(Debug, Clone, PartialEq, Eq)] +#[non_exhaustive] pub enum ConnectionHandlerEvent { /// Request a new outbound substream to be opened with the remote. OutboundSubstreamRequest { diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 7bbc1c68924..1aa6bfb30cb 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -172,6 +172,7 @@ pub type THandlerErr = as ConnectionHandler>:: /// Event generated by the `Swarm`. #[derive(Debug)] +#[non_exhaustive] pub enum SwarmEvent { /// Event generated by the `NetworkBehaviour`. 
Behaviour(TBehaviourOutEvent), diff --git a/swarm/src/test.rs b/swarm/src/test.rs index 9a192444aec..4f6adfc37b0 100644 --- a/swarm/src/test.rs +++ b/swarm/src/test.rs @@ -114,23 +114,7 @@ where self.next_action.take().map_or(Poll::Pending, Poll::Ready) } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} fn on_connection_handler_event( &mut self, diff --git a/swarm/tests/swarm_derive.rs b/swarm/tests/swarm_derive.rs index f13917dcd6c..3337cb7b137 100644 --- a/swarm/tests/swarm_derive.rs +++ b/swarm/tests/swarm_derive.rs @@ -501,23 +501,7 @@ fn custom_out_event_no_type_parameters() { Poll::Pending } - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(_) - | FromSwarm::ConnectionClosed(_) - | FromSwarm::AddressChange(_) - | FromSwarm::DialFailure(_) - | FromSwarm::ListenFailure(_) - | FromSwarm::NewListener(_) - | FromSwarm::NewListenAddr(_) - | FromSwarm::ExpiredListenAddr(_) - | FromSwarm::ListenerError(_) - | FromSwarm::ListenerClosed(_) - | FromSwarm::NewExternalAddrCandidate(_) - | FromSwarm::ExternalAddrExpired(_) - | FromSwarm::ExternalAddrConfirmed(_) => {} - } - } + fn on_swarm_event(&mut self, _event: FromSwarm) {} } #[derive(NetworkBehaviour)] From e6905fe5c096dba465a3a483e4b37c423ec41345 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 3 Nov 2023 10:03:57 +1100 Subject: [PATCH 22/33] deps: migrate to `hickory-dns` The `trust-dns` project has recently rebranded to `hickory-dns`. This is a breaking change for `libp2p-dns` and hence I'd like to get it into `v0.53`. Related: https://github.com/hickory-dns/hickory-dns/issues/2051. Pull-Request: #4780. 
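For downstream code, the migration is essentially a crate rename: `libp2p-dns` keeps re-exporting `ResolverConfig` and `ResolverOpts`, and the removed `tokio-dns-over-rustls`/`tokio-dns-over-https-rustls` features are replaced by enabling `dns-over-rustls`/`dns-over-https-rustls` on `hickory-resolver` directly. Below is a minimal sketch of how the renamed imports look to a user that configures the resolver explicitly; `ResolverConfig::cloudflare()` and the `Default` impl for `ResolverOpts` are assumed to be available unchanged in `hickory-resolver` 0.24:

```rust
// Before this patch:
//   use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
// After this patch the same types come from the rebranded crate
// (or via the unchanged re-exports in `libp2p_dns`):
use hickory_resolver::config::{ResolverConfig, ResolverOpts};

fn resolver_settings() -> (ResolverConfig, ResolverOpts) {
    // Use a public resolver instead of the system configuration;
    // `cloudflare()` is assumed to exist as it did in trust-dns-resolver 0.23.
    (ResolverConfig::cloudflare(), ResolverOpts::default())
}
```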
--- Cargo.lock | 126 ++++++++------------ protocols/mdns/Cargo.toml | 2 +- protocols/mdns/src/behaviour/iface/dns.rs | 2 +- protocols/mdns/src/behaviour/iface/query.rs | 10 +- transports/dns/CHANGELOG.md | 4 + transports/dns/Cargo.toml | 11 +- transports/dns/src/lib.rs | 24 ++-- 7 files changed, 78 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98d0a971eff..c7118a5f776 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -402,17 +402,17 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0354a68a52265a3bde76005ddd2726624ef8624614f7f58871301de205a58a59" +checksum = "3c0ed2b6671c13d2c28756c5a64e04759c1e0b5d3d7ac031f521c3561e21fbcb" dependencies = [ "async-std", "async-trait", "futures-io", "futures-util", + "hickory-resolver", "pin-utils", "socket2 0.5.5", - "trust-dns-resolver", ] [[package]] @@ -1900,6 +1900,52 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" +[[package]] +name = "hickory-proto" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "091a6fbccf4860009355e3efc52ff4acf37a63489aad7435372d44ceeb6fbbcf" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.8.5", + "socket2 0.5.5", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35b8f021164e6a984c9030023544c57789c51760065cd510572fedcfb04164e8" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot", + "rand 0.8.5", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "hkdf" version = "0.12.3" @@ -2048,7 +2094,7 @@ dependencies = [ "rustls 0.20.8", "rustls-native-certs", "tokio", - "tokio-rustls 0.23.4", + "tokio-rustls", ] [[package]] @@ -2555,6 +2601,7 @@ dependencies = [ "async-std-resolver", "async-trait", "futures", + "hickory-resolver", "libp2p-core", "libp2p-identity", "parking_lot", @@ -2562,7 +2609,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "trust-dns-resolver", ] [[package]] @@ -2719,6 +2765,7 @@ dependencies = [ "async-std", "data-encoding", "futures", + "hickory-proto", "if-watch", "libp2p-core", "libp2p-identity", @@ -2733,7 +2780,6 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", - "trust-dns-proto", "void", ] @@ -5728,16 +5774,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.8", - "tokio", -] - [[package]] name = "tokio-stream" version = "0.1.14" @@ -5927,64 +5963,6 @@ dependencies = [ "tracing-log", ] -[[package]] -name = "trust-dns-proto" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dc775440033cb114085f6f2437682b194fa7546466024b1037e82a48a052a69" -dependencies = [ - "async-trait", - "bytes", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "h2", - "http", - 
"idna", - "ipnet", - "once_cell", - "rand 0.8.5", - "rustls 0.21.8", - "rustls-pemfile", - "rustls-webpki", - "smallvec", - "socket2 0.5.5", - "thiserror", - "tinyvec", - "tokio", - "tokio-rustls 0.24.1", - "tracing", - "url", - "webpki-roots", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff7aed33ef3e8bf2c9966fccdfed93f93d46f432282ea875cd66faabc6ef2f" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", - "once_cell", - "parking_lot", - "rand 0.8.5", - "resolv-conf", - "rustls 0.21.8", - "smallvec", - "thiserror", - "tokio", - "tokio-rustls 0.24.1", - "tracing", - "trust-dns-proto", - "webpki-roots", -] - [[package]] name = "try-lock" version = "0.2.4" diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index ef67a7e51b1..90ee633a9c4 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -24,7 +24,7 @@ smallvec = "1.11.1" socket2 = { version = "0.5.5", features = ["all"] } tokio = { version = "1.33", default-features = false, features = ["net", "time"], optional = true} tracing = "0.1.37" -trust-dns-proto = { version = "0.23.0", default-features = false, features = ["mdns"] } +hickory-proto = { version = "0.24.0", default-features = false, features = ["mdns"] } void = "1.0.2" [features] diff --git a/protocols/mdns/src/behaviour/iface/dns.rs b/protocols/mdns/src/behaviour/iface/dns.rs index 61fd5d329b9..6cc5550dbe5 100644 --- a/protocols/mdns/src/behaviour/iface/dns.rs +++ b/protocols/mdns/src/behaviour/iface/dns.rs @@ -395,9 +395,9 @@ impl error::Error for MdnsResponseError {} #[cfg(test)] mod tests { use super::*; + use hickory_proto::op::Message; use libp2p_identity as identity; use std::time::Duration; - use trust_dns_proto::op::Message; #[test] fn build_query_correct() { diff --git a/protocols/mdns/src/behaviour/iface/query.rs b/protocols/mdns/src/behaviour/iface/query.rs index 0185028f6ff..421ea10284c 100644 --- a/protocols/mdns/src/behaviour/iface/query.rs +++ b/protocols/mdns/src/behaviour/iface/query.rs @@ -20,6 +20,10 @@ use super::dns; use crate::{META_QUERY_SERVICE_FQDN, SERVICE_NAME_FQDN}; +use hickory_proto::{ + op::Message, + rr::{Name, RData}, +}; use libp2p_core::{ address_translation, multiaddr::{Multiaddr, Protocol}, @@ -27,10 +31,6 @@ use libp2p_core::{ use libp2p_identity::PeerId; use std::time::Instant; use std::{fmt, net::SocketAddr, str, time::Duration}; -use trust_dns_proto::{ - op::Message, - rr::{Name, RData}, -}; /// A valid mDNS packet received by the service. #[derive(Debug)] @@ -47,7 +47,7 @@ impl MdnsPacket { pub(crate) fn new_from_bytes( buf: &[u8], from: SocketAddr, - ) -> Result, trust_dns_proto::error::ProtoError> { + ) -> Result, hickory_proto::error::ProtoError> { let packet = Message::from_vec(buf)?; if packet.query().is_none() { diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index 9d9b4c09d92..734e7f08740 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -4,6 +4,10 @@ See [PR 4464](https://github.com/libp2p/rust-libp2p/pull/4464). - Remove deprecated type-aliases. See [PR 4739](https://github.com/libp2p/rust-libp2p/pull/4739). +- Migrate to the `hickory-dns` project which has rebranded from `trust-dns`. + We also remove the `tokio-dns-over-rustls` and `tokio-dns-over-https-rustls` features. + Users should activate these features themselves on `hickory-resolver` if so desired. + See [PR 4780](https://github.com/libp2p/rust-libp2p/pull/4780). 
## 0.40.1 diff --git a/transports/dns/Cargo.toml b/transports/dns/Cargo.toml index df769161c55..e318e99476a 100644 --- a/transports/dns/Cargo.toml +++ b/transports/dns/Cargo.toml @@ -11,15 +11,15 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-std-resolver = { version = "0.23", optional = true } +async-std-resolver = { version = "0.24", optional = true } async-trait = "0.1.74" futures = "0.3.28" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } parking_lot = "0.12.0" +hickory-resolver = { version = "0.24.0", default-features = false, features = ["system-config"] } smallvec = "1.11.1" tracing = "0.1.37" -trust-dns-resolver = { version = "0.23", default-features = false, features = ["system-config"] } [dev-dependencies] libp2p-identity = { workspace = true, features = ["rand"] } @@ -29,12 +29,7 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] } [features] async-std = ["async-std-resolver"] -tokio = ["trust-dns-resolver/tokio-runtime"] -# The `tokio-` prefix and feature dependency is just to be explicit, -# since these features of `trust-dns-resolver` are currently only -# available for `tokio`. -tokio-dns-over-rustls = ["tokio", "trust-dns-resolver/dns-over-rustls"] -tokio-dns-over-https-rustls = ["tokio", "trust-dns-resolver/dns-over-https-rustls"] +tokio = ["hickory-resolver/tokio-runtime"] # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/dns/src/lib.rs b/transports/dns/src/lib.rs index 13ad93952c9..483d50be15b 100644 --- a/transports/dns/src/lib.rs +++ b/transports/dns/src/lib.rs @@ -60,12 +60,12 @@ #[cfg(feature = "async-std")] pub mod async_std { use async_std_resolver::AsyncStdResolver; - use parking_lot::Mutex; - use std::{io, sync::Arc}; - use trust_dns_resolver::{ + use hickory_resolver::{ config::{ResolverConfig, ResolverOpts}, system_conf, }; + use parking_lot::Mutex; + use std::{io, sync::Arc}; /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `async-std` for all async I/O. @@ -90,9 +90,9 @@ pub mod async_std { #[cfg(feature = "tokio")] pub mod tokio { + use hickory_resolver::{system_conf, TokioAsyncResolver}; use parking_lot::Mutex; use std::sync::Arc; - use trust_dns_resolver::{system_conf, TokioAsyncResolver}; /// A `Transport` wrapper for performing DNS lookups when dialing `Multiaddr`esses /// using `tokio` for all async I/O. @@ -109,8 +109,8 @@ pub mod tokio { /// and options. 
pub fn custom( inner: T, - cfg: trust_dns_resolver::config::ResolverConfig, - opts: trust_dns_resolver::config::ResolverOpts, + cfg: hickory_resolver::config::ResolverConfig, + opts: hickory_resolver::config::ResolverOpts, ) -> Transport { Transport { inner: Arc::new(Mutex::new(inner)), @@ -141,12 +141,12 @@ use std::{ task::{Context, Poll}, }; -pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts}; -pub use trust_dns_resolver::error::{ResolveError, ResolveErrorKind}; -use trust_dns_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; -use trust_dns_resolver::lookup_ip::LookupIp; -use trust_dns_resolver::name_server::ConnectionProvider; -use trust_dns_resolver::AsyncResolver; +pub use hickory_resolver::config::{ResolverConfig, ResolverOpts}; +pub use hickory_resolver::error::{ResolveError, ResolveErrorKind}; +use hickory_resolver::lookup::{Ipv4Lookup, Ipv6Lookup, TxtLookup}; +use hickory_resolver::lookup_ip::LookupIp; +use hickory_resolver::name_server::ConnectionProvider; +use hickory_resolver::AsyncResolver; /// The prefix for `dnsaddr` protocol TXT record lookups. const DNSADDR_PREFIX: &str = "_dnsaddr."; From 0ef6feb397ede286dcd8c9831dc41fb288426885 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 3 Nov 2023 10:18:44 +1100 Subject: [PATCH 23/33] feat(swarm): don't have `ConnectionHandler`s close connections This PR implements the long-awaited design of disallowing `ConnectionHandler`s to close entire connections. Instead, users should close connections via `ToSwarm::CloseConnection` from a `NetworkBehaviour` or - even better - from the `Swarm` via `close_connection`. A `NetworkBehaviour` also does not have a "full" view onto how a connection is used but at least it can correlate whether it created the connection via the `ConnectionId`. In general, the more modular and friendly approach is to stop "using" a connection if a particular protocol no longer needs it. As a result of the keep-alive algorithm, such a connection is then closed automatically. Depends-on: #4745. Depends-on: #4718. Depends-on: #4749. Related: #3353. Related: #4714. Resolves: #3591. Pull-Request: #4755. 
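For illustration only (not part of this patch), here is a minimal sketch of the two remaining ways to close a connection, using the `dummy` behaviour for brevity. `Swarm::disconnect_peer_id` and the fields of `ToSwarm::CloseConnection` are taken from this patch; `CloseConnection::All` is assumed to be the variant that targets all connections to the peer:

```rust
use libp2p_identity::PeerId;
use libp2p_swarm::{dummy, CloseConnection, Swarm, ToSwarm};
use std::convert::Infallible;

/// Close every connection to `peer` from the outside. This drives
/// `ConnectionHandler::poll_close` to completion for each affected connection.
fn close_from_swarm(swarm: &mut Swarm<dummy::Behaviour>, peer: PeerId) {
    let _ = swarm.disconnect_peer_id(peer);
}

/// The command a `NetworkBehaviour` would return from its `poll` to achieve
/// the same; `ConnectionHandler`s themselves can no longer emit a close event.
fn close_command(peer: PeerId) -> ToSwarm<(), Infallible> {
    ToSwarm::CloseConnection {
        peer_id: peer,
        connection: CloseConnection::All,
    }
}
```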
--- examples/file-sharing/src/network.rs | 7 +-- misc/metrics/src/identify.rs | 4 +- misc/metrics/src/lib.rs | 4 +- misc/metrics/src/swarm.rs | 10 ++-- protocols/dcutr/src/handler/relayed.rs | 10 +--- protocols/gossipsub/src/handler.rs | 10 +--- protocols/identify/src/handler.rs | 8 +-- protocols/kad/src/handler.rs | 12 ++--- protocols/perf/src/client/handler.rs | 10 +--- protocols/perf/src/server/handler.rs | 8 +-- protocols/ping/src/handler.rs | 11 +--- protocols/relay/src/behaviour/handler.rs | 9 +--- protocols/relay/src/priv_client/handler.rs | 9 +--- protocols/request-response/src/handler.rs | 4 +- swarm-test/src/lib.rs | 30 ++++------- swarm/CHANGELOG.md | 3 ++ swarm/src/behaviour.rs | 15 +++--- swarm/src/behaviour/toggle.rs | 8 +-- swarm/src/connection.rs | 15 ++---- swarm/src/connection/error.rs | 19 ++----- swarm/src/connection/pool.rs | 10 ++-- swarm/src/connection/pool/task.rs | 8 +-- swarm/src/dummy.rs | 8 +-- swarm/src/handler.rs | 58 +++------------------- swarm/src/handler/either.rs | 10 +--- swarm/src/handler/map_in.rs | 8 +-- swarm/src/handler/map_out.rs | 9 +--- swarm/src/handler/multi.rs | 8 +-- swarm/src/handler/one_shot.rs | 8 +-- swarm/src/handler/pending.rs | 8 +-- swarm/src/handler/select.rs | 14 +----- swarm/src/lib.rs | 25 ++++------ swarm/tests/connection_close.rs | 8 +-- 33 files changed, 85 insertions(+), 303 deletions(-) diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index 2ea16ef180c..ad5418193a4 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -1,5 +1,3 @@ -use async_std::io; -use either::Either; use futures::channel::{mpsc, oneshot}; use futures::prelude::*; @@ -208,10 +206,7 @@ impl EventLoop { } } - async fn handle_event( - &mut self, - event: SwarmEvent>, - ) { + async fn handle_event(&mut self, event: SwarmEvent) { match event { SwarmEvent::Behaviour(BehaviourEvent::Kademlia( kad::Event::OutboundQueryProgressed { diff --git a/misc/metrics/src/identify.rs b/misc/metrics/src/identify.rs index 4dac6ea6774..b1d4e9f0c89 100644 --- a/misc/metrics/src/identify.rs +++ b/misc/metrics/src/identify.rs @@ -123,8 +123,8 @@ impl super::Recorder for Metrics { } } -impl super::Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl super::Recorder> for Metrics { + fn record(&self, event: &libp2p_swarm::SwarmEvent) { if let libp2p_swarm::SwarmEvent::ConnectionClosed { peer_id, num_established, diff --git a/misc/metrics/src/lib.rs b/misc/metrics/src/lib.rs index 2132dd5d7fb..97968253faa 100644 --- a/misc/metrics/src/lib.rs +++ b/misc/metrics/src/lib.rs @@ -138,8 +138,8 @@ impl Recorder for Metrics { } } -impl Recorder> for Metrics { - fn record(&self, event: &libp2p_swarm::SwarmEvent) { +impl Recorder> for Metrics { + fn record(&self, event: &libp2p_swarm::SwarmEvent) { self.swarm.record(event); #[cfg(feature = "identify")] diff --git a/misc/metrics/src/swarm.rs b/misc/metrics/src/swarm.rs index fff28e5f639..ad83401f316 100644 --- a/misc/metrics/src/swarm.rs +++ b/misc/metrics/src/swarm.rs @@ -185,8 +185,8 @@ impl Metrics { } } -impl super::Recorder> for Metrics { - fn record(&self, event: &SwarmEvent) { +impl super::Recorder> for Metrics { + fn record(&self, event: &SwarmEvent) { match event { SwarmEvent::Behaviour(_) => {} SwarmEvent::ConnectionEstablished { @@ -359,15 +359,13 @@ struct ConnectionClosedLabels { enum ConnectionError { Io, KeepAliveTimeout, - Handler, } -impl From<&libp2p_swarm::ConnectionError> for ConnectionError { - fn from(value: 
&libp2p_swarm::ConnectionError) -> Self { +impl From<&libp2p_swarm::ConnectionError> for ConnectionError { + fn from(value: &libp2p_swarm::ConnectionError) -> Self { match value { libp2p_swarm::ConnectionError::IO(_) => ConnectionError::Io, libp2p_swarm::ConnectionError::KeepAliveTimeout => ConnectionError::KeepAliveTimeout, - libp2p_swarm::ConnectionError::Handler(_) => ConnectionError::Handler, } } } diff --git a/protocols/dcutr/src/handler/relayed.rs b/protocols/dcutr/src/handler/relayed.rs index 4d9bf3c910d..eba58f89313 100644 --- a/protocols/dcutr/src/handler/relayed.rs +++ b/protocols/dcutr/src/handler/relayed.rs @@ -40,7 +40,6 @@ use std::collections::VecDeque; use std::io; use std::task::{Context, Poll}; use std::time::Duration; -use void::Void; #[derive(Debug)] pub enum Command { @@ -63,7 +62,6 @@ pub struct Handler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, >, @@ -182,7 +180,6 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = Command; type ToBehaviour = Event; - type Error = Void; type InboundProtocol = Either, DeniedUpgrade>; type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); @@ -229,12 +226,7 @@ impl ConnectionHandler for Handler { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { // Return queued events. if let Some(event) = self.queued_events.pop_front() { diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index 55480384ffa..63ef96781d9 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -38,7 +38,6 @@ use std::{ pin::Pin, task::{Context, Poll}, }; -use void::Void; /// The event emitted by the Handler. This informs the behaviour of various events created /// by the handler. 
@@ -220,7 +219,6 @@ impl EnabledHandler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, > { if !self.peer_kind_sent { @@ -391,7 +389,6 @@ impl EnabledHandler { impl ConnectionHandler for Handler { type FromBehaviour = HandlerIn; type ToBehaviour = HandlerEvent; - type Error = Void; type InboundOpenInfo = (); type InboundProtocol = either::Either; type OutboundOpenInfo = (); @@ -434,12 +431,7 @@ impl ConnectionHandler for Handler { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { match self { Handler::Enabled(handler) => handler.poll(cx), diff --git a/protocols/identify/src/handler.rs b/protocols/identify/src/handler.rs index 5012868c17a..f9b77e0b63a 100644 --- a/protocols/identify/src/handler.rs +++ b/protocols/identify/src/handler.rs @@ -38,7 +38,7 @@ use libp2p_swarm::{ }; use smallvec::SmallVec; use std::collections::HashSet; -use std::{io, task::Context, task::Poll, time::Duration}; +use std::{task::Context, task::Poll, time::Duration}; use tracing::Level; const STREAM_TIMEOUT: Duration = Duration::from_secs(60); @@ -57,7 +57,6 @@ pub struct Handler { Either, ReadyUpgrade>, (), Event, - io::Error, >; 4], >, @@ -282,7 +281,6 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = InEvent; type ToBehaviour = Event; - type Error = io::Error; type InboundProtocol = SelectUpgrade, ReadyUpgrade>; type OutboundProtocol = Either, ReadyUpgrade>; @@ -320,9 +318,7 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { + ) -> Poll> { if let Some(event) = self.events.pop() { return Poll::Ready(event); } diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index 21dad8a82b9..adfb076541c 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -597,7 +597,6 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = HandlerIn; type ToBehaviour = HandlerEvent; - type Error = io::Error; // TODO: better error type? 
type InboundProtocol = Either; type OutboundProtocol = ProtocolConfig; type OutboundOpenInfo = (); @@ -711,12 +710,7 @@ impl ConnectionHandler for Handler { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { match &mut self.protocol_status { Some(status) if !status.reported => { @@ -846,7 +840,7 @@ impl Handler { } impl futures::Stream for OutboundSubstreamState { - type Item = ConnectionHandlerEvent; + type Item = ConnectionHandlerEvent; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -978,7 +972,7 @@ impl futures::Stream for OutboundSubstreamState { } impl futures::Stream for InboundSubstreamState { - type Item = ConnectionHandlerEvent; + type Item = ConnectionHandlerEvent; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/protocols/perf/src/client/handler.rs b/protocols/perf/src/client/handler.rs index 0cc2dd23abe..2a2c5499fc2 100644 --- a/protocols/perf/src/client/handler.rs +++ b/protocols/perf/src/client/handler.rs @@ -35,7 +35,6 @@ use libp2p_swarm::{ }, ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, SubstreamProtocol, }; -use void::Void; use crate::client::{RunError, RunId}; use crate::{RunParams, RunUpdate}; @@ -59,7 +58,6 @@ pub struct Handler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, >, @@ -87,7 +85,6 @@ impl Default for Handler { impl ConnectionHandler for Handler { type FromBehaviour = Command; type ToBehaviour = Event; - type Error = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); @@ -159,12 +156,7 @@ impl ConnectionHandler for Handler { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { if let Some(event) = self.queued_events.pop_front() { return Poll::Ready(event); diff --git a/protocols/perf/src/server/handler.rs b/protocols/perf/src/server/handler.rs index ed42162cb7e..ddfe8f881e5 100644 --- a/protocols/perf/src/server/handler.rs +++ b/protocols/perf/src/server/handler.rs @@ -63,7 +63,6 @@ impl Default for Handler { impl ConnectionHandler for Handler { type FromBehaviour = Void; type ToBehaviour = Event; - type Error = Void; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = DeniedUpgrade; type OutboundOpenInfo = Void; @@ -121,12 +120,7 @@ impl ConnectionHandler for Handler { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { loop { match self.inbound.poll_unpin(cx) { diff --git a/protocols/ping/src/handler.rs b/protocols/ping/src/handler.rs index 3ee6bfdf5d6..5e6fc2cd2cf 100644 --- a/protocols/ping/src/handler.rs +++ b/protocols/ping/src/handler.rs @@ -209,7 +209,6 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = Void; type ToBehaviour = Result; - type Error = Void; type InboundProtocol = ReadyUpgrade; type OutboundProtocol = ReadyUpgrade; type OutboundOpenInfo = (); @@ -225,14 +224,8 @@ impl ConnectionHandler for Handler { fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent< - ReadyUpgrade, - (), - Result, - Self::Error, - >, - > { + ) -> Poll, (), Result>> + { match self.state { 
State::Inactive { reported: true } => { return Poll::Pending; // nothing to do on this connection diff --git a/protocols/relay/src/behaviour/handler.rs b/protocols/relay/src/behaviour/handler.rs index 4e729b1993e..958c6a9b906 100644 --- a/protocols/relay/src/behaviour/handler.rs +++ b/protocols/relay/src/behaviour/handler.rs @@ -339,7 +339,6 @@ pub struct Handler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, >, @@ -482,7 +481,6 @@ type Futures = FuturesUnordered>; impl ConnectionHandler for Handler { type FromBehaviour = In; type ToBehaviour = Event; - type Error = void::Void; type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); type OutboundProtocol = ReadyUpgrade; @@ -593,12 +591,7 @@ impl ConnectionHandler for Handler { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { // Return queued events. if let Some(event) = self.queued_events.pop_front() { diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index f30f24a949b..1d24493be77 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -101,7 +101,6 @@ pub struct Handler { ::OutboundProtocol, ::OutboundOpenInfo, ::ToBehaviour, - ::Error, >, >, @@ -230,7 +229,6 @@ impl Handler { impl ConnectionHandler for Handler { type FromBehaviour = In; type ToBehaviour = Event; - type Error = void::Void; type InboundProtocol = ReadyUpgrade; type InboundOpenInfo = (); type OutboundProtocol = ReadyUpgrade; @@ -275,12 +273,7 @@ impl ConnectionHandler for Handler { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { loop { debug_assert_eq!( diff --git a/protocols/request-response/src/handler.rs b/protocols/request-response/src/handler.rs index 9ccbc49fc4b..2d45e0d7dc3 100644 --- a/protocols/request-response/src/handler.rs +++ b/protocols/request-response/src/handler.rs @@ -367,7 +367,6 @@ where { type FromBehaviour = OutboundMessage; type ToBehaviour = Event; - type Error = void::Void; type InboundProtocol = Protocol; type OutboundProtocol = Protocol; type OutboundOpenInfo = (); @@ -390,8 +389,7 @@ where fn poll( &mut self, cx: &mut Context<'_>, - ) -> Poll, (), Self::ToBehaviour, Self::Error>> - { + ) -> Poll, (), Self::ToBehaviour>> { match self.worker_streams.poll_unpin(cx) { Poll::Ready((_, Ok(Ok(event)))) => { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); diff --git a/swarm-test/src/lib.rs b/swarm-test/src/lib.rs index ee4058d530d..48f5bcbf4ef 100644 --- a/swarm-test/src/lib.rs +++ b/swarm-test/src/lib.rs @@ -27,9 +27,7 @@ use libp2p_core::{ use libp2p_identity::{Keypair, PeerId}; use libp2p_plaintext as plaintext; use libp2p_swarm::dial_opts::PeerCondition; -use libp2p_swarm::{ - self as swarm, dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent, THandlerErr, -}; +use libp2p_swarm::{self as swarm, dial_opts::DialOpts, NetworkBehaviour, Swarm, SwarmEvent}; use libp2p_yamux as yamux; use std::fmt::Debug; use std::future::IntoFuture; @@ -70,9 +68,7 @@ pub trait SwarmExt { /// Wait for specified condition to return `Some`. 
async fn wait(&mut self, predicate: P) -> E where - P: Fn( - SwarmEvent<::ToSwarm, THandlerErr>, - ) -> Option, + P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send; /// Listens for incoming connections, polling the [`Swarm`] until the transport is ready to accept connections. @@ -83,9 +79,7 @@ pub trait SwarmExt { /// Returns the next [`SwarmEvent`] or times out after 10 seconds. /// /// If the 10s timeout does not fit your usecase, please fall back to `StreamExt::next`. - async fn next_swarm_event( - &mut self, - ) -> SwarmEvent<::ToSwarm, THandlerErr>; + async fn next_swarm_event(&mut self) -> SwarmEvent<::ToSwarm>; /// Returns the next behaviour event or times out after 10 seconds. /// @@ -142,8 +136,8 @@ where TBehaviour2::ToSwarm: Debug, TBehaviour1: NetworkBehaviour + Send, TBehaviour1::ToSwarm: Debug, - SwarmEvent>: TryIntoOutput, - SwarmEvent>: TryIntoOutput, + SwarmEvent: TryIntoOutput, + SwarmEvent: TryIntoOutput, Out1: Debug, Out2: Debug, { @@ -185,15 +179,15 @@ pub trait TryIntoOutput: Sized { fn try_into_output(self) -> Result; } -impl TryIntoOutput for SwarmEvent { +impl TryIntoOutput for SwarmEvent { fn try_into_output(self) -> Result { self.try_into_behaviour_event() } } -impl TryIntoOutput> - for SwarmEvent +impl TryIntoOutput> + for SwarmEvent { - fn try_into_output(self) -> Result, Self> { + fn try_into_output(self) -> Result, Self> { Ok(self) } } @@ -295,7 +289,7 @@ where async fn wait(&mut self, predicate: P) -> E where - P: Fn(SwarmEvent<::ToSwarm, THandlerErr>) -> Option, + P: Fn(SwarmEvent<::ToSwarm>) -> Option, P: Send, { loop { @@ -314,9 +308,7 @@ where } } - async fn next_swarm_event( - &mut self, - ) -> SwarmEvent<::ToSwarm, THandlerErr> { + async fn next_swarm_event(&mut self) -> SwarmEvent<::ToSwarm> { match futures::future::select( futures_timer::Delay::new(Duration::from_secs(10)), self.select_next_some(), diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 717674c9dd1..692ce9d1bde 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -7,6 +7,9 @@ See [PR 4076](https://github.com/libp2p/rust-libp2p/pull/4076). - Remove deprecated `PollParameters` from `NetworkBehaviour::poll` function. See [PR 4490](https://github.com/libp2p/rust-libp2p/pull/4490). +- Remove deprecated `ConnectionHandlerEvent::Close` and `ConnectionHandler::Error`. + `ConnectionHandler`s should not close connections directly as the connection might still be in use by other handlers. + See [PR 4755](https://github.com/libp2p/rust-libp2p/pull/4755). - Add `PeerCondition::DisconnectedAndNotDialing` variant, combining pre-existing conditions. This is the new default. A new dialing attempt is iniated _only if_ the peer is both considered disconnected and there is currently no ongoing dialing attempt. diff --git a/swarm/src/behaviour.rs b/swarm/src/behaviour.rs index 7fbb72b9260..c25b14e75e3 100644 --- a/swarm/src/behaviour.rs +++ b/swarm/src/behaviour.rs @@ -290,16 +290,13 @@ pub enum ToSwarm { /// This address will be shared with all [`NetworkBehaviour`]s via [`FromSwarm::ExternalAddrExpired`]. ExternalAddrExpired(Multiaddr), - /// Instructs the `Swarm` to initiate a graceful close of one or all connections - /// with the given peer. + /// Instructs the `Swarm` to initiate a graceful close of one or all connections with the given peer. /// - /// Note: Closing a connection via - /// [`ToSwarm::CloseConnection`] does not inform the - /// corresponding [`ConnectionHandler`]. 
- /// Closing a connection via a [`ConnectionHandler`] can be done - /// either in a collaborative manner across [`ConnectionHandler`]s - /// with [`ConnectionHandler::connection_keep_alive`] or directly with - /// [`ConnectionHandlerEvent::Close`](crate::ConnectionHandlerEvent::Close). + /// Closing a connection via [`ToSwarm::CloseConnection`] will poll [`ConnectionHandler::poll_close`] to completion. + /// In most cases, stopping to "use" a connection is enough to have it closed. + /// The keep-alive algorithm will close a connection automatically once all [`ConnectionHandler`]s are idle. + /// + /// Use this command if you want to close a connection _despite_ it still being in use by one or more handlers. CloseConnection { /// The peer to disconnect. peer_id: PeerId, diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index e1da71a0450..5c23ee099a3 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -264,7 +264,6 @@ where { type FromBehaviour = TInner::FromBehaviour; type ToBehaviour = TInner::ToBehaviour; - type Error = TInner::Error; type InboundProtocol = Either, SendWrapper>; type OutboundProtocol = TInner::OutboundProtocol; type OutboundOpenInfo = TInner::OutboundOpenInfo; @@ -299,12 +298,7 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { if let Some(inner) = self.inner.as_mut() { inner.poll(cx) diff --git a/swarm/src/connection.rs b/swarm/src/connection.rs index 35cc71d5354..15c49bb7bd5 100644 --- a/swarm/src/connection.rs +++ b/swarm/src/connection.rs @@ -238,7 +238,7 @@ where pub(crate) fn poll( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, ConnectionError>> { + ) -> Poll, ConnectionError>> { let Self { requested_substreams, muxing, @@ -283,9 +283,6 @@ where Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) => { return Poll::Ready(Ok(Event::Handler(event))); } - Poll::Ready(ConnectionHandlerEvent::Close(err)) => { - return Poll::Ready(Err(ConnectionError::Handler(err))); - } Poll::Ready(ConnectionHandlerEvent::ReportRemoteProtocols( ProtocolSupport::Added(protocols), )) => { @@ -452,9 +449,7 @@ where } #[cfg(test)] - fn poll_noop_waker( - &mut self, - ) -> Poll, ConnectionError>> { + fn poll_noop_waker(&mut self) -> Poll, ConnectionError>> { Pin::new(self).poll(&mut Context::from_waker(futures::task::noop_waker_ref())) } } @@ -1112,7 +1107,7 @@ mod tests { #[derive(Default)] struct ConfigurableProtocolConnectionHandler { - events: Vec>, + events: Vec>, active_protocols: HashSet, local_added: Vec>, local_removed: Vec>, @@ -1147,7 +1142,6 @@ mod tests { impl ConnectionHandler for MockConnectionHandler { type FromBehaviour = Void; type ToBehaviour = Void; - type Error = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -1203,7 +1197,6 @@ mod tests { Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour, - Self::Error, >, > { if self.outbound_requested { @@ -1221,7 +1214,6 @@ mod tests { impl ConnectionHandler for ConfigurableProtocolConnectionHandler { type FromBehaviour = Void; type ToBehaviour = Void; - type Error = Void; type InboundProtocol = ManyProtocolsUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -1280,7 +1272,6 @@ mod tests { Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour, - Self::Error, >, > { if let Some(event) = 
self.events.pop() { diff --git a/swarm/src/connection/error.rs b/swarm/src/connection/error.rs index 5d5dda57868..33aa81c19a9 100644 --- a/swarm/src/connection/error.rs +++ b/swarm/src/connection/error.rs @@ -25,47 +25,36 @@ use std::{fmt, io}; /// Errors that can occur in the context of an established `Connection`. #[derive(Debug)] -pub enum ConnectionError { +pub enum ConnectionError { /// An I/O error occurred on the connection. // TODO: Eventually this should also be a custom error? IO(io::Error), /// The connection keep-alive timeout expired. KeepAliveTimeout, - - /// The connection handler produced an error. - Handler(THandlerErr), } -impl fmt::Display for ConnectionError -where - THandlerErr: fmt::Display, -{ +impl fmt::Display for ConnectionError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ConnectionError::IO(err) => write!(f, "Connection error: I/O error: {err}"), ConnectionError::KeepAliveTimeout => { write!(f, "Connection closed due to expired keep-alive timeout.") } - ConnectionError::Handler(err) => write!(f, "Connection error: Handler error: {err}"), } } } -impl std::error::Error for ConnectionError -where - THandlerErr: std::error::Error + 'static, -{ +impl std::error::Error for ConnectionError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { ConnectionError::IO(err) => Some(err), ConnectionError::KeepAliveTimeout => None, - ConnectionError::Handler(err) => Some(err), } } } -impl From for ConnectionError { +impl From for ConnectionError { fn from(error: io::Error) -> Self { ConnectionError::IO(error) } diff --git a/swarm/src/connection/pool.rs b/swarm/src/connection/pool.rs index cfa3fb7ea3c..9bcd1b446d3 100644 --- a/swarm/src/connection/pool.rs +++ b/swarm/src/connection/pool.rs @@ -132,7 +132,7 @@ where /// Receivers for events reported from established connections. established_connection_events: - SelectAll>>, + SelectAll>>, /// Receivers for [`NewConnection`] objects that are dropped. new_connection_dropped_listeners: FuturesUnordered>, @@ -226,7 +226,7 @@ impl fmt::Debug for Pool { /// Event that can happen on the `Pool`. #[derive(Debug)] -pub(crate) enum PoolEvent { +pub(crate) enum PoolEvent { /// A new connection has been established. ConnectionEstablished { id: ConnectionId, @@ -258,7 +258,7 @@ pub(crate) enum PoolEvent { connected: Connected, /// The error that occurred, if any. If `None`, the connection /// was closed by the local peer. - error: Option>, + error: Option, /// The remaining established connections to the same peer. remaining_established_connection_ids: Vec, }, @@ -290,7 +290,7 @@ pub(crate) enum PoolEvent { id: ConnectionId, peer_id: PeerId, /// The produced event. - event: THandler::ToBehaviour, + event: ToBehaviour, }, /// The connection to a node has changed its address. @@ -548,7 +548,7 @@ where /// Polls the connection pool for events. 
#[tracing::instrument(level = "debug", name = "Pool::poll", skip(self, cx))] - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> where THandler: ConnectionHandler + 'static, ::OutboundOpenInfo: Send, diff --git a/swarm/src/connection/pool/task.rs b/swarm/src/connection/pool/task.rs index f2c6928cd27..08674fd2ee5 100644 --- a/swarm/src/connection/pool/task.rs +++ b/swarm/src/connection/pool/task.rs @@ -66,7 +66,7 @@ pub(crate) enum PendingConnectionEvent { } #[derive(Debug)] -pub(crate) enum EstablishedConnectionEvent { +pub(crate) enum EstablishedConnectionEvent { /// A node we are connected to has changed its address. AddressChange { id: ConnectionId, @@ -77,7 +77,7 @@ pub(crate) enum EstablishedConnectionEvent { Notify { id: ConnectionId, peer_id: PeerId, - event: THandler::ToBehaviour, + event: ToBehaviour, }, /// A connection closed, possibly due to an error. /// @@ -86,7 +86,7 @@ pub(crate) enum EstablishedConnectionEvent { Closed { id: ConnectionId, peer_id: PeerId, - error: Option>, + error: Option, }, } @@ -171,7 +171,7 @@ pub(crate) async fn new_for_established_connection( peer_id: PeerId, mut connection: crate::connection::Connection, mut command_receiver: mpsc::Receiver>, - mut events: mpsc::Sender>, + mut events: mpsc::Sender>, ) where THandler: ConnectionHandler, { diff --git a/swarm/src/dummy.rs b/swarm/src/dummy.rs index c3e9c22a422..86df676443b 100644 --- a/swarm/src/dummy.rs +++ b/swarm/src/dummy.rs @@ -64,7 +64,6 @@ pub struct ConnectionHandler; impl crate::handler::ConnectionHandler for ConnectionHandler { type FromBehaviour = Void; type ToBehaviour = Void; - type Error = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -82,12 +81,7 @@ impl crate::handler::ConnectionHandler for ConnectionHandler { &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { Poll::Pending } diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index f0a46129250..be0ca67ab48 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -102,8 +102,6 @@ pub trait ConnectionHandler: Send + 'static { type FromBehaviour: fmt::Debug + Send + 'static; /// A type representing message(s) a [`ConnectionHandler`] can send to a [`NetworkBehaviour`](crate::behaviour::NetworkBehaviour) via [`ConnectionHandlerEvent::NotifyBehaviour`]. type ToBehaviour: fmt::Debug + Send + 'static; - /// The type of errors returned by [`ConnectionHandler::poll`]. - type Error: error::Error + fmt::Debug + Send + 'static; /// The inbound upgrade for the protocol(s) used by the handler. type InboundProtocol: InboundUpgradeSend; /// The outbound upgrade for the protocol(s) used by the handler. @@ -149,12 +147,7 @@ pub trait ConnectionHandler: Send + 'static { &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, >; /// Gracefully close the [`ConnectionHandler`]. @@ -541,21 +534,12 @@ impl SubstreamProtocol { /// Event produced by a handler. #[derive(Debug, Clone, PartialEq, Eq)] #[non_exhaustive] -pub enum ConnectionHandlerEvent { +pub enum ConnectionHandlerEvent { /// Request a new outbound substream to be opened with the remote. OutboundSubstreamRequest { /// The protocol(s) to apply on the substream. 
protocol: SubstreamProtocol, }, - - /// Close the connection for the given reason. - /// - /// Note this will affect all [`ConnectionHandler`]s handling this - /// connection, in other words it will close the connection for all - /// [`ConnectionHandler`]s. To signal that one has no more need for the - /// connection, while allowing other [`ConnectionHandler`]s to continue using - /// the connection, return false in [`ConnectionHandler::connection_keep_alive`]. - Close(TErr), /// We learned something about the protocols supported by the remote. ReportRemoteProtocols(ProtocolSupport), @@ -572,15 +556,15 @@ pub enum ProtocolSupport { } /// Event produced by a handler. -impl - ConnectionHandlerEvent +impl + ConnectionHandlerEvent { /// If this is an `OutboundSubstreamRequest`, maps the `info` member from a /// `TOutboundOpenInfo` to something else. pub fn map_outbound_open_info( self, map: F, - ) -> ConnectionHandlerEvent + ) -> ConnectionHandlerEvent where F: FnOnce(TOutboundOpenInfo) -> I, { @@ -593,7 +577,6 @@ impl ConnectionHandlerEvent::NotifyBehaviour(val) => { ConnectionHandlerEvent::NotifyBehaviour(val) } - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), ConnectionHandlerEvent::ReportRemoteProtocols(support) => { ConnectionHandlerEvent::ReportRemoteProtocols(support) } @@ -602,10 +585,7 @@ impl /// If this is an `OutboundSubstreamRequest`, maps the protocol (`TConnectionUpgrade`) /// to something else. - pub fn map_protocol( - self, - map: F, - ) -> ConnectionHandlerEvent + pub fn map_protocol(self, map: F) -> ConnectionHandlerEvent where F: FnOnce(TConnectionUpgrade) -> I, { @@ -618,7 +598,6 @@ impl ConnectionHandlerEvent::NotifyBehaviour(val) => { ConnectionHandlerEvent::NotifyBehaviour(val) } - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), ConnectionHandlerEvent::ReportRemoteProtocols(support) => { ConnectionHandlerEvent::ReportRemoteProtocols(support) } @@ -629,7 +608,7 @@ impl pub fn map_custom( self, map: F, - ) -> ConnectionHandlerEvent + ) -> ConnectionHandlerEvent where F: FnOnce(TCustom) -> I, { @@ -640,29 +619,6 @@ impl ConnectionHandlerEvent::NotifyBehaviour(val) => { ConnectionHandlerEvent::NotifyBehaviour(map(val)) } - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(val), - ConnectionHandlerEvent::ReportRemoteProtocols(support) => { - ConnectionHandlerEvent::ReportRemoteProtocols(support) - } - } - } - - /// If this is a `Close` event, maps the content to something else. 
- pub fn map_close( - self, - map: F, - ) -> ConnectionHandlerEvent - where - F: FnOnce(TErr) -> I, - { - match self { - ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } => { - ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } - } - ConnectionHandlerEvent::NotifyBehaviour(val) => { - ConnectionHandlerEvent::NotifyBehaviour(val) - } - ConnectionHandlerEvent::Close(val) => ConnectionHandlerEvent::Close(map(val)), ConnectionHandlerEvent::ReportRemoteProtocols(support) => { ConnectionHandlerEvent::ReportRemoteProtocols(support) } diff --git a/swarm/src/handler/either.rs b/swarm/src/handler/either.rs index 093900135b8..b48b7cdcb15 100644 --- a/swarm/src/handler/either.rs +++ b/swarm/src/handler/either.rs @@ -80,7 +80,6 @@ where { type FromBehaviour = Either; type ToBehaviour = Either; - type Error = Either; type InboundProtocol = Either, SendWrapper>; type OutboundProtocol = Either, SendWrapper>; @@ -119,22 +118,15 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { let event = match self { Either::Left(handler) => futures::ready!(handler.poll(cx)) .map_custom(Either::Left) - .map_close(Either::Left) .map_protocol(|p| Either::Left(SendWrapper(p))) .map_outbound_open_info(Either::Left), Either::Right(handler) => futures::ready!(handler.poll(cx)) .map_custom(Either::Right) - .map_close(Either::Right) .map_protocol(|p| Either::Right(SendWrapper(p))) .map_outbound_open_info(Either::Right), }; diff --git a/swarm/src/handler/map_in.rs b/swarm/src/handler/map_in.rs index e3458eb5451..bd45eee4d97 100644 --- a/swarm/src/handler/map_in.rs +++ b/swarm/src/handler/map_in.rs @@ -52,7 +52,6 @@ where { type FromBehaviour = TNewIn; type ToBehaviour = TConnectionHandler::ToBehaviour; - type Error = TConnectionHandler::Error; type InboundProtocol = TConnectionHandler::InboundProtocol; type OutboundProtocol = TConnectionHandler::OutboundProtocol; type InboundOpenInfo = TConnectionHandler::InboundOpenInfo; @@ -76,12 +75,7 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { self.inner.poll(cx) } diff --git a/swarm/src/handler/map_out.rs b/swarm/src/handler/map_out.rs index cc06a4c50c8..8ef8bad61b3 100644 --- a/swarm/src/handler/map_out.rs +++ b/swarm/src/handler/map_out.rs @@ -47,7 +47,6 @@ where { type FromBehaviour = TConnectionHandler::FromBehaviour; type ToBehaviour = TNewOut; - type Error = TConnectionHandler::Error; type InboundProtocol = TConnectionHandler::InboundProtocol; type OutboundProtocol = TConnectionHandler::OutboundProtocol; type InboundOpenInfo = TConnectionHandler::InboundOpenInfo; @@ -69,18 +68,12 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { self.inner.poll(cx).map(|ev| match ev { ConnectionHandlerEvent::NotifyBehaviour(ev) => { ConnectionHandlerEvent::NotifyBehaviour((self.map)(ev)) } - ConnectionHandlerEvent::Close(err) => ConnectionHandlerEvent::Close(err), ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } => { ConnectionHandlerEvent::OutboundSubstreamRequest { protocol } } diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index 89d4d36fadc..fc1cd750763 100644 --- 
a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -111,7 +111,6 @@ where { type FromBehaviour = (K, ::FromBehaviour); type ToBehaviour = (K, ::ToBehaviour); - type Error = ::Error; type InboundProtocol = Upgrade::InboundProtocol>; type OutboundProtocol = ::OutboundProtocol; type InboundOpenInfo = Info::InboundOpenInfo>; @@ -241,12 +240,7 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { // Calling `gen_range(0, 0)` (see below) would panic, so we have return early to avoid // that situation. diff --git a/swarm/src/handler/one_shot.rs b/swarm/src/handler/one_shot.rs index a611bc5073c..b1fc41e9098 100644 --- a/swarm/src/handler/one_shot.rs +++ b/swarm/src/handler/one_shot.rs @@ -115,7 +115,6 @@ where { type FromBehaviour = TOutbound; type ToBehaviour = Result>; - type Error = void::Void; type InboundProtocol = TInbound; type OutboundProtocol = TOutbound; type OutboundOpenInfo = (); @@ -133,12 +132,7 @@ where &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { if !self.events_out.is_empty() { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( diff --git a/swarm/src/handler/pending.rs b/swarm/src/handler/pending.rs index 90e6522404e..23b9adcfd90 100644 --- a/swarm/src/handler/pending.rs +++ b/swarm/src/handler/pending.rs @@ -42,7 +42,6 @@ impl PendingConnectionHandler { impl ConnectionHandler for PendingConnectionHandler { type FromBehaviour = Void; type ToBehaviour = Void; - type Error = Void; type InboundProtocol = PendingUpgrade; type OutboundProtocol = PendingUpgrade; type OutboundOpenInfo = Void; @@ -60,12 +59,7 @@ impl ConnectionHandler for PendingConnectionHandler { &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { Poll::Pending } diff --git a/swarm/src/handler/select.rs b/swarm/src/handler/select.rs index 957ba43fbe7..fc470ff803e 100644 --- a/swarm/src/handler/select.rs +++ b/swarm/src/handler/select.rs @@ -181,7 +181,6 @@ where { type FromBehaviour = Either; type ToBehaviour = Either; - type Error = Either; type InboundProtocol = SelectUpgrade< SendWrapper<::InboundProtocol>, SendWrapper<::InboundProtocol>, @@ -219,20 +218,12 @@ where &mut self, cx: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { match self.proto1.poll(cx) { Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)) => { return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Either::Left(event))); } - Poll::Ready(ConnectionHandlerEvent::Close(event)) => { - return Poll::Ready(ConnectionHandlerEvent::Close(Either::Left(event))); - } Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: protocol @@ -252,9 +243,6 @@ where event, ))); } - Poll::Ready(ConnectionHandlerEvent::Close(event)) => { - return Poll::Ready(ConnectionHandlerEvent::Close(Either::Right(event))); - } Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol }) => { return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { protocol: protocol 
diff --git a/swarm/src/lib.rs b/swarm/src/lib.rs index 1aa6bfb30cb..462dc718b86 100644 --- a/swarm/src/lib.rs +++ b/swarm/src/lib.rs @@ -167,13 +167,10 @@ pub type THandlerInEvent = as ConnectionHandle /// Custom event that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`]. pub type THandlerOutEvent = as ConnectionHandler>::ToBehaviour; -/// Custom error that can be produced by the [`ConnectionHandler`] of the [`NetworkBehaviour`]. -pub type THandlerErr = as ConnectionHandler>::Error; - /// Event generated by the `Swarm`. #[derive(Debug)] #[non_exhaustive] -pub enum SwarmEvent { +pub enum SwarmEvent { /// Event generated by the `NetworkBehaviour`. Behaviour(TBehaviourOutEvent), /// A connection to the given peer has been opened. @@ -207,7 +204,7 @@ pub enum SwarmEvent { num_established: u32, /// Reason for the disconnection, if it was not a successful /// active close. - cause: Option>, + cause: Option, }, /// A new connection arrived on a listener and is in the process of protocol negotiation. /// @@ -304,7 +301,7 @@ pub enum SwarmEvent { ExternalAddrExpired { address: Multiaddr }, } -impl SwarmEvent { +impl SwarmEvent { /// Extract the `TBehaviourOutEvent` from this [`SwarmEvent`] in case it is the `Behaviour` variant, otherwise fail. #[allow(clippy::result_large_err)] pub fn try_into_behaviour_event(self) -> Result { @@ -349,7 +346,7 @@ where /// can be polled again. pending_handler_event: Option<(PeerId, PendingNotifyHandler, THandlerInEvent)>, - pending_swarm_events: VecDeque>>, + pending_swarm_events: VecDeque>, } impl Unpin for Swarm where TBehaviour: NetworkBehaviour {} @@ -631,12 +628,8 @@ where /// /// Returns `Ok(())` if there was one or more established connections to the peer. /// - /// Note: Closing a connection via [`Swarm::disconnect_peer_id`] does - /// not inform the corresponding [`ConnectionHandler`]. - /// Closing a connection via a [`ConnectionHandler`] can be done either in a - /// collaborative manner across [`ConnectionHandler`]s - /// with [`ConnectionHandler::connection_keep_alive`] or directly with - /// [`ConnectionHandlerEvent::Close`]. + /// Closing a connection via [`Swarm::disconnect_peer_id`] will poll [`ConnectionHandler::poll_close`] to completion. + /// Use this function if you want to close a connection _despite_ it still being in use by one or more handlers. #[allow(clippy::result_unit_err)] pub fn disconnect_peer_id(&mut self, peer_id: PeerId) -> Result<(), ()> { let was_connected = self.pool.is_connected(peer_id); @@ -687,7 +680,7 @@ where &mut self.behaviour } - fn handle_pool_event(&mut self, event: PoolEvent>) { + fn handle_pool_event(&mut self, event: PoolEvent>) { match event { PoolEvent::ConnectionEstablished { peer_id, @@ -1194,7 +1187,7 @@ where fn poll_next_event( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll> { // We use a `this` variable because the compiler can't mutably borrow multiple times // across a `Deref`. 
let this = &mut *self; @@ -1369,7 +1362,7 @@ impl futures::Stream for Swarm where TBehaviour: NetworkBehaviour, { - type Item = SwarmEvent, THandlerErr>; + type Item = SwarmEvent>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.as_mut().poll_next_event(cx).map(Some) diff --git a/swarm/tests/connection_close.rs b/swarm/tests/connection_close.rs index 305e33c1804..4efe8d17e49 100644 --- a/swarm/tests/connection_close.rs +++ b/swarm/tests/connection_close.rs @@ -96,7 +96,6 @@ impl NetworkBehaviour for Behaviour { impl ConnectionHandler for HandlerWithState { type FromBehaviour = Void; type ToBehaviour = u64; - type Error = Void; type InboundProtocol = DeniedUpgrade; type OutboundProtocol = DeniedUpgrade; type InboundOpenInfo = (); @@ -114,12 +113,7 @@ impl ConnectionHandler for HandlerWithState { &mut self, _: &mut Context<'_>, ) -> Poll< - ConnectionHandlerEvent< - Self::OutboundProtocol, - Self::OutboundOpenInfo, - Self::ToBehaviour, - Self::Error, - >, + ConnectionHandlerEvent, > { Poll::Pending } From 32c945ccc05ac60f2daf305b21d78f4534e5500f Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 3 Nov 2023 22:36:51 +1100 Subject: [PATCH 24/33] chore: add changelog entry of backport Related: #4755. Pull-Request: #4795. --- swarm/CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 692ce9d1bde..979f056a425 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -26,6 +26,12 @@ - Remove deprecated symbols. See [PR 4737](https://github.com/libp2p/rust-libp2p/pull/4737). +## 0.43.7 + +- Deprecate `ConnectionHandlerEvent::Close`. + See [issue 3591](https://github.com/libp2p/rust-libp2p/issues/3591) for details. + See [PR 4714](https://github.com/libp2p/rust-libp2p/pull/4714). + ## 0.43.6 - Deprecate `libp2p::swarm::SwarmBuilder`. From 402212cf79668cb77749f947626a41fdfc3011c8 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sun, 5 Nov 2023 03:22:49 +0100 Subject: [PATCH 25/33] fix(swarm): implement `ConnectionHandler::poll_close` for combinators Follow-up to https://github.com/libp2p/rust-libp2p/pull/4076. This is especially relevant since `libp2p-swarm-derive` uses `SelectConnectionHandler`. Pull-Request: #4794. --- swarm/src/behaviour/toggle.rs | 8 ++++++++ swarm/src/handler.rs | 3 +++ swarm/src/handler/either.rs | 9 +++++++++ swarm/src/handler/map_in.rs | 4 ++++ swarm/src/handler/map_out.rs | 9 +++++++++ swarm/src/handler/multi.rs | 13 ++++++++++++- swarm/src/handler/select.rs | 14 +++++++++++++- 7 files changed, 58 insertions(+), 2 deletions(-) diff --git a/swarm/src/behaviour/toggle.rs b/swarm/src/behaviour/toggle.rs index 5c23ee099a3..e81c5343701 100644 --- a/swarm/src/behaviour/toggle.rs +++ b/swarm/src/behaviour/toggle.rs @@ -363,4 +363,12 @@ where } } } + + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + let Some(inner) = self.inner.as_mut() else { + return Poll::Ready(None); + }; + + inner.poll_close(cx) + } } diff --git a/swarm/src/handler.rs b/swarm/src/handler.rs index be0ca67ab48..31d2c91e391 100644 --- a/swarm/src/handler.rs +++ b/swarm/src/handler.rs @@ -160,6 +160,9 @@ pub trait ConnectionHandler: Send + 'static { /// We therefore cannot guarantee that performing IO within here will succeed. /// /// To signal completion, [`Poll::Ready(None)`] should be returned. + /// + /// Implementations MUST have a [`fuse`](futures::StreamExt::fuse)-like behaviour. + /// That is, [`Poll::Ready(None)`] MUST be returned on repeated calls to [`ConnectionHandler::poll_close`]. 
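// Editor's illustration (not part of this patch): a hedged sketch of how a concrete
// handler might honour the fuse-like contract documented above. `MyHandler`, `MyEvent`
// and the `pending_events: VecDeque<MyEvent>` field are assumed names used only for
// this example; all other `ConnectionHandler` items are elided. The idea is to drain
// buffered events first and, once empty, keep answering `Poll::Ready(None)` on every
// subsequent call.
impl ConnectionHandler for MyHandler {
    type ToBehaviour = MyEvent;
    // ... remaining associated types and required methods elided ...

    fn poll_close(&mut self, _cx: &mut Context<'_>) -> Poll<Option<Self::ToBehaviour>> {
        if let Some(event) = self.pending_events.pop_front() {
            // Flush buffered events one at a time while the connection shuts down.
            return Poll::Ready(Some(event));
        }

        // Buffer drained: every further call lands here, mirroring a fused stream.
        Poll::Ready(None)
    }
}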
fn poll_close(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(None) } diff --git a/swarm/src/handler/either.rs b/swarm/src/handler/either.rs index b48b7cdcb15..a5aab9b5fee 100644 --- a/swarm/src/handler/either.rs +++ b/swarm/src/handler/either.rs @@ -134,6 +134,15 @@ where Poll::Ready(event) } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + let event = match self { + Either::Left(handler) => futures::ready!(handler.poll_close(cx)).map(Either::Left), + Either::Right(handler) => futures::ready!(handler.poll_close(cx)).map(Either::Right), + }; + + Poll::Ready(event) + } + fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/handler/map_in.rs b/swarm/src/handler/map_in.rs index bd45eee4d97..9316ef4d2ce 100644 --- a/swarm/src/handler/map_in.rs +++ b/swarm/src/handler/map_in.rs @@ -80,6 +80,10 @@ where self.inner.poll(cx) } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_close(cx) + } + fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/handler/map_out.rs b/swarm/src/handler/map_out.rs index 8ef8bad61b3..f877bfa6f64 100644 --- a/swarm/src/handler/map_out.rs +++ b/swarm/src/handler/map_out.rs @@ -21,6 +21,7 @@ use crate::handler::{ ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, SubstreamProtocol, }; +use futures::ready; use std::fmt::Debug; use std::task::{Context, Poll}; @@ -83,6 +84,14 @@ where }) } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + let Some(e) = ready!(self.inner.poll_close(cx)) else { + return Poll::Ready(None); + }; + + Poll::Ready(Some((self.map)(e))) + } + fn on_connection_event( &mut self, event: ConnectionEvent< diff --git a/swarm/src/handler/multi.rs b/swarm/src/handler/multi.rs index fc1cd750763..0b4549ed733 100644 --- a/swarm/src/handler/multi.rs +++ b/swarm/src/handler/multi.rs @@ -27,7 +27,7 @@ use crate::handler::{ }; use crate::upgrade::{InboundUpgradeSend, OutboundUpgradeSend, UpgradeInfoSend}; use crate::Stream; -use futures::{future::BoxFuture, prelude::*}; +use futures::{future::BoxFuture, prelude::*, ready}; use rand::Rng; use std::{ cmp, @@ -271,6 +271,17 @@ where Poll::Pending } + + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + for (k, h) in self.handlers.iter_mut() { + let Some(e) = ready!(h.poll_close(cx)) else { + continue; + }; + return Poll::Ready(Some((k.clone(), e))); + } + + Poll::Ready(None) + } } /// Split [`MultiHandler`] into parts. diff --git a/swarm/src/handler/select.rs b/swarm/src/handler/select.rs index fc470ff803e..e049252d448 100644 --- a/swarm/src/handler/select.rs +++ b/swarm/src/handler/select.rs @@ -25,7 +25,7 @@ use crate::handler::{ }; use crate::upgrade::SendWrapper; use either::Either; -use futures::future; +use futures::{future, ready}; use libp2p_core::upgrade::SelectUpgrade; use std::{cmp, task::Context, task::Poll}; @@ -259,6 +259,18 @@ where Poll::Pending } + fn poll_close(&mut self, cx: &mut Context<'_>) -> Poll> { + if let Some(e) = ready!(self.proto1.poll_close(cx)) { + return Poll::Ready(Some(Either::Left(e))); + } + + if let Some(e) = ready!(self.proto2.poll_close(cx)) { + return Poll::Ready(Some(Either::Right(e))); + } + + Poll::Ready(None) + } + fn on_connection_event( &mut self, event: ConnectionEvent< From 74e315719cb4f934ba9b7052b8bc5e565dda3599 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sun, 5 Nov 2023 08:45:26 +0100 Subject: [PATCH 26/33] chore: prepare v0.53.0 Pull-Request: #4796. 
--- core/CHANGELOG.md | 2 +- libp2p/CHANGELOG.md | 2 +- misc/allow-block-list/CHANGELOG.md | 4 ++-- misc/connection-limits/CHANGELOG.md | 2 +- misc/futures-bounded/CHANGELOG.md | 2 +- misc/memory-connection-limits/CHANGELOG.md | 2 +- misc/metrics/CHANGELOG.md | 2 +- muxers/mplex/CHANGELOG.md | 4 ++-- muxers/yamux/CHANGELOG.md | 2 +- protocols/autonat/CHANGELOG.md | 4 ++-- protocols/dcutr/CHANGELOG.md | 6 +++--- protocols/floodsub/CHANGELOG.md | 2 +- protocols/gossipsub/CHANGELOG.md | 2 +- protocols/identify/CHANGELOG.md | 4 ++-- protocols/kad/CHANGELOG.md | 4 ++-- protocols/mdns/CHANGELOG.md | 6 +++--- protocols/perf/CHANGELOG.md | 2 +- protocols/ping/CHANGELOG.md | 2 +- protocols/relay/CHANGELOG.md | 6 +++--- protocols/rendezvous/CHANGELOG.md | 2 +- protocols/request-response/CHANGELOG.md | 2 +- protocols/upnp/CHANGELOG.md | 2 +- swarm-derive/CHANGELOG.md | 2 +- swarm-test/CHANGELOG.md | 4 ++-- swarm/CHANGELOG.md | 2 +- transports/dns/CHANGELOG.md | 4 ++-- transports/noise/CHANGELOG.md | 2 +- transports/plaintext/CHANGELOG.md | 2 +- transports/pnet/CHANGELOG.md | 2 +- transports/quic/CHANGELOG.md | 2 +- transports/tcp/CHANGELOG.md | 2 +- transports/tls/CHANGELOG.md | 2 +- transports/uds/CHANGELOG.md | 4 ++-- transports/websocket-websys/CHANGELOG.md | 2 +- transports/websocket/CHANGELOG.md | 2 +- transports/webtransport-websys/CHANGELOG.md | 2 +- 36 files changed, 50 insertions(+), 50 deletions(-) diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index da6f81c139e..6dda895c70a 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.41.0 - unreleased +## 0.41.0 - Remove blanket-impl of `{In,Out}boundUpgrade` for `{In,Out}boundConnectionUpgrade`. See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index 636d0622256..1c325215778 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.53.0 - unreleased +## 0.53.0 - Raise MSRV to 1.73. See [PR 4692](https://github.com/libp2p/rust-libp2p/pull/4692). diff --git a/misc/allow-block-list/CHANGELOG.md b/misc/allow-block-list/CHANGELOG.md index d9b8e0c6de1..7778e924886 100644 --- a/misc/allow-block-list/CHANGELOG.md +++ b/misc/allow-block-list/CHANGELOG.md @@ -1,7 +1,7 @@ -## 0.3.0 - unreleased +## 0.3.0 -## 0.2.0 +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/misc/connection-limits/CHANGELOG.md b/misc/connection-limits/CHANGELOG.md index 36884867b97..a5b68a6f51b 100644 --- a/misc/connection-limits/CHANGELOG.md +++ b/misc/connection-limits/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.3.0 - unreleased +## 0.3.0 ## 0.2.1 diff --git a/misc/futures-bounded/CHANGELOG.md b/misc/futures-bounded/CHANGELOG.md index 9801c9c1498..6e3b720fe4c 100644 --- a/misc/futures-bounded/CHANGELOG.md +++ b/misc/futures-bounded/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.2.1 - unreleased +## 0.2.1 - Add `.len()` getter to `FuturesMap`, `FuturesSet`, `StreamMap` and `StreamSet`. See [PR 4745](https://github.com/libp2p/rust-lib2pp/pulls/4745). 
diff --git a/misc/memory-connection-limits/CHANGELOG.md b/misc/memory-connection-limits/CHANGELOG.md index 32d9c8bf59b..fc598872d50 100644 --- a/misc/memory-connection-limits/CHANGELOG.md +++ b/misc/memory-connection-limits/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.2.0 - unreleased +## 0.2.0 ## 0.1.0 diff --git a/misc/metrics/CHANGELOG.md b/misc/metrics/CHANGELOG.md index acad2043fc8..482cb6b6e0a 100644 --- a/misc/metrics/CHANGELOG.md +++ b/misc/metrics/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.14.0 - unreleased +## 0.14.0 - Add metrics for `SwarmEvent::{NewExternalAddrCandidate,ExternalAddrConfirmed,ExternalAddrExpired}`. See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). diff --git a/muxers/mplex/CHANGELOG.md b/muxers/mplex/CHANGELOG.md index 1e50042e08a..48ab616e131 100644 --- a/muxers/mplex/CHANGELOG.md +++ b/muxers/mplex/CHANGELOG.md @@ -1,9 +1,9 @@ -## 0.41.0 - unreleased +## 0.41.0 - Migrate to `{In,Out}boundConnectionUpgrade` traits. See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). -## 0.40.0 +## 0.40.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md index d9925596bad..a2983b31572 100644 --- a/muxers/yamux/CHANGELOG.md +++ b/muxers/yamux/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.45.0 - unreleased +## 0.45.0 - Migrate to `{In,Out}boundConnectionUpgrade` traits. See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). diff --git a/protocols/autonat/CHANGELOG.md b/protocols/autonat/CHANGELOG.md index 2b14598bd3e..1259dd01fd4 100644 --- a/protocols/autonat/CHANGELOG.md +++ b/protocols/autonat/CHANGELOG.md @@ -1,10 +1,10 @@ -## 0.12.0 - unreleased +## 0.12.0 - Remove `Clone`, `PartialEq` and `Eq` implementations on `Event` and its sub-structs. The `Event` also contains errors which are not clonable or comparable. See [PR 3914](https://github.com/libp2p/rust-libp2p/pull/3914). -## 0.11.0 +## 0.11.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/protocols/dcutr/CHANGELOG.md b/protocols/dcutr/CHANGELOG.md index cb84020ec5f..d3857373658 100644 --- a/protocols/dcutr/CHANGELOG.md +++ b/protocols/dcutr/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.11.0 - unreleased +## 0.11.0 - Add `ConnectionId` to `Event::DirectConnectionUpgradeSucceeded` and `Event::DirectConnectionUpgradeFailed`. See [PR 4558](https://github.com/libp2p/rust-libp2p/pull/4558). @@ -7,9 +7,9 @@ See [PR 4624](https://github.com/libp2p/rust-libp2p/pull/4624). - Simplify public API. We now only emit a single event: whether the hole-punch was successful or not. - See [PR XXXX](https://github.com/libp2p/rust-libp2p/pull/XXXX). + See [PR 4749](https://github.com/libp2p/rust-libp2p/pull/4749). -## 0.10.0 +## 0.10.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/protocols/floodsub/CHANGELOG.md b/protocols/floodsub/CHANGELOG.md index 3891a09f4d4..8e3cb70ddf1 100644 --- a/protocols/floodsub/CHANGELOG.md +++ b/protocols/floodsub/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.44.0 - unreleased +## 0.44.0 - Change publish to require `data: impl Into` to internally avoid any costly cloning / allocation. See [PR 4754](https://github.com/libp2p/rust-libp2p/pull/4754). diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index b86ec4de6d4..a9dac438223 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.46.0 - unreleased +## 0.46.0 - Remove `fast_message_id_fn` mechanism from `Config`. See [PR 4285](https://github.com/libp2p/rust-libp2p/pull/4285). 
diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index 960ed530682..9ee8565b254 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.44.0 - unreleased +## 0.44.0 - Add `Info` to the `libp2p-identify::Event::Pushed` to report pushed info. See [PR 4527](https://github.com/libp2p/rust-libp2p/pull/4527) @@ -7,7 +7,7 @@ See [PR 4735](https://github.com/libp2p/rust-libp2p/pull/4735) - Don't repeatedly report the same observed address as a `NewExternalAddrCandidate`. Instead, only report each observed address once per connection. - This allows users to probabilistically deem an address as external if it gets reported as a candidate repeatedly. + This allows users to probabilistically deem an address as external if it gets reported as a candidate repeatedly. See [PR 4721](https://github.com/libp2p/rust-libp2p/pull/4721). ## 0.43.1 diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index d2b92195ab3..842204cbef2 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.45.0 - unreleased +## 0.45.0 - Remove deprecated `kad::Config::set_connection_idle_timeout` in favor of `SwarmBuilder::idle_connection_timeout`. See [PR 4659](https://github.com/libp2p/rust-libp2p/pull/4659). @@ -10,7 +10,7 @@ See [PR 4698](https://github.com/libp2p/rust-libp2p/pull/4698). - Remove previously deprecated type-aliases. Users should follow the convention of importing the `libp2p::kad` module and referring to symbols as `kad::Behaviour` etc. - See [PR 4733](https://github.com/libp2p/rust-libp2p/pull/4733). + See [PR 4733](https://github.com/libp2p/rust-libp2p/pull/4733). ## 0.44.6 diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 060fac8c51c..29b0fbcbb9c 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,13 +1,13 @@ -## 0.45.0 - unreleased +## 0.45.0 - Don't perform IO in `Behaviour::poll`. See [PR 4623](https://github.com/libp2p/rust-libp2p/pull/4623). -## 0.44.0 +## 0.44.0 - Change `mdns::Event` to hold `Vec` and remove `DiscoveredAddrsIter` and `ExpiredAddrsIter`. See [PR 3621]. - + - Raise MSRV to 1.65. See [PR 3715]. - Remove deprecated `Mdns` prefixed items. See [PR 3699]. diff --git a/protocols/perf/CHANGELOG.md b/protocols/perf/CHANGELOG.md index 6976a89887b..4e448d7f44a 100644 --- a/protocols/perf/CHANGELOG.md +++ b/protocols/perf/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.3.0 - unreleased +## 0.3.0 - Continuously measure on single connection (iperf-style). See https://github.com/libp2p/test-plans/issues/261 for high level overview. diff --git a/protocols/ping/CHANGELOG.md b/protocols/ping/CHANGELOG.md index b94b4581a6a..33e0139b996 100644 --- a/protocols/ping/CHANGELOG.md +++ b/protocols/ping/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.44.0 - unreleased +## 0.44.0 ## 0.43.1 diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index 200cc4bc18d..8c1198f9974 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.17.0 - unreleased +## 0.17.0 - Don't close connections on protocol failures within the relay-server. To achieve this, error handling was restructured: @@ -10,7 +10,7 @@ - Fix a rare race condition when making a reservation on a relay that could lead to a failed reservation. See [PR 4747](https://github.com/libp2p/rust-lib2pp/pulls/4747). - Propagate errors of relay client to the listener / dialer. 
- A failed reservation will now appear as `SwarmEvent::ListenerClosed` with the `ListenerId` of the corresponding `Swarm::listen_on` call. + A failed reservation will now appear as `SwarmEvent::ListenerClosed` with the `ListenerId` of the corresponding `Swarm::listen_on` call. A failed circuit request will now appear as `SwarmEvent::OutgoingConnectionError` with the `ConnectionId` of the corresponding `Swarm::dial` call. Lastly, a failed reservation or circuit request will **no longer** close the underlying relay connection. As a result, we remove the following enum variants: @@ -18,7 +18,7 @@ - `relay::client::Event::OutboundCircuitReqFailed` - `relay::client::Event::InboundCircuitReqDenied` - `relay::client::Event::InboundCircuitReqDenyFailed` - + See [PR 4745](https://github.com/libp2p/rust-lib2pp/pulls/4745). ## 0.16.2 diff --git a/protocols/rendezvous/CHANGELOG.md b/protocols/rendezvous/CHANGELOG.md index 7b75b35ae9c..e60699da734 100644 --- a/protocols/rendezvous/CHANGELOG.md +++ b/protocols/rendezvous/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.14.0 - unreleased +## 0.14.0 ## 0.13.1 diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index 138401c2f50..30fc700da3c 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.26.0 - unreleased +## 0.26.0 - Remove `request_response::Config::set_connection_keep_alive` in favor of `SwarmBuilder::idle_connection_timeout`. See [PR 4679](https://github.com/libp2p/rust-libp2p/pull/4679). diff --git a/protocols/upnp/CHANGELOG.md b/protocols/upnp/CHANGELOG.md index 75aeaf64d6e..806ed2e11d5 100644 --- a/protocols/upnp/CHANGELOG.md +++ b/protocols/upnp/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.2.0 - unreleased +## 0.2.0 ## 0.1.1 diff --git a/swarm-derive/CHANGELOG.md b/swarm-derive/CHANGELOG.md index 3ed7b9931df..08adba00cdb 100644 --- a/swarm-derive/CHANGELOG.md +++ b/swarm-derive/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.34.0 - unreleased +## 0.34.0 - Adapt to interface changes in `libp2p-swarm`. See [PR 4706](https://github.com/libp2p/rust-libp2p/pull/4076). diff --git a/swarm-test/CHANGELOG.md b/swarm-test/CHANGELOG.md index 341b3d6e01f..95223e60272 100644 --- a/swarm-test/CHANGELOG.md +++ b/swarm-test/CHANGELOG.md @@ -1,7 +1,7 @@ -## 0.3.0 - unreleased +## 0.3.0 -## 0.2.0 +## 0.2.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/swarm/CHANGELOG.md b/swarm/CHANGELOG.md index 979f056a425..48cafee6ced 100644 --- a/swarm/CHANGELOG.md +++ b/swarm/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.44.0 - unreleased +## 0.44.0 - Add `#[non_exhaustive]` to `FromSwarm`, `ToSwarm`, `SwarmEvent`, `ConnectionHandlerEvent`, `ConnectionEvent`. See [PR 4581](https://github.com/libp2p/rust-libp2p/pull/4581). diff --git a/transports/dns/CHANGELOG.md b/transports/dns/CHANGELOG.md index 734e7f08740..6906cc972d6 100644 --- a/transports/dns/CHANGELOG.md +++ b/transports/dns/CHANGELOG.md @@ -1,9 +1,9 @@ -## 0.41.0 - unreleased +## 0.41.0 - Make `tokio::Transport::custom` and `async_std::Transport::custom` constructors infallible. See [PR 4464](https://github.com/libp2p/rust-libp2p/pull/4464). - Remove deprecated type-aliases. - See [PR 4739](https://github.com/libp2p/rust-libp2p/pull/4739). + See [PR 4739](https://github.com/libp2p/rust-libp2p/pull/4739). - Migrate to the `hickory-dns` project which has rebranded from `trust-dns`. We also remove the `tokio-dns-over-rustls` and `tokio-dns-over-https-rustls` features. 
Users should activate these features themselves on `hickory-resolver` if so desired. diff --git a/transports/noise/CHANGELOG.md b/transports/noise/CHANGELOG.md index e53d3a1077e..78effb673d2 100644 --- a/transports/noise/CHANGELOG.md +++ b/transports/noise/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.44.0 - unreleased +## 0.44.0 - Migrate to `{In,Out}boundConnectionUpgrade` traits. See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). diff --git a/transports/plaintext/CHANGELOG.md b/transports/plaintext/CHANGELOG.md index d13b355339b..42b53d12a88 100644 --- a/transports/plaintext/CHANGELOG.md +++ b/transports/plaintext/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.41.0 - unreleased +## 0.41.0 - Migrate to `{In,Out}boundConnectionUpgrade` traits. See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). diff --git a/transports/pnet/CHANGELOG.md b/transports/pnet/CHANGELOG.md index 7111b1039dc..e6c3d1974dc 100644 --- a/transports/pnet/CHANGELOG.md +++ b/transports/pnet/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.24.0 - unreleased +## 0.24.0 ## 0.23.1 diff --git a/transports/quic/CHANGELOG.md b/transports/quic/CHANGELOG.md index bb106fcf170..49438fa7ebb 100644 --- a/transports/quic/CHANGELOG.md +++ b/transports/quic/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.10.0 - unreleased +## 0.10.0 - Improve hole-punch timing. This should improve success rates for hole-punching QUIC connections. diff --git a/transports/tcp/CHANGELOG.md b/transports/tcp/CHANGELOG.md index 23293f09252..2bde64056cb 100644 --- a/transports/tcp/CHANGELOG.md +++ b/transports/tcp/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.41.0 - unreleased +## 0.41.0 ## 0.40.1 diff --git a/transports/tls/CHANGELOG.md b/transports/tls/CHANGELOG.md index 04793c719f0..83f72286559 100644 --- a/transports/tls/CHANGELOG.md +++ b/transports/tls/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.3.0 - unreleased +## 0.3.0 - Migrate to `{In,Out}boundConnectionUpgrade` traits. See [PR 4695](https://github.com/libp2p/rust-libp2p/pull/4695). diff --git a/transports/uds/CHANGELOG.md b/transports/uds/CHANGELOG.md index 10610de17b6..aad61d21547 100644 --- a/transports/uds/CHANGELOG.md +++ b/transports/uds/CHANGELOG.md @@ -1,7 +1,7 @@ -## 0.40.0 - unreleased +## 0.40.0 -## 0.39.0 +## 0.39.0 - Raise MSRV to 1.65. See [PR 3715]. diff --git a/transports/websocket-websys/CHANGELOG.md b/transports/websocket-websys/CHANGELOG.md index 71fb0726535..17c253cb80a 100644 --- a/transports/websocket-websys/CHANGELOG.md +++ b/transports/websocket-websys/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.3.0 - unreleased +## 0.3.0 ## 0.2.0 diff --git a/transports/websocket/CHANGELOG.md b/transports/websocket/CHANGELOG.md index 87c8f519d5d..192b1fa094e 100644 --- a/transports/websocket/CHANGELOG.md +++ b/transports/websocket/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.43.0 - unreleased +## 0.43.0 ## 0.42.1 diff --git a/transports/webtransport-websys/CHANGELOG.md b/transports/webtransport-websys/CHANGELOG.md index 358f709cf86..b368a943395 100644 --- a/transports/webtransport-websys/CHANGELOG.md +++ b/transports/webtransport-websys/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.2.0 - unreleased +## 0.2.0 ## 0.1.0 From c8e35d3703657724a0133e0eb856b776fde9312e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 08:32:56 +0000 Subject: [PATCH 27/33] deps: bump wasm-bindgen from 0.2.87 to 0.2.88 Pull-Request: #4799. 
--- Cargo.lock | 20 ++++++++++---------- examples/browser-webrtc/Cargo.toml | 2 +- transports/webrtc-websys/Cargo.toml | 2 +- transports/websocket-websys/Cargo.toml | 2 +- transports/webtransport-websys/Cargo.toml | 2 +- wasm-tests/webtransport-tests/Cargo.toml | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c7118a5f776..32603623a4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6225,9 +6225,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -6235,9 +6235,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", @@ -6262,9 +6262,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6272,9 +6272,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", @@ -6285,9 +6285,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" [[package]] name = "wasm-bindgen-test" diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index e18f6d9c531..db7c7228562 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -37,7 +37,7 @@ mime_guess = "2.0.4" js-sys = "0.3.64" libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen"] } libp2p-webrtc-websys = { workspace = true } -wasm-bindgen = "0.2.84" +wasm-bindgen = "0.2.88" wasm-bindgen-futures = "0.4.37" wasm-logger = { version = "0.2.0" } web-sys = { version = "0.3", features = ['Document', 'Element', 'HtmlElement', 'Node', 'Response', 'Window'] } diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 3e2659c71c8..1fdc935123f 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -26,7 +26,7 @@ send_wrapper = { version = "0.6.0", features = ["futures"] } serde = { version = "1.0", features = ["derive"] } thiserror = "1" tracing = "0.1.37" -wasm-bindgen = { version = "0.2.87" } +wasm-bindgen = { version = "0.2.88" } wasm-bindgen-futures = { version = "0.4.37" } 
web-sys = { version = "0.3.64", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 779cc4d8602..24413080c34 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -19,7 +19,7 @@ tracing = "0.1.37" parking_lot = "0.12.1" send_wrapper = "0.6.0" thiserror = "1.0.50" -wasm-bindgen = "0.2.84" +wasm-bindgen = "0.2.88" web-sys = { version = "0.3.61", features = ["BinaryType", "CloseEvent", "MessageEvent", "WebSocket", "Window"] } # Passing arguments to the docsrs builder in order to properly document cfg's. diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 49053349298..a5788ea70fc 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -24,7 +24,7 @@ multihash = { workspace = true } send_wrapper = { version = "0.6.0", features = ["futures"] } thiserror = "1.0.50" tracing = "0.1.37" -wasm-bindgen = "0.2.87" +wasm-bindgen = "0.2.88" wasm-bindgen-futures = "0.4.37" web-sys = { version = "0.3.64", features = [ "ReadableStreamDefaultReader", diff --git a/wasm-tests/webtransport-tests/Cargo.toml b/wasm-tests/webtransport-tests/Cargo.toml index 2991993c637..aea0640dd6e 100644 --- a/wasm-tests/webtransport-tests/Cargo.toml +++ b/wasm-tests/webtransport-tests/Cargo.toml @@ -17,7 +17,7 @@ libp2p-noise = { workspace = true } libp2p-webtransport-websys = { workspace = true } multiaddr = { workspace = true } multihash = { workspace = true } -wasm-bindgen = "0.2.87" +wasm-bindgen = "0.2.88" wasm-bindgen-futures = "0.4.37" wasm-bindgen-test = "0.3.37" web-sys = { version = "0.3.64", features = ["Response", "Window"] } From 77f9d153743aa2f771f353826a0c95e4a4e1a762 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:10:08 +0000 Subject: [PATCH 28/33] deps: bump serde_json from 1.0.107 to 1.0.108 Pull-Request: #4801. 
--- Cargo.lock | 4 ++-- hole-punching-tests/Cargo.toml | 2 +- misc/keygen/Cargo.toml | 2 +- protocols/request-response/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 32603623a4e..47d29fd5fed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5177,9 +5177,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "indexmap 2.0.0", "itoa", diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml index 14e5793f141..fa7ed6354f1 100644 --- a/hole-punching-tests/Cargo.toml +++ b/hole-punching-tests/Cargo.toml @@ -14,5 +14,5 @@ tracing = "0.1.37" redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] } tokio = { version = "1.29.1", features = ["full"] } serde = { version = "1.0.190", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" either = "1.9.0" diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index 20b94569f12..9062a2c3a8f 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -16,7 +16,7 @@ release = false clap = { version = "4.4.7", features = ["derive"] } zeroize = "1" serde = { version = "1.0.190", features = ["derive"] } -serde_json = "1.0.107" +serde_json = "1.0.108" libp2p-core = { workspace = true } base64 = "0.21.5" libp2p-identity = { workspace = true } diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 26a2d0ecc81..66b37e3d528 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -20,7 +20,7 @@ libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } rand = "0.8" serde = { version = "1.0", optional = true} -serde_json = { version = "1.0.107", optional = true } +serde_json = { version = "1.0.108", optional = true } smallvec = "1.11.1" tracing = "0.1.37" void = "1.0.2" From 4e1ad09c779ba76c0cd6a672981ca780cb0c277c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:20:27 +0000 Subject: [PATCH 29/33] deps: bump libc from 0.2.149 to 0.2.150 Pull-Request: #4803. 
--- Cargo.lock | 4 ++-- transports/tcp/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 47d29fd5fed..f1825b15ed4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2419,9 +2419,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libp2p" diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index 37e85d04ded..28604a4ebdd 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -15,7 +15,7 @@ async-io = { version = "1.13.0", optional = true } futures = "0.3.29" futures-timer = "3.0" if-watch = "3.1.0" -libc = "0.2.149" +libc = "0.2.150" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } socket2 = { version = "0.5.5", features = ["all"] } From 70c968f17e8c0f1def329ce055185a2ee75e42f8 Mon Sep 17 00:00:00 2001 From: Nick Date: Tue, 7 Nov 2023 04:07:40 +0300 Subject: [PATCH 30/33] fix(core): add impl `{In,Out}boundConnectionUpgrade` for `SelectUpgrade` Fixes https://github.com/libp2p/rust-libp2p/issues/4810. Pull-Request: #4812. --- Cargo.lock | 2 +- Cargo.toml | 2 +- core/CHANGELOG.md | 5 +++++ core/Cargo.toml | 2 +- core/src/upgrade/select.rs | 39 +++++++++++++++++++++++++++++++++++++- 5 files changed, 46 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1825b15ed4..4f1630f4810 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2530,7 +2530,7 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.41.0" +version = "0.41.1" dependencies = [ "async-std", "either", diff --git a/Cargo.toml b/Cargo.toml index d676d69a57a..89790baf82e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,7 +75,7 @@ libp2p = { version = "0.53.0", path = "libp2p" } libp2p-allow-block-list = { version = "0.3.0", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.12.0", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.3.0", path = "misc/connection-limits" } -libp2p-core = { version = "0.41.0", path = "core" } +libp2p-core = { version = "0.41.1", path = "core" } libp2p-dcutr = { version = "0.11.0", path = "protocols/dcutr" } libp2p-dns = { version = "0.41.0", path = "transports/dns" } libp2p-floodsub = { version = "0.44.0", path = "protocols/floodsub" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 6dda895c70a..034524b46dd 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.41.1 - unreleased + +- Implement `{In,Out}boundConnectionUpgrade` for `SelectUpgrade`. + See [PR 4812](https://github.com/libp2p/rust-libp2p/pull/4812). + ## 0.41.0 - Remove blanket-impl of `{In,Out}boundUpgrade` for `{In,Out}boundConnectionUpgrade`. 
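Editor's note (illustrative sketch, not part of this patch): the hunk for `core/src/upgrade/select.rs` below adds the missing trait impls; the following hedged sketch shows the kind of user code this re-enables, namely offering two stream multiplexers via `SelectUpgrade` when building a transport. It assumes the `tcp`, `tokio`, `noise`, `yamux` and `mplex` features of the `libp2p` crate are enabled; `build_transport` is an assumed helper name, not part of any libp2p API.

use libp2p::{core::upgrade, identity, mplex, noise, tcp, yamux, Transport};

fn build_transport(keypair: &identity::Keypair) -> Result<impl Transport, noise::Error> {
    Ok(tcp::tokio::Transport::new(tcp::Config::default())
        .upgrade(upgrade::Version::V1Lazy)
        .authenticate(noise::Config::new(keypair)?)
        // `SelectUpgrade` negotiates whichever multiplexer the remote supports.
        // Without the `{In,Out}boundConnectionUpgrade` impls added in this patch,
        // this call stopped compiling after the trait split in PR 4695.
        .multiplex(upgrade::SelectUpgrade::new(
            yamux::Config::default(),
            mplex::MplexConfig::default(),
        )))
}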
diff --git a/core/Cargo.toml b/core/Cargo.toml index b9ebb0ad851..3d3bad6eefa 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-core" edition = "2021" rust-version = { workspace = true } description = "Core traits and structs of libp2p" -version = "0.41.0" +version = "0.41.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/core/src/upgrade/select.rs b/core/src/upgrade/select.rs index 19b8b7a93f7..037045a2f29 100644 --- a/core/src/upgrade/select.rs +++ b/core/src/upgrade/select.rs @@ -19,7 +19,10 @@ // DEALINGS IN THE SOFTWARE. use crate::either::EitherFuture; -use crate::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use crate::upgrade::{ + InboundConnectionUpgrade, InboundUpgrade, OutboundConnectionUpgrade, OutboundUpgrade, + UpgradeInfo, +}; use either::Either; use futures::future; use std::iter::{Chain, Map}; @@ -84,6 +87,23 @@ where } } +impl InboundConnectionUpgrade for SelectUpgrade +where + A: InboundConnectionUpgrade, + B: InboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_inbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_inbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_inbound(sock, info)), + } + } +} + impl OutboundUpgrade for SelectUpgrade where A: OutboundUpgrade, @@ -100,3 +120,20 @@ where } } } + +impl OutboundConnectionUpgrade for SelectUpgrade +where + A: OutboundConnectionUpgrade, + B: OutboundConnectionUpgrade, +{ + type Output = future::Either; + type Error = Either; + type Future = EitherFuture; + + fn upgrade_outbound(self, sock: C, info: Self::Info) -> Self::Future { + match info { + Either::Left(info) => EitherFuture::First(self.0.upgrade_outbound(sock, info)), + Either::Right(info) => EitherFuture::Second(self.1.upgrade_outbound(sock, info)), + } + } +} From 7641b1429825cce1eb603d1dc3d9f8b59d4409ba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 01:28:50 +0000 Subject: [PATCH 31/33] deps: bump wasm-bindgen-futures from 0.4.37 to 0.4.38 Pull-Request: #4807. 
--- Cargo.lock | 8 ++++---- examples/browser-webrtc/Cargo.toml | 2 +- swarm/Cargo.toml | 2 +- transports/webrtc-websys/Cargo.toml | 2 +- transports/webtransport-websys/Cargo.toml | 2 +- wasm-tests/webtransport-tests/Cargo.toml | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4f1630f4810..4ab323b793f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2373,9 +2373,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" dependencies = [ "wasm-bindgen", ] @@ -6250,9 +6250,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02" dependencies = [ "cfg-if", "js-sys", diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index db7c7228562..553f661f8e1 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -38,7 +38,7 @@ js-sys = "0.3.64" libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen"] } libp2p-webrtc-websys = { workspace = true } wasm-bindgen = "0.2.88" -wasm-bindgen-futures = "0.4.37" +wasm-bindgen-futures = "0.4.38" wasm-logger = { version = "0.2.0" } web-sys = { version = "0.3", features = ['Document', 'Element', 'HtmlElement', 'Node', 'Response', 'Window'] } diff --git a/swarm/Cargo.toml b/swarm/Cargo.toml index fb28ff34d12..781029d3619 100644 --- a/swarm/Cargo.toml +++ b/swarm/Cargo.toml @@ -26,7 +26,7 @@ rand = "0.8" smallvec = "1.11.1" tracing = "0.1.37" void = "1" -wasm-bindgen-futures = { version = "0.4.37", optional = true } +wasm-bindgen-futures = { version = "0.4.38", optional = true } [target.'cfg(not(any(target_os = "emscripten", target_os = "wasi", target_os = "unknown")))'.dependencies] async-std = { version = "1.6.2", optional = true } diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 1fdc935123f..ea94a8d337a 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -27,7 +27,7 @@ serde = { version = "1.0", features = ["derive"] } thiserror = "1" tracing = "0.1.37" wasm-bindgen = { version = "0.2.88" } -wasm-bindgen-futures = { version = "0.4.37" } +wasm-bindgen-futures = { version = "0.4.38" } web-sys = { version = "0.3.64", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } [dev-dependencies] diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index a5788ea70fc..9af9ac0e118 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -25,7 +25,7 @@ send_wrapper = { version = "0.6.0", features = ["futures"] } thiserror = "1.0.50" tracing = "0.1.37" wasm-bindgen = "0.2.88" -wasm-bindgen-futures = "0.4.37" +wasm-bindgen-futures 
= "0.4.38" web-sys = { version = "0.3.64", features = [ "ReadableStreamDefaultReader", "WebTransport", diff --git a/wasm-tests/webtransport-tests/Cargo.toml b/wasm-tests/webtransport-tests/Cargo.toml index aea0640dd6e..c47dd3f034a 100644 --- a/wasm-tests/webtransport-tests/Cargo.toml +++ b/wasm-tests/webtransport-tests/Cargo.toml @@ -18,7 +18,7 @@ libp2p-webtransport-websys = { workspace = true } multiaddr = { workspace = true } multihash = { workspace = true } wasm-bindgen = "0.2.88" -wasm-bindgen-futures = "0.4.37" +wasm-bindgen-futures = "0.4.38" wasm-bindgen-test = "0.3.37" web-sys = { version = "0.3.64", features = ["Response", "Window"] } From 3a6b72fd6276d671098f3d88e409dffef6fab4c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 01:39:16 +0000 Subject: [PATCH 32/33] deps: bump js-sys from 0.3.64 to 0.3.65 Pull-Request: #4800. --- examples/browser-webrtc/Cargo.toml | 2 +- transports/websocket-websys/Cargo.toml | 2 +- transports/webtransport-websys/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index 553f661f8e1..d9084441b14 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -34,7 +34,7 @@ tower-http = { version = "0.4.0", features = ["cors"] } mime_guess = "2.0.4" [target.'cfg(target_arch = "wasm32")'.dependencies] -js-sys = "0.3.64" +js-sys = "0.3.65" libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen"] } libp2p-webrtc-websys = { workspace = true } wasm-bindgen = "0.2.88" diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 24413080c34..f189285a494 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "1.4.0" futures = "0.3.29" -js-sys = "0.3.61" +js-sys = "0.3.65" libp2p-core = { workspace = true } tracing = "0.1.37" parking_lot = "0.12.1" diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 9af9ac0e118..8553e66d45b 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -15,7 +15,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.29" -js-sys = "0.3.64" +js-sys = "0.3.65" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } From dbfda10e3f0f649c6f8556d0afc71bf525968104 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 01:49:09 +0000 Subject: [PATCH 33/33] deps: bump syn from 2.0.38 to 2.0.39 Pull-Request: #4802. 
--- Cargo.lock | 42 ++++++++++++++++++++--------------------- swarm-derive/Cargo.toml | 2 +- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4ab323b793f..6800c62a900 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -429,7 +429,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -905,7 +905,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1191,7 +1191,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1308,7 +1308,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1412,7 +1412,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1650,7 +1650,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3146,7 +3146,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3924,7 +3924,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4150,7 +4150,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4345,7 +4345,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4887,7 +4887,7 @@ dependencies = [ "quote", "rust-embed-utils", "shellexpand", - "syn 2.0.38", + "syn 2.0.39", "walkdir", ] @@ -5172,7 +5172,7 @@ checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5205,7 +5205,7 @@ checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5508,9 +5508,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -5648,7 +5648,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5750,7 +5750,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5905,7 +5905,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -6244,7 +6244,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-shared", ] @@ -6278,7 +6278,7 @@ checksum = 
"c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6811,5 +6811,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] diff --git a/swarm-derive/Cargo.toml b/swarm-derive/Cargo.toml index 61aac588a80..11cc4b6a12a 100644 --- a/swarm-derive/Cargo.toml +++ b/swarm-derive/Cargo.toml @@ -16,7 +16,7 @@ proc-macro = true [dependencies] heck = "0.4" quote = "1.0" -syn = { version = "2.0.38", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } +syn = { version = "2.0.39", default-features = false, features = ["clone-impls", "derive", "parsing", "printing", "proc-macro"] } proc-macro2 = "1.0" # Passing arguments to the docsrs builder in order to properly document cfg's.