From 3e3ea3a7b44820006c0b706428606447ac74f77d Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Sun, 19 Nov 2023 14:28:59 +1100 Subject: [PATCH 01/48] chore(futures-bounded): prepare release `0.2.3` Pull-Request: #4892. --- misc/futures-bounded/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/misc/futures-bounded/CHANGELOG.md b/misc/futures-bounded/CHANGELOG.md index 7a6e4deac2f..aa2b3e8a65e 100644 --- a/misc/futures-bounded/CHANGELOG.md +++ b/misc/futures-bounded/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.2.3 - unreleased +## 0.2.3 - Introduce `FuturesTupleSet`, holding tuples of a `Future` together with an arbitrary piece of data. See [PR 4841](https://github.com/libp2p/rust-lib2pp/pulls/4841). From edca09b58e19dd1e6ca5183898ad24b0cfb32d92 Mon Sep 17 00:00:00 2001 From: Darius Clark Date: Sat, 18 Nov 2023 22:48:03 -0500 Subject: [PATCH 02/48] docs: fix URLs in changelogs Pull-Request: #4893. --- misc/futures-bounded/CHANGELOG.md | 8 ++++---- protocols/relay/CHANGELOG.md | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/misc/futures-bounded/CHANGELOG.md b/misc/futures-bounded/CHANGELOG.md index aa2b3e8a65e..72b0b4f457d 100644 --- a/misc/futures-bounded/CHANGELOG.md +++ b/misc/futures-bounded/CHANGELOG.md @@ -1,22 +1,22 @@ ## 0.2.3 - Introduce `FuturesTupleSet`, holding tuples of a `Future` together with an arbitrary piece of data. - See [PR 4841](https://github.com/libp2p/rust-lib2pp/pulls/4841). + See [PR 4841](https://github.com/libp2p/rust-libp2p/pull/4841). ## 0.2.2 - Fix an issue where `{Futures,Stream}Map` returns `Poll::Pending` despite being ready after an item has been replaced as part of `try_push`. - See [PR 4865](https://github.com/libp2p/rust-lib2pp/pulls/4865). + See [PR 4865](https://github.com/libp2p/rust-libp2p/pull/4865). ## 0.2.1 - Add `.len()` getter to `FuturesMap`, `FuturesSet`, `StreamMap` and `StreamSet`. - See [PR 4745](https://github.com/libp2p/rust-lib2pp/pulls/4745). + See [PR 4745](https://github.com/libp2p/rust-libp2p/pull/4745). ## 0.2.0 - Add `StreamMap` type and remove `Future`-suffix from `PushError::ReplacedFuture` to reuse it for `StreamMap`. - See [PR 4616](https://github.com/libp2p/rust-lib2pp/pulls/4616). + See [PR 4616](https://github.com/libp2p/rust-libp2p/pull/4616). ## 0.1.0 diff --git a/protocols/relay/CHANGELOG.md b/protocols/relay/CHANGELOG.md index 565748aef7e..aaade5e48f9 100644 --- a/protocols/relay/CHANGELOG.md +++ b/protocols/relay/CHANGELOG.md @@ -1,7 +1,7 @@ ## 0.17.1 - Automatically register relayed addresses as external addresses. - See [PR 4809](https://github.com/libp2p/rust-lib2pp/pulls/4809). + See [PR 4809](https://github.com/libp2p/rust-libp2p/pull/4809). - Fix an error where performing too many reservations at once could lead to inconsistent internal state. See [PR 4841](https://github.com/libp2p/rust-libp2p/pull/4841). @@ -14,7 +14,7 @@ See [PR 4718](https://github.com/libp2p/rust-libp2p/pull/4718). - Fix a rare race condition when making a reservation on a relay that could lead to a failed reservation. - See [PR 4747](https://github.com/libp2p/rust-lib2pp/pulls/4747). + See [PR 4747](https://github.com/libp2p/rust-libp2p/pull/4747). - Propagate errors of relay client to the listener / dialer. A failed reservation will now appear as `SwarmEvent::ListenerClosed` with the `ListenerId` of the corresponding `Swarm::listen_on` call. A failed circuit request will now appear as `SwarmEvent::OutgoingConnectionError` with the `ConnectionId` of the corresponding `Swarm::dial` call. 
@@ -25,7 +25,7 @@ - `relay::client::Event::InboundCircuitReqDenied` - `relay::client::Event::InboundCircuitReqDenyFailed` - See [PR 4745](https://github.com/libp2p/rust-lib2pp/pulls/4745). + See [PR 4745](https://github.com/libp2p/rust-libp2p/pull/4745). ## 0.16.2 From 0ceb6580941c010edff2a5689ada2e9e33c773cb Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Sun, 19 Nov 2023 15:19:17 +1100 Subject: [PATCH 03/48] chore: bump version of `quick-protobuf-codec` The `asynchronous-codec` dependency is exposed in `quick-protobuf-codec`, hence we need to bump the minor version. Pull-Request: #4895. --- Cargo.lock | 2 +- Cargo.toml | 2 +- misc/quick-protobuf-codec/CHANGELOG.md | 5 +++++ misc/quick-protobuf-codec/Cargo.toml | 2 +- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 332764c36f3..a8fe1eeee7a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4367,7 +4367,7 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.2.0" +version = "0.3.0" dependencies = [ "asynchronous-codec", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 35439a1a696..9417876ae01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,7 +117,7 @@ multiaddr = "0.18.1" multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } prometheus-client = "0.22.0" -quick-protobuf-codec = { version = "0.2.0", path = "misc/quick-protobuf-codec" } +quick-protobuf-codec = { version = "0.3.0", path = "misc/quick-protobuf-codec" } quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" } rw-stream-sink = { version = "0.4.0", path = "misc/rw-stream-sink" } unsigned-varint = { version = "0.8.0" } diff --git a/misc/quick-protobuf-codec/CHANGELOG.md b/misc/quick-protobuf-codec/CHANGELOG.md index 740201f80d7..779dd750abd 100644 --- a/misc/quick-protobuf-codec/CHANGELOG.md +++ b/misc/quick-protobuf-codec/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.0 + +- Update to `asynchronous-codec` `v0.7.0`. + See [PR 4636](https://github.com/libp2p/rust-libp2p/pull/4636). + ## 0.2.0 - Raise MSRV to 1.65. diff --git a/misc/quick-protobuf-codec/Cargo.toml b/misc/quick-protobuf-codec/Cargo.toml index 2e309a02889..fdc65cfa93c 100644 --- a/misc/quick-protobuf-codec/Cargo.toml +++ b/misc/quick-protobuf-codec/Cargo.toml @@ -3,7 +3,7 @@ name = "quick-protobuf-codec" edition = "2021" rust-version = { workspace = true } description = "Asynchronous de-/encoding of Protobuf structs using asynchronous-codec, unsigned-varint and quick-protobuf." -version = "0.2.0" +version = "0.3.0" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" From 2e3dae713dbd4da1ff3e5f94e2c0e17f236d5e4d Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 20 Nov 2023 00:41:43 +0100 Subject: [PATCH 04/48] docs: fix typos Pull-Request: #4897. --- README.md | 2 +- docs/coding-guidelines.md | 10 +++++----- docs/release.md | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 62573353845..62375b3dab0 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ The main components of this repository are structured as follows: * `transports/`: Implementations of transport protocols (e.g. TCP) and protocol upgrades (e.g. for authenticated encryption, compression, ...) based on the `libp2p-core` `Transport` - API . + API. * `muxers/`: Implementations of the `StreamMuxer` interface of `libp2p-core`, e.g. (sub)stream multiplexing protocols on top of (typically TCP) connections. 
diff --git a/docs/coding-guidelines.md b/docs/coding-guidelines.md index aef8dd6986a..bacbfe9509e 100644 --- a/docs/coding-guidelines.md +++ b/docs/coding-guidelines.md @@ -28,7 +28,7 @@ Below is a set of coding guidelines followed across the rust-libp2p code base. ## Hierarchical State Machines -If you sqint, rust-libp2p is just a big hierarchy of [state +If you squint, rust-libp2p is just a big hierarchy of [state machines](https://en.wikipedia.org/wiki/Finite-state_machine) where parents pass events down to their children and children pass events up to their parents. @@ -167,7 +167,7 @@ impl Stream for SomeStateMachine { } ``` -This priotization provides: +This prioritization provides: - Low memory footprint as local queues (here `events_to_return_to_parent`) stay small. - Low latency as accepted local work is not stuck in queues. - DOS defense as a remote does not control the size of the local queue, nor starves local work with its remote work. @@ -195,7 +195,7 @@ through a side-channel. ### Local queues As for channels shared across potentially concurrent actors (e.g. future tasks -or OS threads), the same applies for queues owned by a single actor only. E.g. +or OS threads), the same applies to queues owned by a single actor only. E.g. reading events from a socket into a `Vec` without some mechanism bounding the size of that `Vec` again can lead to unbounded memory growth and high latencies. @@ -241,7 +241,7 @@ shows a speed up when running it concurrently. ## Use `async/await` for sequential execution only Using `async/await` for sequential execution makes things significantly simpler. -Though unfortunately using `async/await` does not allow accesing methods on the +Though unfortunately using `async/await` does not allow accessing methods on the object being `await`ed unless paired with some synchronization mechanism like an `Arc>`. @@ -308,7 +308,7 @@ response and a previous request. For example, if a user requests two new connect peer, they should be able to match each new connection to the corresponding previous connection request without having to guess. -When accepting a **command** that eventually results in a response through an event require that +When accepting a **command** that eventually results in a response through an event requires that command to contain a unique ID, which is later on contained in the asynchronous response event. One such example is the `Swarm` accepting a `ToSwarm::Dial` from the `NetworkBehaviour`. diff --git a/docs/release.md b/docs/release.md index 5b4d32aedaf..8edc621c127 100644 --- a/docs/release.md +++ b/docs/release.md @@ -65,13 +65,13 @@ Hence, we are going to bump those versions once we work through the milestone th ## Dealing with alphas -Unfortunately, `cargo` has a rather uninutitive behaviour when it comes to dealing with pre-releases like `0.1.0-alpha`. +Unfortunately, `cargo` has a rather unintuitive behaviour when it comes to dealing with pre-releases like `0.1.0-alpha`. See this internals thread for some context: https://internals.rust-lang.org/t/changing-cargo-semver-compatibility-for-pre-releases In short, cargo will automatically update from `0.1.0-alpha.1` to `0.1.0-alpha.2` UNLESS you pin the version directly with `=0.1.0-alpha.1`. However, from a semver perspective, changes between pre-releases can be breaking. 
-To avoid accidential breaking changes for our users, we employ the following convention for alpha releases: +To avoid accidental breaking changes for our users, we employ the following convention for alpha releases: - For a breaking change in a crate with an alpha release, bump the "minor" version but retain the "alpha" tag. Example: `0.1.0-alpha` to `0.2.0-alpha`. From 35b8fa5812065bb4b3278b90583a10fa2a7d3435 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Mon, 20 Nov 2023 11:55:35 +1100 Subject: [PATCH 05/48] deps: upgrade `cargo semver-checks` to `v0.25.0` Pull-Request: #4899. --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ccaa53f381..f0c852367a2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -293,7 +293,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.24.2/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin + - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.25.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin shell: bash - uses: obi1kenobi/cargo-semver-checks-action@e275dda72e250d4df5b564e969e1348d67fefa52 # v2 From 8d3bde46a8482bd58e1e0581cdf110cd0943c1d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 08:13:24 +0000 Subject: [PATCH 06/48] deps: bump rustls from 0.21.8 to 0.21.9 Pull-Request: #4902. --- Cargo.lock | 18 +++++++++--------- transports/quic/Cargo.toml | 2 +- transports/tls/Cargo.toml | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8fe1eeee7a..49fd90990ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1657,7 +1657,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.8", + "rustls 0.21.9", ] [[package]] @@ -2977,7 +2977,7 @@ dependencies = [ "quinn", "rand 0.8.5", "ring 0.16.20", - "rustls 0.21.8", + "rustls 0.21.9", "socket2 0.5.5", "thiserror", "tokio", @@ -3185,7 +3185,7 @@ dependencies = [ "libp2p-yamux", "rcgen", "ring 0.16.20", - "rustls 0.21.8", + "rustls 0.21.9", "rustls-webpki", "thiserror", "tokio", @@ -4409,7 +4409,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.8", + "rustls 0.21.9", "thiserror", "tokio", "tracing", @@ -4425,7 +4425,7 @@ dependencies = [ "rand 0.8.5", "ring 0.16.20", "rustc-hash", - "rustls 0.21.8", + "rustls 0.21.9", "slab", "thiserror", "tinyvec", @@ -4952,9 +4952,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", "ring 0.17.5", @@ -6377,7 +6377,7 @@ dependencies = [ "ring 0.16.20", "rtcp", "rtp", - "rustls 0.21.8", + "rustls 0.21.9", "sdp", "serde", "serde_json", @@ -6438,7 +6438,7 @@ dependencies = [ "rand_core 0.6.4", "rcgen", "ring 0.16.20", - "rustls 0.21.8", + "rustls 0.21.9", "sec1", "serde", "sha1", diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index 22b241c8d8e..c59c9b64f1c 100644 --- 
a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -20,7 +20,7 @@ libp2p-identity = { workspace = true } parking_lot = "0.12.0" quinn = { version = "0.10.2", default-features = false, features = ["tls-rustls", "futures-io"] } rand = "0.8.5" -rustls = { version = "0.21.8", default-features = false } +rustls = { version = "0.21.9", default-features = false } thiserror = "1.0.50" tokio = { version = "1.34.0", default-features = false, features = ["net", "rt", "time"], optional = true } tracing = "0.1.37" diff --git a/transports/tls/Cargo.toml b/transports/tls/Cargo.toml index 196251dcb76..5a1e4788245 100644 --- a/transports/tls/Cargo.toml +++ b/transports/tls/Cargo.toml @@ -22,7 +22,7 @@ yasna = "0.5.2" # Exposed dependencies. Breaking changes to these are breaking changes to us. [dependencies.rustls] -version = "0.21.8" +version = "0.21.9" default-features = false features = ["dangerous_configuration"] # Must enable this to allow for custom verification code. From 7387500b878efb73fb3083d2aefca5bc3f08202e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 11:04:41 +0000 Subject: [PATCH 07/48] deps: bump zeroize from 1.6.0 to 1.7.0 Pull-Request: #4904. --- Cargo.lock | 4 ++-- identity/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 49fd90990ef..b342fbb133b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6783,9 +6783,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 1d5a8f4ac54..49f90a1c680 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -27,7 +27,7 @@ serde = { version = "1", optional = true, features = ["derive"] } sha2 = { version = "0.10.8", optional = true } thiserror = { version = "1.0", optional = true } void = { version = "1.0", optional = true } -zeroize = { version = "1.6", optional = true } +zeroize = { version = "1.7", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] ring = { version = "0.17.5", features = [ "alloc", "std"], default-features = false, optional = true } From 338e467927f8a0cba34eb24288f0be9b0972d05d Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 21 Nov 2023 00:46:06 +1100 Subject: [PATCH 08/48] docs: document new CHANGELOG-management strategy At the moment, we have to send a separate PR that removes the `- unreleased` suffix in `CHANGELOG.md` before we can make a release. This is annoying and unnecessarily delays being able to make releases for features that have been merged into master. As of #4620, we have CI checks that ensure we add a changelog entry when a particular crate is modified. Additionally, the CI check also ensures we bump the version in case the top-most one of the changelog is already released. The combination of these checks should ensure that we add a section for the new version in case the top-most one is already released. Previously, the presence of `- unreleased` would notify us of that. In the future, we can add a CI job that allows us to release at the press of a button (think `workflow_dispatch`). Until then, this setup should already make it much easier to release crates. Pull-Request: #4894. 
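To make the version-consistency rule described above concrete, below is a minimal, hypothetical sketch of such a check. It is not the actual CI script used by rust-libp2p (that enforcement lives in the CI workflow referenced by the footnote added in this patch); the `CRATE_ROOT` argument and the `grep`/`sed` parsing are assumptions made purely for illustration.

```bash
#!/usr/bin/env bash
# Hypothetical sketch: verify that a crate's Cargo.toml version matches the
# top-most heading of its CHANGELOG.md. Not the actual rust-libp2p CI check.
set -euo pipefail

CRATE_ROOT="$1" # e.g. misc/futures-bounded

# First `version = "..."` entry in the crate manifest.
manifest_version=$(grep -m1 '^version = ' "$CRATE_ROOT/Cargo.toml" | sed 's/version = "\(.*\)"/\1/')

# Top-most `## x.y.z` heading in the changelog.
changelog_version=$(grep -m1 '^## ' "$CRATE_ROOT/CHANGELOG.md" | sed 's/^## \([^ ]*\).*/\1/')

if [ "$manifest_version" != "$changelog_version" ]; then
    echo "Version mismatch in $CRATE_ROOT: Cargo.toml has $manifest_version, CHANGELOG.md has $changelog_version" >&2
    exit 1
fi

echo "$CRATE_ROOT: $manifest_version is consistent"
```

A check of this shape is what lets `master` stay in a releasable state: as long as the top-most changelog heading and the manifest version move together, no separate "remove `- unreleased`" PR is needed before tagging a release.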
--- docs/release.md | 37 +++++++++++++------------------- protocols/gossipsub/CHANGELOG.md | 2 +- protocols/identify/CHANGELOG.md | 2 +- protocols/kad/CHANGELOG.md | 2 +- protocols/mdns/CHANGELOG.md | 2 +- scripts/add-changelog-header.sh | 2 +- 6 files changed, 20 insertions(+), 27 deletions(-) diff --git a/docs/release.md b/docs/release.md index 8edc621c127..50b7b0605c7 100644 --- a/docs/release.md +++ b/docs/release.md @@ -17,43 +17,34 @@ Non-breaking changes are typically merged very quickly and often released as pat Every crate that we publish on `crates.io` has a `CHANGELOG.md` file. Substantial PRs should add an entry to each crate they modify. -The next unreleased version is tagged with ` - unreleased`, for example: `0.17.0 - unreleased`. +We have a CI check[^1] that enforces adding a changelog entry if you modify code in a particular crate. +In case the current version is already released (we also check that in CI), you'll have to add a new header at the top. +For example, the top-listed version might be `0.17.3` but it is already released. +In that case, add a new heading `## 0.17.4` with your changelog entry in case it is a non-breaking change. -In case there isn't a version with an ` - unreleased` postfix yet, add one for the next version. -The next version number depends on the impact of your change (breaking vs non-breaking, see above). - -If you are making a non-breaking change, please also bump the version number: - -- in the `Cargo.toml` manifest of the respective crate -- in the `[workspace.dependencies]` section of the workspace `Cargo.toml` manifest - -For breaking changes, a changelog entry itself is sufficient. -Bumping the version in the `Cargo.toml` file would lead to many merge conflicts once we decide to merge them. -Hence, we are going to bump those versions once we work through the milestone that collects the breaking changes. +The version in the crate's `Cargo.toml` and the top-most version in the `CHANGELOG.md` file always have to be in sync. +Additionally, we also enforce that all crates always depend on the latest version of other workspace-crates through workspace inheritance. +As a consequence, you'll also have to bump the version in `[workspace.dependencies]` in the workspace `Cargo.toml` manifest. ## Releasing one or more crates +The above changelog-management strategy means `master` is always in a state where we can make a release. + ### Prerequisites - [cargo release](https://github.com/crate-ci/cargo-release/) ### Steps -1. Remove the ` - unreleased` tag for each crate to be released in the respective `CHANGELOG.md`. - Create a pull request with the changes against the rust-libp2p `master` branch. - -2. Once merged, run the two commands below on the (squash-) merged commit on the `master` branch. +1. Run the two commands below on the (squash-) merged commit on the `master` branch. 1. `cargo release publish --execute` 2. `cargo release tag --sign-tag --execute` -3. Confirm that `cargo release` tagged the commit correctly via `git push - $YOUR_ORIGIN --tag --dry-run` and then push the new tags via `git push - $YOUR_ORIGIN --tag`. Make sure not to push unrelated git tags. - - Note that dropping the `--no-push` flag on `cargo release` might as well do - the trick. +2. Confirm that `cargo release` tagged the commit correctly via `git push $YOUR_ORIGIN --tag --dry-run` + Push the new tags via `git push $YOUR_ORIGIN --tag`. + Make sure not to push unrelated git tags. 
## Patch release @@ -77,3 +68,5 @@ To avoid accidental breaking changes for our users, we employ the following conv Example: `0.1.0-alpha` to `0.2.0-alpha`. - For a non-breaking change in a crate with an alpha release, bump or append number to the "alpha" tag. Example: `0.1.0-alpha` to `0.1.0-alpha.1`. + +[^1]: See [ci.yml](../.github/workflows/ci.yml) and look for "Ensure manifest and CHANGELOG are properly updated". diff --git a/protocols/gossipsub/CHANGELOG.md b/protocols/gossipsub/CHANGELOG.md index 68041f2dcd3..5ff4cfa27d6 100644 --- a/protocols/gossipsub/CHANGELOG.md +++ b/protocols/gossipsub/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.46.1 - unreleased +## 0.46.1 - Deprecate `Rpc` in preparation for removing it from the public API because it is an internal type. See [PR 4833](https://github.com/libp2p/rust-libp2p/pull/4833). diff --git a/protocols/identify/CHANGELOG.md b/protocols/identify/CHANGELOG.md index d2f67bd908c..22c74b28cae 100644 --- a/protocols/identify/CHANGELOG.md +++ b/protocols/identify/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.44.1 - unreleased +## 0.44.1 - Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index d59ea8acb17..4740e4b1f95 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.45.2 - unreleased +## 0.45.2 - Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index c6a9359c23b..6f5aa6945fd 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.45.1 - unreleased +## 0.45.1 - Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). diff --git a/scripts/add-changelog-header.sh b/scripts/add-changelog-header.sh index 8050857c52e..4717940c8d7 100755 --- a/scripts/add-changelog-header.sh +++ b/scripts/add-changelog-header.sh @@ -7,4 +7,4 @@ if [[ $header == $prefix* ]]; then exit fi -sed -i "1i ## ${NEW_VERSION} - unreleased\n\n" "$CRATE_ROOT/CHANGELOG.md" +sed -i "1i ## ${NEW_VERSION}\n\n" "$CRATE_ROOT/CHANGELOG.md" From 594ecaaeec159f435cb8251aaa5986ddfbb4e552 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 22:15:37 +0000 Subject: [PATCH 09/48] deps: bump ed25519-dalek from 2.0.0 to 2.1.0 Pull-Request: #4903. 
--- Cargo.lock | 5 +++-- identity/Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b342fbb133b..25319bcf968 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1320,15 +1320,16 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.8", + "subtle", "zeroize", ] diff --git a/identity/Cargo.toml b/identity/Cargo.toml index 49f90a1c680..f70db830d3f 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -14,7 +14,7 @@ categories = ["cryptography"] [dependencies] asn1_der = { version = "0.7.6", optional = true } bs58 = { version = "0.5.0", optional = true } -ed25519-dalek = { version = "2.0", optional = true } +ed25519-dalek = { version = "2.1", optional = true } hkdf = { version = "0.12.3", optional = true } libsecp256k1 = { version = "0.7.0", optional = true } tracing = "0.1.37" From 5a4a462899ac937cb1a0dc84ef29333a7ad1b047 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Daniel=20Hern=C3=A1ndez?= Date: Mon, 20 Nov 2023 18:05:58 -0600 Subject: [PATCH 10/48] feat(websocket-websys): add support for different WASM environments We introduce a `WebContext` `enum` that abstracts and detects the `Window` vs the `WorkerGlobalScope` API. Related: https://github.com/rustwasm/wasm-bindgen/issues/1046. Pull-Request: #4889. --- Cargo.lock | 2 +- Cargo.toml | 2 +- transports/websocket-websys/CHANGELOG.md | 6 ++ transports/websocket-websys/Cargo.toml | 4 +- transports/websocket-websys/src/lib.rs | 16 ++++-- .../websocket-websys/src/web_context.rs | 57 +++++++++++++++++++ 6 files changed, 77 insertions(+), 10 deletions(-) create mode 100644 transports/websocket-websys/src/web_context.rs diff --git a/Cargo.lock b/Cargo.lock index 25319bcf968..4a7655a3004 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3321,7 +3321,7 @@ dependencies = [ [[package]] name = "libp2p-websocket-websys" -version = "0.3.0" +version = "0.3.1" dependencies = [ "bytes", "futures", diff --git a/Cargo.toml b/Cargo.toml index 9417876ae01..0603a22629b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,7 +110,7 @@ libp2p-webrtc = { version = "0.6.1-alpha", path = "transports/webrtc" } libp2p-webrtc-utils = { version = "0.1.0", path = "misc/webrtc-utils" } libp2p-webrtc-websys = { version = "0.2.0-alpha", path = "transports/webrtc-websys" } libp2p-websocket = { version = "0.43.0", path = "transports/websocket" } -libp2p-websocket-websys = { version = "0.3.0", path = "transports/websocket-websys" } +libp2p-websocket-websys = { version = "0.3.1", path = "transports/websocket-websys" } libp2p-webtransport-websys = { version = "0.2.0", path = "transports/webtransport-websys" } libp2p-yamux = { version = "0.45.0", path = "muxers/yamux" } multiaddr = "0.18.1" diff --git a/transports/websocket-websys/CHANGELOG.md b/transports/websocket-websys/CHANGELOG.md index 17c253cb80a..3cfb1b2fbf9 100644 --- a/transports/websocket-websys/CHANGELOG.md +++ b/transports/websocket-websys/CHANGELOG.md @@ -1,3 +1,9 @@ +## 0.3.1 + +- Add support for different WASM environments by introducing a `WebContext` that + detects and abstracts the `Window` vs the `WorkerGlobalScope` API. + See [PR 4889](https://github.com/libp2p/rust-libp2p/pull/4889). 
+ ## 0.3.0 diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 6a6cef5f8bc..ffa2892e838 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-websocket-websys" edition = "2021" rust-version = "1.60.0" description = "WebSocket for libp2p under WASM environment" -version = "0.3.0" +version = "0.3.1" authors = ["Vince Vasta "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -20,7 +20,7 @@ parking_lot = "0.12.1" send_wrapper = "0.6.0" thiserror = "1.0.50" wasm-bindgen = "0.2.88" -web-sys = { version = "0.3.65", features = ["BinaryType", "CloseEvent", "MessageEvent", "WebSocket", "Window"] } +web-sys = { version = "0.3.65", features = ["BinaryType", "CloseEvent", "MessageEvent", "WebSocket", "Window", "WorkerGlobalScope"] } # Passing arguments to the docsrs builder in order to properly document cfg's. # More information: https://docs.rs/about/builds#cross-compiling diff --git a/transports/websocket-websys/src/lib.rs b/transports/websocket-websys/src/lib.rs index b4f7566f95e..5c1a6ebf1c4 100644 --- a/transports/websocket-websys/src/lib.rs +++ b/transports/websocket-websys/src/lib.rs @@ -20,6 +20,8 @@ //! Libp2p websocket transports built on [web-sys](https://rustwasm.github.io/wasm-bindgen/web-sys/index.html). +mod web_context; + use bytes::BytesMut; use futures::task::AtomicWaker; use futures::{future::Ready, io, prelude::*}; @@ -35,7 +37,9 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Mutex; use std::{pin::Pin, task::Context, task::Poll}; use wasm_bindgen::{prelude::*, JsCast}; -use web_sys::{window, CloseEvent, Event, MessageEvent, WebSocket}; +use web_sys::{CloseEvent, Event, MessageEvent, WebSocket}; + +use crate::web_context::WebContext; /// A Websocket transport that can be used in a wasm environment. /// @@ -300,8 +304,8 @@ impl Connection { } } }); - let buffered_amount_low_interval = window() - .expect("to have a window") + let buffered_amount_low_interval = WebContext::new() + .expect("to have a window or worker context") .set_interval_with_callback_and_timeout_and_arguments( on_buffered_amount_low_closure.as_ref().unchecked_ref(), 100, // Chosen arbitrarily and likely worth tuning. Due to low impact of the /ws transport, no further effort was invested at the time. 
@@ -439,8 +443,8 @@ impl Drop for Connection { .close_with_code_and_reason(GO_AWAY_STATUS_CODE, "connection dropped"); } - window() - .expect("to have a window") - .clear_interval_with_handle(self.inner.buffered_amount_low_interval) + WebContext::new() + .expect("to have a window or worker context") + .clear_interval_with_handle(self.inner.buffered_amount_low_interval); } } diff --git a/transports/websocket-websys/src/web_context.rs b/transports/websocket-websys/src/web_context.rs new file mode 100644 index 00000000000..c514435d2bb --- /dev/null +++ b/transports/websocket-websys/src/web_context.rs @@ -0,0 +1,57 @@ +use wasm_bindgen::{prelude::*, JsCast}; +use web_sys::window; + +/// Web context that abstract the window vs web worker API +#[derive(Debug)] +pub(crate) enum WebContext { + Window(web_sys::Window), + Worker(web_sys::WorkerGlobalScope), +} + +impl WebContext { + pub(crate) fn new() -> Option { + match window() { + Some(window) => Some(Self::Window(window)), + None => { + #[wasm_bindgen] + extern "C" { + type Global; + + #[wasm_bindgen(method, getter, js_name = WorkerGlobalScope)] + fn worker(this: &Global) -> JsValue; + } + let global: Global = js_sys::global().unchecked_into(); + if !global.worker().is_undefined() { + Some(Self::Worker(global.unchecked_into())) + } else { + None + } + } + } + } + + /// The `setInterval()` method. + pub(crate) fn set_interval_with_callback_and_timeout_and_arguments( + &self, + handler: &::js_sys::Function, + timeout: i32, + arguments: &::js_sys::Array, + ) -> Result { + match self { + WebContext::Window(w) => { + w.set_interval_with_callback_and_timeout_and_arguments(handler, timeout, arguments) + } + WebContext::Worker(w) => { + w.set_interval_with_callback_and_timeout_and_arguments(handler, timeout, arguments) + } + } + } + + /// The `clearInterval()` method. + pub(crate) fn clear_interval_with_handle(&self, handle: i32) { + match self { + WebContext::Window(w) => w.clear_interval_with_handle(handle), + WebContext::Worker(w) => w.clear_interval_with_handle(handle), + } + } +} From 3e30c2051c4b09642914929bbf471aceb37230eb Mon Sep 17 00:00:00 2001 From: Michael Assaf <94772640+snowmead@users.noreply.github.com> Date: Mon, 20 Nov 2023 19:16:15 -0500 Subject: [PATCH 11/48] fix: set `idle_connection_timeout` in examples Within examples, we often set up a very specific network configuration which may leave connections idle whilst we wait for user input. To ensure that the examples can still showcase something, we need to set an `idle_connection_timeout` on the `Swarm`. Related: #4877. Pull-Request: #4887. --- examples/autonat/src/bin/autonat_client.rs | 1 + examples/autonat/src/bin/autonat_server.rs | 2 ++ examples/dcutr/src/main.rs | 3 ++- examples/distributed-key-value-store/src/main.rs | 2 ++ examples/file-sharing/src/network.rs | 2 ++ examples/identify/src/main.rs | 3 ++- examples/ipfs-private/src/main.rs | 3 ++- 7 files changed, 13 insertions(+), 3 deletions(-) diff --git a/examples/autonat/src/bin/autonat_client.rs b/examples/autonat/src/bin/autonat_client.rs index b071e717731..3fb25aa6222 100644 --- a/examples/autonat/src/bin/autonat_client.rs +++ b/examples/autonat/src/bin/autonat_client.rs @@ -60,6 +60,7 @@ async fn main() -> Result<(), Box> { yamux::Config::default, )? .with_behaviour(|key| Behaviour::new(key.public()))? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm.listen_on( diff --git a/examples/autonat/src/bin/autonat_server.rs b/examples/autonat/src/bin/autonat_server.rs index d1c0c005861..44a53f0d17f 100644 --- a/examples/autonat/src/bin/autonat_server.rs +++ b/examples/autonat/src/bin/autonat_server.rs @@ -27,6 +27,7 @@ use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; use libp2p::{autonat, identify, identity, noise, tcp, yamux}; use std::error::Error; use std::net::Ipv4Addr; +use std::time::Duration; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] @@ -52,6 +53,7 @@ async fn main() -> Result<(), Box> { yamux::Config::default, )? .with_behaviour(|key| Behaviour::new(key.public()))? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm.listen_on( diff --git a/examples/dcutr/src/main.rs b/examples/dcutr/src/main.rs index 91beaa02c67..51df670f8a7 100644 --- a/examples/dcutr/src/main.rs +++ b/examples/dcutr/src/main.rs @@ -28,8 +28,8 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, PeerId, }; -use std::error::Error; use std::str::FromStr; +use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[derive(Debug, Parser)] @@ -105,6 +105,7 @@ async fn main() -> Result<(), Box> { )), dcutr: dcutr::Behaviour::new(keypair.public().to_peer_id()), })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm diff --git a/examples/distributed-key-value-store/src/main.rs b/examples/distributed-key-value-store/src/main.rs index 1843520838b..404333f3d20 100644 --- a/examples/distributed-key-value-store/src/main.rs +++ b/examples/distributed-key-value-store/src/main.rs @@ -31,6 +31,7 @@ use libp2p::{ tcp, yamux, }; use std::error::Error; +use std::time::Duration; use tracing_subscriber::EnvFilter; #[async_std::main] @@ -65,6 +66,7 @@ async fn main() -> Result<(), Box> { )?, }) })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm.behaviour_mut().kademlia.set_mode(Some(Mode::Server)); diff --git a/examples/file-sharing/src/network.rs b/examples/file-sharing/src/network.rs index ad5418193a4..59625fc39ea 100644 --- a/examples/file-sharing/src/network.rs +++ b/examples/file-sharing/src/network.rs @@ -15,6 +15,7 @@ use libp2p::StreamProtocol; use serde::{Deserialize, Serialize}; use std::collections::{hash_map, HashMap, HashSet}; use std::error::Error; +use std::time::Duration; /// Creates the network components, namely: /// @@ -58,6 +59,7 @@ pub(crate) async fn new( request_response::Config::default(), ), })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); swarm diff --git a/examples/identify/src/main.rs b/examples/identify/src/main.rs index 3c40addbcf8..916317a5a43 100644 --- a/examples/identify/src/main.rs +++ b/examples/identify/src/main.rs @@ -22,7 +22,7 @@ use futures::StreamExt; use libp2p::{core::multiaddr::Multiaddr, identify, noise, swarm::SwarmEvent, tcp, yamux}; -use std::error::Error; +use std::{error::Error, time::Duration}; use tracing_subscriber::EnvFilter; #[async_std::main] @@ -44,6 +44,7 @@ async fn main() -> Result<(), Box> { key.public(), )) })? 
+ .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); // Tell the swarm to listen on all interfaces and a random, OS-assigned diff --git a/examples/ipfs-private/src/main.rs b/examples/ipfs-private/src/main.rs index 12bd985cdf0..a57bfd465e0 100644 --- a/examples/ipfs-private/src/main.rs +++ b/examples/ipfs-private/src/main.rs @@ -31,7 +31,7 @@ use libp2p::{ swarm::{NetworkBehaviour, SwarmEvent}, tcp, yamux, Multiaddr, Transport, }; -use std::{env, error::Error, fs, path::Path, str::FromStr}; +use std::{env, error::Error, fs, path::Path, str::FromStr, time::Duration}; use tokio::{io, io::AsyncBufReadExt, select}; use tracing_subscriber::EnvFilter; @@ -151,6 +151,7 @@ async fn main() -> Result<(), Box> { ping: ping::Behaviour::new(ping::Config::new()), }) })? + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) .build(); println!("Subscribing to {gossipsub_topic:?}"); From bb2b79824d1fd9e4f9cf8395ace810d032363397 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 22 Nov 2023 02:53:21 +0000 Subject: [PATCH 12/48] refactor(gossipsub): send more specific messages to `ConnectionHandler` Previously, the `NetworkBehaviour` constructed a `proto::RPC` type and sent that into an unbounded queue to the `ConnectionHandler`. With such a broad type, the `ConnectionHandler` cannot do any prioritization on which messages to drop if a connection slows down. To enable a more fine-granular handling of messages, we make the interface between `NetworkBehaviour` and `ConnectionHandler` more specific by introducing an `RpcOut` type that differentiates between `Publish`, `Forward`, `Subscribe`, etc. Related: #4667. Pull-Request: #4811. --- protocols/gossipsub/src/behaviour.rs | 685 +++++---------------- protocols/gossipsub/src/behaviour/tests.rs | 233 +++---- protocols/gossipsub/src/handler.rs | 7 +- protocols/gossipsub/src/protocol.rs | 4 +- protocols/gossipsub/src/types.rs | 137 +++++ 5 files changed, 374 insertions(+), 692 deletions(-) diff --git a/protocols/gossipsub/src/behaviour.rs b/protocols/gossipsub/src/behaviour.rs index 0780ca657db..24a32de4cc7 100644 --- a/protocols/gossipsub/src/behaviour.rs +++ b/protocols/gossipsub/src/behaviour.rs @@ -61,7 +61,7 @@ use crate::types::{ ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, SubscriptionAction, }; -use crate::types::{PeerConnections, PeerKind, Rpc}; +use crate::types::{PeerConnections, PeerKind, RpcOut}; use crate::{rpc_proto::proto, TopicScoreParams}; use crate::{PublishError, SubscriptionError, ValidationError}; use instant::SystemTime; @@ -534,23 +534,10 @@ where } // send subscription request to all peers - let peer_list = self.peer_topics.keys().cloned().collect::>(); - if !peer_list.is_empty() { - let event = Rpc { - messages: Vec::new(), - subscriptions: vec![Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Subscribe, - }], - control_msgs: Vec::new(), - } - .into_protobuf(); - - for peer in peer_list { - tracing::debug!(%peer, "Sending SUBSCRIBE to peer"); - self.send_message(peer, event.clone()) - .map_err(SubscriptionError::PublishError)?; - } + for peer in self.peer_topics.keys().copied().collect::>() { + tracing::debug!(%peer, "Sending SUBSCRIBE to peer"); + let event = RpcOut::Subscribe(topic_hash.clone()); + self.send_message(peer, event); } // call JOIN(topic) @@ -574,22 +561,10 @@ where } // announce to all peers - let peer_list = self.peer_topics.keys().cloned().collect::>(); - if 
!peer_list.is_empty() { - let event = Rpc { - messages: Vec::new(), - subscriptions: vec![Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Unsubscribe, - }], - control_msgs: Vec::new(), - } - .into_protobuf(); - - for peer in peer_list { - tracing::debug!(%peer, "Sending UNSUBSCRIBE to peer"); - self.send_message(peer, event.clone())?; - } + for peer in self.peer_topics.keys().copied().collect::>() { + tracing::debug!(%peer, "Sending UNSUBSCRIBE to peer"); + let event = RpcOut::Unsubscribe(topic_hash.clone()); + self.send_message(peer, event); } // call LEAVE(topic) @@ -624,15 +599,8 @@ where topic: raw_message.topic.clone(), }); - let event = Rpc { - subscriptions: Vec::new(), - messages: vec![raw_message.clone()], - control_msgs: Vec::new(), - } - .into_protobuf(); - // check that the size doesn't exceed the max transmission size - if event.get_size() > self.config.max_transmit_size() { + if raw_message.raw_protobuf_len() > self.config.max_transmit_size() { return Err(PublishError::MessageTooLarge); } @@ -651,23 +619,60 @@ where let topic_hash = raw_message.topic.clone(); - // If we are not flood publishing forward the message to mesh peers. - let mesh_peers_sent = !self.config.flood_publish() - && self.forward_msg(&msg_id, raw_message.clone(), None, HashSet::new())?; - let mut recipient_peers = HashSet::new(); if let Some(set) = self.topic_peers.get(&topic_hash) { if self.config.flood_publish() { // Forward to all peers above score and all explicit peers - recipient_peers.extend( - set.iter() - .filter(|p| { - self.explicit_peers.contains(*p) - || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 - }) - .cloned(), - ); + recipient_peers.extend(set.iter().filter(|p| { + self.explicit_peers.contains(*p) + || !self.score_below_threshold(p, |ts| ts.publish_threshold).0 + })); } else { + match self.mesh.get(&raw_message.topic) { + // Mesh peers + Some(mesh_peers) => { + recipient_peers.extend(mesh_peers); + } + // Gossipsub peers + None => { + tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); + // If we have fanout peers add them to the map. + if self.fanout.contains_key(&topic_hash) { + for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { + recipient_peers.insert(*peer); + } + } else { + // We have no fanout peers, select mesh_n of them and add them to the fanout + let mesh_n = self.config.mesh_n(); + let new_peers = get_random_peers( + &self.topic_peers, + &self.connected_peers, + &topic_hash, + mesh_n, + { + |p| { + !self.explicit_peers.contains(p) + && !self + .score_below_threshold(p, |pst| { + pst.publish_threshold + }) + .0 + } + }, + ); + // Add the new peers to the fanout and recipient peers + self.fanout.insert(topic_hash.clone(), new_peers.clone()); + for peer in new_peers { + tracing::debug!(%peer, "Peer added to fanout"); + recipient_peers.insert(peer); + } + } + // We are publishing to fanout peers - update the time we published + self.fanout_last_pub + .insert(topic_hash.clone(), Instant::now()); + } + } + // Explicit peers for peer in &self.explicit_peers { if set.contains(peer) { @@ -685,54 +690,17 @@ where recipient_peers.insert(*peer); } } - - // Gossipsub peers - if self.mesh.get(&topic_hash).is_none() { - tracing::debug!(topic=%topic_hash, "Topic not in the mesh"); - // If we have fanout peers add them to the map. 
- if self.fanout.contains_key(&topic_hash) { - for peer in self.fanout.get(&topic_hash).expect("Topic must exist") { - recipient_peers.insert(*peer); - } - } else { - // We have no fanout peers, select mesh_n of them and add them to the fanout - let mesh_n = self.config.mesh_n(); - let new_peers = get_random_peers( - &self.topic_peers, - &self.connected_peers, - &topic_hash, - mesh_n, - { - |p| { - !self.explicit_peers.contains(p) - && !self - .score_below_threshold(p, |pst| pst.publish_threshold) - .0 - } - }, - ); - // Add the new peers to the fanout and recipient peers - self.fanout.insert(topic_hash.clone(), new_peers.clone()); - for peer in new_peers { - tracing::debug!(%peer, "Peer added to fanout"); - recipient_peers.insert(peer); - } - } - // We are publishing to fanout peers - update the time we published - self.fanout_last_pub - .insert(topic_hash.clone(), Instant::now()); - } } } - if recipient_peers.is_empty() && !mesh_peers_sent { + if recipient_peers.is_empty() { return Err(PublishError::InsufficientPeers); } // If the message isn't a duplicate and we have sent it to some peers add it to the // duplicate cache and memcache. self.duplicate_cache.insert(msg_id.clone()); - self.mcache.put(&msg_id, raw_message); + self.mcache.put(&msg_id, raw_message.clone()); // If the message is anonymous or has a random author add it to the published message ids // cache. @@ -743,14 +711,9 @@ where } // Send to peers we know are subscribed to the topic. - let msg_bytes = event.get_size(); for peer_id in recipient_peers.iter() { tracing::trace!(peer=%peer_id, "Sending message to peer"); - self.send_message(*peer_id, event.clone())?; - - if let Some(m) = self.metrics.as_mut() { - m.msg_sent(&topic_hash, msg_bytes); - } + self.send_message(*peer_id, RpcOut::Publish(raw_message.clone())); } tracing::debug!(message=%msg_id, "Published message"); @@ -979,7 +942,7 @@ where "JOIN: Adding {:?} peers from the fanout for topic", add_peers ); - added_peers.extend(peers.iter().cloned().take(add_peers)); + added_peers.extend(peers.iter().take(add_peers)); self.mesh.insert( topic_hash.clone(), @@ -1331,13 +1294,15 @@ where } tracing::debug!(peer=%peer_id, "Handling IWANT for peer"); - // build a hashmap of available messages - let mut cached_messages = HashMap::new(); for id in iwant_msgs { - // If we have it and the IHAVE count is not above the threshold, add it do the - // cached_messages mapping - if let Some((msg, count)) = self.mcache.get_with_iwant_counts(&id, peer_id) { + // If we have it and the IHAVE count is not above the threshold, + // foward the message. 
+ if let Some((msg, count)) = self + .mcache + .get_with_iwant_counts(&id, peer_id) + .map(|(msg, count)| (msg.clone(), count)) + { if count > self.config.gossip_retransimission() { tracing::debug!( peer=%peer_id, @@ -1345,36 +1310,8 @@ where "IWANT: Peer has asked for message too many times; ignoring request" ); } else { - cached_messages.insert(id.clone(), msg.clone()); - } - } - } - - if !cached_messages.is_empty() { - tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); - // Send the messages to the peer - let message_list: Vec<_> = cached_messages.into_iter().map(|entry| entry.1).collect(); - - let topics = message_list - .iter() - .map(|message| message.topic.clone()) - .collect::>(); - - let message = Rpc { - subscriptions: Vec::new(), - messages: message_list, - control_msgs: Vec::new(), - } - .into_protobuf(); - - let msg_bytes = message.get_size(); - - if self.send_message(*peer_id, message).is_err() { - tracing::error!("Failed to send cached messages. Messages too large"); - } else if let Some(m) = self.metrics.as_mut() { - // Sending of messages succeeded, register them on the internal metrics. - for topic in topics.iter() { - m.msg_sent(topic, msg_bytes); + tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); + self.send_message(*peer_id, RpcOut::Forward(msg)); } } } @@ -1527,27 +1464,18 @@ where if !to_prune_topics.is_empty() { // build the prune messages to send let on_unsubscribe = false; - let prune_messages = to_prune_topics + for action in to_prune_topics .iter() .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe)) - .collect(); + .collect::>() + { + self.send_message(*peer_id, RpcOut::Control(action)); + } // Send the prune messages to the peer tracing::debug!( peer=%peer_id, "GRAFT: Not subscribed to topics - Sending PRUNE to peer" ); - - if let Err(e) = self.send_message( - *peer_id, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: prune_messages, - } - .into_protobuf(), - ) { - tracing::error!("Failed to send PRUNE: {:?}", e); - } } tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer"); } @@ -2036,23 +1964,12 @@ where // If we need to send grafts to peer, do so immediately, rather than waiting for the // heartbeat. - if !topics_to_graft.is_empty() - && self - .send_message( - *propagation_source, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: topics_to_graft - .into_iter() - .map(|topic_hash| ControlAction::Graft { topic_hash }) - .collect(), - } - .into_protobuf(), - ) - .is_err() + for action in topics_to_graft + .into_iter() + .map(|topic_hash| ControlAction::Graft { topic_hash }) + .collect::>() { - tracing::error!("Failed sending grafts. 
Message too large"); + self.send_message(*propagation_source, RpcOut::Control(action)) } // Notify the application of the subscriptions @@ -2204,7 +2121,7 @@ where // shuffle the peers and then sort by score ascending beginning with the worst let mut rng = thread_rng(); - let mut shuffled = peers.iter().cloned().collect::>(); + let mut shuffled = peers.iter().copied().collect::>(); shuffled.shuffle(&mut rng); shuffled.sort_by(|p1, p2| { let score_p1 = *scores.get(p1).unwrap_or(&0.0); @@ -2578,12 +2495,9 @@ where &self.connected_peers, ); } - let mut control_msgs: Vec = topics - .iter() - .map(|topic_hash| ControlAction::Graft { - topic_hash: topic_hash.clone(), - }) - .collect(); + let control_msgs = topics.iter().map(|topic_hash| ControlAction::Graft { + topic_hash: topic_hash.clone(), + }); // If there are prunes associated with the same peer add them. // NOTE: In this case a peer has been added to a topic mesh, and removed from another. @@ -2591,52 +2505,37 @@ where // of its removal from another. // The following prunes are not due to unsubscribing. - let on_unsubscribe = false; - if let Some(topics) = to_prune.remove(&peer) { - let mut prunes = topics - .iter() - .map(|topic_hash| { - self.make_prune( - topic_hash, - &peer, - self.config.do_px() && !no_px.contains(&peer), - on_unsubscribe, - ) - }) - .collect::>(); - control_msgs.append(&mut prunes); - } + let prunes = to_prune + .remove(&peer) + .into_iter() + .flatten() + .map(|topic_hash| { + self.make_prune( + &topic_hash, + &peer, + self.config.do_px() && !no_px.contains(&peer), + false, + ) + }); // send the control messages - if self - .send_message( - peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs, - } - .into_protobuf(), - ) - .is_err() - { - tracing::error!("Failed to send control messages. Message too large"); + for msg in control_msgs.chain(prunes).collect::>() { + self.send_message(peer, RpcOut::Control(msg)); } } // handle the remaining prunes // The following prunes are not due to unsubscribing. - let on_unsubscribe = false; for (peer, topics) in to_prune.iter() { - let mut remaining_prunes = Vec::new(); for topic_hash in topics { let prune = self.make_prune( topic_hash, peer, self.config.do_px() && !no_px.contains(peer), - on_unsubscribe, + false, ); - remaining_prunes.push(prune); + self.send_message(*peer, RpcOut::Control(prune)); + // inform the handler peer_removed_from_mesh( *peer, @@ -2647,21 +2546,6 @@ where &self.connected_peers, ); } - - if self - .send_message( - *peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: remaining_prunes, - } - .into_protobuf(), - ) - .is_err() - { - tracing::error!("Failed to send prune messages. 
Message too large"); - } } } @@ -2718,20 +2602,11 @@ where // forward the message to peers if !recipient_peers.is_empty() { - let event = Rpc { - subscriptions: Vec::new(), - messages: vec![message.clone()], - control_msgs: Vec::new(), - } - .into_protobuf(); + let event = RpcOut::Forward(message.clone()); - let msg_bytes = event.get_size(); for peer in recipient_peers.iter() { tracing::debug!(%peer, message=%msg_id, "Sending message to peer"); - self.send_message(*peer, event.clone())?; - if let Some(m) = self.metrics.as_mut() { - m.msg_sent(&message.topic, msg_bytes); - } + self.send_message(*peer, event.clone()); } tracing::debug!("Completed forwarding message"); Ok(true) @@ -2844,19 +2719,8 @@ where /// Takes each control action mapping and turns it into a message fn flush_control_pool(&mut self) { for (peer, controls) in self.control_pool.drain().collect::>() { - if self - .send_message( - peer, - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: controls, - } - .into_protobuf(), - ) - .is_err() - { - tracing::error!("Failed to flush control pool. Message too large"); + for msg in controls { + self.send_message(peer, RpcOut::Control(msg)); } } @@ -2864,144 +2728,21 @@ where self.pending_iwant_msgs.clear(); } - /// Send a [`Rpc`] message to a peer. This will wrap the message in an arc if it + /// Send a [`RpcOut`] message to a peer. This will wrap the message in an arc if it /// is not already an arc. - fn send_message(&mut self, peer_id: PeerId, message: proto::RPC) -> Result<(), PublishError> { - // If the message is oversized, try and fragment it. If it cannot be fragmented, log an - // error and drop the message (all individual messages should be small enough to fit in the - // max_transmit_size) - - let messages = self.fragment_message(message)?; - - for message in messages { - self.events.push_back(ToSwarm::NotifyHandler { - peer_id, - event: HandlerIn::Message(message), - handler: NotifyHandler::Any, - }) - } - Ok(()) - } - - // If a message is too large to be sent as-is, this attempts to fragment it into smaller RPC - // messages to be sent. - fn fragment_message(&self, rpc: proto::RPC) -> Result, PublishError> { - if rpc.get_size() < self.config.max_transmit_size() { - return Ok(vec![rpc]); - } - - let new_rpc = proto::RPC { - subscriptions: Vec::new(), - publish: Vec::new(), - control: None, - }; - - let mut rpc_list = vec![new_rpc.clone()]; - - // Gets an RPC if the object size will fit, otherwise create a new RPC. The last element - // will be the RPC to add an object. - macro_rules! create_or_add_rpc { - ($object_size: ident ) => { - let list_index = rpc_list.len() - 1; // the list is never empty - - // create a new RPC if the new object plus 5% of its size (for length prefix - // buffers) exceeds the max transmit size. - if rpc_list[list_index].get_size() + (($object_size as f64) * 1.05) as usize - > self.config.max_transmit_size() - && rpc_list[list_index] != new_rpc - { - // create a new rpc and use this as the current - rpc_list.push(new_rpc.clone()); - } - }; - } - - macro_rules! add_item { - ($object: ident, $type: ident ) => { - let object_size = $object.get_size(); - - if object_size + 2 > self.config.max_transmit_size() { - // This should not be possible. All received and published messages have already - // been vetted to fit within the size. 
- tracing::error!("Individual message too large to fragment"); - return Err(PublishError::MessageTooLarge); - } - - create_or_add_rpc!(object_size); - rpc_list - .last_mut() - .expect("Must have at least one element") - .$type - .push($object.clone()); - }; - } - - // Add messages until the limit - for message in &rpc.publish { - add_item!(message, publish); - } - for subscription in &rpc.subscriptions { - add_item!(subscription, subscriptions); - } - - // handle the control messages. If all are within the max_transmit_size, send them without - // fragmenting, otherwise, fragment the control messages - let empty_control = proto::ControlMessage::default(); - if let Some(control) = rpc.control.as_ref() { - if control.get_size() + 2 > self.config.max_transmit_size() { - // fragment the RPC - for ihave in &control.ihave { - let len = ihave.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .ihave - .push(ihave.clone()); - } - for iwant in &control.iwant { - let len = iwant.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .iwant - .push(iwant.clone()); - } - for graft in &control.graft { - let len = graft.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .graft - .push(graft.clone()); - } - for prune in &control.prune { - let len = prune.get_size(); - create_or_add_rpc!(len); - rpc_list - .last_mut() - .expect("Always an element") - .control - .get_or_insert_with(|| empty_control.clone()) - .prune - .push(prune.clone()); - } - } else { - let len = control.get_size(); - create_or_add_rpc!(len); - rpc_list.last_mut().expect("Always an element").control = Some(control.clone()); + fn send_message(&mut self, peer_id: PeerId, rpc: RpcOut) { + if let Some(m) = self.metrics.as_mut() { + if let RpcOut::Publish(ref message) | RpcOut::Forward(ref message) = rpc { + // register bytes sent on the internal metrics. + m.msg_sent(&message.topic, message.raw_protobuf_len()); } } - Ok(rpc_list) + self.events.push_back(ToSwarm::NotifyHandler { + peer_id, + event: HandlerIn::Message(rpc), + handler: NotifyHandler::Any, + }); } fn on_connection_established( @@ -3050,46 +2791,27 @@ where .connections .push(connection_id); - if other_established == 0 { - // Ignore connections from blacklisted peers. - if self.blacklisted_peers.contains(&peer_id) { - tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer"); - } else { - tracing::debug!(peer=%peer_id, "New peer connected"); - // We need to send our subscriptions to the newly-connected node. - let mut subscriptions = vec![]; - for topic_hash in self.mesh.keys() { - subscriptions.push(Subscription { - topic_hash: topic_hash.clone(), - action: SubscriptionAction::Subscribe, - }); - } + if other_established > 0 { + return; // Not our first connection to this peer, hence nothing to do. + } - if !subscriptions.is_empty() { - // send our subscriptions to the peer - if self - .send_message( - peer_id, - Rpc { - messages: Vec::new(), - subscriptions, - control_msgs: Vec::new(), - } - .into_protobuf(), - ) - .is_err() - { - tracing::error!("Failed to send subscriptions, message too large"); - } - } - } + // Insert an empty set of the topics of this peer until known. 
+ self.peer_topics.insert(peer_id, Default::default()); - // Insert an empty set of the topics of this peer until known. - self.peer_topics.insert(peer_id, Default::default()); + if let Some((peer_score, ..)) = &mut self.peer_score { + peer_score.add_peer(peer_id); + } - if let Some((peer_score, ..)) = &mut self.peer_score { - peer_score.add_peer(peer_id); - } + // Ignore connections from blacklisted peers. + if self.blacklisted_peers.contains(&peer_id) { + tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer"); + return; + } + + tracing::debug!(peer=%peer_id, "New peer connected"); + // We need to send our subscriptions to the newly-connected node. + for topic_hash in self.mesh.clone().into_keys() { + self.send_message(peer_id, RpcOut::Subscribe(topic_hash)); } } @@ -3552,7 +3274,7 @@ fn get_random_peers_dynamic( // if they exist, filter the peers by `f` Some(peer_list) => peer_list .iter() - .cloned() + .copied() .filter(|p| { f(p) && match connected_peers.get(p) { Some(connections) if connections.kind == PeerKind::Gossipsub => true, @@ -3659,17 +3381,8 @@ impl fmt::Debug for PublishConfig { mod local_test { use super::*; use crate::IdentTopic; - use asynchronous_codec::Encoder; use quickcheck::*; - fn empty_rpc() -> Rpc { - Rpc { - subscriptions: Vec::new(), - messages: Vec::new(), - control_msgs: Vec::new(), - } - } - fn test_message() -> RawMessage { RawMessage { source: Some(PeerId::random()), @@ -3682,13 +3395,6 @@ mod local_test { } } - fn test_subscription() -> Subscription { - Subscription { - action: SubscriptionAction::Subscribe, - topic_hash: IdentTopic::new("TestTopic").hash(), - } - } - fn test_control() -> ControlAction { ControlAction::IHave { topic_hash: IdentTopic::new("TestTopic").hash(), @@ -3696,117 +3402,16 @@ mod local_test { } } - impl Arbitrary for Rpc { + impl Arbitrary for RpcOut { fn arbitrary(g: &mut Gen) -> Self { - let mut rpc = empty_rpc(); - - for _ in 0..g.gen_range(0..10u8) { - rpc.subscriptions.push(test_subscription()); - } - for _ in 0..g.gen_range(0..10u8) { - rpc.messages.push(test_message()); - } - for _ in 0..g.gen_range(0..10u8) { - rpc.control_msgs.push(test_control()); - } - rpc - } - } - - #[test] - /// Tests RPC message fragmentation - fn test_message_fragmentation_deterministic() { - let max_transmit_size = 500; - let config = crate::config::ConfigBuilder::default() - .max_transmit_size(max_transmit_size) - .validation_mode(ValidationMode::Permissive) - .build() - .unwrap(); - let gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, config).unwrap(); - - // Message under the limit should be fine. 
- let mut rpc = empty_rpc(); - rpc.messages.push(test_message()); - - let mut rpc_proto = rpc.clone().into_protobuf(); - let fragmented_messages = gs.fragment_message(rpc_proto.clone()).unwrap(); - assert_eq!( - fragmented_messages, - vec![rpc_proto.clone()], - "Messages under the limit shouldn't be fragmented" - ); - - // Messages over the limit should be split - - while rpc_proto.get_size() < max_transmit_size { - rpc.messages.push(test_message()); - rpc_proto = rpc.clone().into_protobuf(); - } - - let fragmented_messages = gs - .fragment_message(rpc_proto) - .expect("Should be able to fragment the messages"); - - assert!( - fragmented_messages.len() > 1, - "the message should be fragmented" - ); - - // all fragmented messages should be under the limit - for message in fragmented_messages { - assert!( - message.get_size() < max_transmit_size, - "all messages should be less than the transmission size" - ); - } - } - - #[test] - fn test_message_fragmentation() { - fn prop(rpc: Rpc) { - let max_transmit_size = 500; - let config = crate::config::ConfigBuilder::default() - .max_transmit_size(max_transmit_size) - .validation_mode(ValidationMode::Permissive) - .build() - .unwrap(); - let gs: Behaviour = Behaviour::new(MessageAuthenticity::RandomAuthor, config).unwrap(); - - let mut codec = - crate::protocol::GossipsubCodec::new(max_transmit_size, ValidationMode::Permissive); - - let rpc_proto = rpc.into_protobuf(); - let fragmented_messages = gs - .fragment_message(rpc_proto.clone()) - .expect("Messages must be valid"); - - if rpc_proto.get_size() < max_transmit_size { - assert_eq!( - fragmented_messages.len(), - 1, - "the message should not be fragmented" - ); - } else { - assert!( - fragmented_messages.len() > 1, - "the message should be fragmented" - ); - } - - // all fragmented messages should be under the limit - for message in fragmented_messages { - assert!( - message.get_size() < max_transmit_size, - "all messages should be less than the transmission size: list size {} max size{}", message.get_size(), max_transmit_size - ); - - // ensure they can all be encoded - let mut buf = bytes::BytesMut::with_capacity(message.get_size()); - codec.encode(message, &mut buf).unwrap() + match u8::arbitrary(g) % 5 { + 0 => RpcOut::Subscribe(IdentTopic::new("TestTopic").hash()), + 1 => RpcOut::Unsubscribe(IdentTopic::new("TestTopic").hash()), + 2 => RpcOut::Publish(test_message()), + 3 => RpcOut::Forward(test_message()), + 4 => RpcOut::Control(test_control()), + _ => panic!("outside range"), } } - QuickCheck::new() - .max_tests(100) - .quickcheck(prop as fn(_) -> _) } } diff --git a/protocols/gossipsub/src/behaviour/tests.rs b/protocols/gossipsub/src/behaviour/tests.rs index cf24ed8d8dc..570cdf43f90 100644 --- a/protocols/gossipsub/src/behaviour/tests.rs +++ b/protocols/gossipsub/src/behaviour/tests.rs @@ -24,7 +24,9 @@ use super::*; use crate::subscription_filter::WhitelistSubscriptionFilter; use crate::transform::{DataTransform, IdentityTransform}; use crate::ValidationError; -use crate::{config::Config, config::ConfigBuilder, IdentTopic as Topic, TopicScoreParams}; +use crate::{ + config::Config, config::ConfigBuilder, types::Rpc, IdentTopic as Topic, TopicScoreParams, +}; use async_std::net::Ipv4Addr; use byteorder::{BigEndian, ByteOrder}; use libp2p_core::{ConnectedPoint, Endpoint}; @@ -402,26 +404,19 @@ fn test_subscribe() { let subscriptions = gs .events .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { - ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), - .. 
- } => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; + .filter(|e| { + matches!( + e, + ToSwarm::NotifyHandler { + event: HandlerIn::Message(RpcOut::Subscribe(_)), + .. } - collected_subscriptions - } - _ => collected_subscriptions, - }); + ) + }) + .count(); // we sent a subscribe to all known peers - assert!( - subscriptions.len() == 20, - "Should send a subscription to all known peers" - ); + assert_eq!(subscriptions, 20); } #[test] @@ -470,26 +465,16 @@ fn test_unsubscribe() { let subscriptions = gs .events .iter() - .fold(vec![], |mut collected_subscriptions, e| match e { + .fold(0, |collected_subscriptions, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Subscribe(_)), .. - } => { - for s in &message.subscriptions { - if let Some(true) = s.subscribe { - collected_subscriptions.push(s.clone()) - }; - } - collected_subscriptions - } + } => collected_subscriptions + 1, _ => collected_subscriptions, }); // we sent a unsubscribe to all known peers, for two topics - assert!( - subscriptions.len() == 40, - "Should send an unsubscribe event to all known peers" - ); + assert_eq!(subscriptions, 40); // check we clean up internal structures for topic_hash in &topic_hashes { @@ -657,16 +642,13 @@ fn test_publish_without_flood_publishing() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Publish(message)), .. } => { - let event = proto_to_message(message); - for s in &event.messages { - collected_publish.push(s.clone()); - } + collected_publish.push(message); collected_publish } _ => collected_publish, @@ -747,16 +729,13 @@ fn test_fanout() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref message), + event: HandlerIn::Message(RpcOut::Publish(message)), .. } => { - let event = proto_to_message(message); - for s in &event.messages { - collected_publish.push(s.clone()); - } + collected_publish.push(message); collected_publish } _ => collected_publish, @@ -798,37 +777,36 @@ fn test_inject_connected() { // check that our subscriptions are sent to each of the peers // collect all the SendEvents - let send_events: Vec<_> = gs + let subscriptions = gs .events - .iter() - .filter(|e| match e { + .into_iter() + .filter_map(|e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Subscribe(topic)), + peer_id, .. - } => !m.subscriptions.is_empty(), - _ => false, + } => Some((peer_id, topic)), + _ => None, }) - .collect(); + .fold( + HashMap::>::new(), + |mut subs, (peer, sub)| { + let mut peer_subs = subs.remove(&peer).unwrap_or_default(); + peer_subs.push(sub.into_string()); + subs.insert(peer, peer_subs); + subs + }, + ); // check that there are two subscriptions sent to each peer - for sevent in send_events.clone() { - if let ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), - .. - } = sevent - { - assert!( - m.subscriptions.len() == 2, - "There should be two subscriptions sent to each peer (1 for each topic)." 
- ); - }; + for peer_subs in subscriptions.values() { + assert!(peer_subs.contains(&String::from("topic1"))); + assert!(peer_subs.contains(&String::from("topic2"))); + assert_eq!(peer_subs.len(), 2); } // check that there are 20 send events created - assert!( - send_events.len() == 20, - "There should be a subscription event sent to each peer." - ); + assert_eq!(subscriptions.len(), 20); // should add the new peers to `peer_topics` with an empty vec as a gossipsub node for peer in peers { @@ -1041,21 +1019,18 @@ fn test_handle_iwant_msg_cached() { gs.handle_iwant(&peers[7], vec![msg_id.clone()]); // the messages we are sending - let sent_messages = gs - .events - .iter() - .fold(vec![], |mut collected_messages, e| match e { + let sent_messages = gs.events.into_iter().fold( + Vec::::new(), + |mut collected_messages, e| match e { ToSwarm::NotifyHandler { event, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push(c.clone()) - } + if let HandlerIn::Message(RpcOut::Forward(message)) = event { + collected_messages.push(message); } collected_messages } _ => collected_messages, - }); + }, + ); assert!( sent_messages @@ -1104,15 +1079,14 @@ fn test_handle_iwant_msg_cached_shifted() { // is the message is being sent? let message_exists = gs.events.iter().any(|e| match e { ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(message)), .. } => { - let event = proto_to_message(m); - event - .messages - .iter() - .map(|msg| gs.data_transform.inbound_transform(msg.clone()).unwrap()) - .any(|msg| gs.config.message_id(&msg) == msg_id) + gs.config.message_id( + &gs.data_transform + .inbound_transform(message.clone()) + .unwrap(), + ) == msg_id } _ => false, }); @@ -1343,22 +1317,15 @@ fn count_control_msgs( .sum::() + gs.events .iter() - .map(|e| match e { + .filter(|e| match e { ToSwarm::NotifyHandler { peer_id, - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Control(action)), .. - } => { - let event = proto_to_message(m); - event - .control_msgs - .iter() - .filter(|m| filter(peer_id, m)) - .count() - } - _ => 0, + } => filter(peer_id, action), + _ => false, }) - .sum::() + .count() } fn flush_events(gs: &mut Behaviour) { @@ -1567,17 +1534,10 @@ fn do_forward_messages_to_explicit_peers() { .filter(|e| match e { ToSwarm::NotifyHandler { peer_id, - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(m)), .. } => { - let event = proto_to_message(m); - peer_id == &peers[0] - && event - .messages - .iter() - .filter(|m| m.data == message.data) - .count() - > 0 + peer_id == &peers[0] && m.data == message.data } _ => false, }) @@ -2111,14 +2071,11 @@ fn test_flood_publish() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } collected_publish } @@ -2672,14 +2629,11 @@ fn test_iwant_msg_from_peer_below_gossip_threshold_gets_ignored() { // the messages we are sending let sent_messages = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_messages, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. 
} => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for c in &event.messages { - collected_messages.push((*peer_id, c.clone())) - } + if let HandlerIn::Message(RpcOut::Forward(message)) = event { + collected_messages.push((peer_id, message)); } collected_messages } @@ -2820,14 +2774,11 @@ fn test_do_not_publish_to_peer_below_publish_threshold() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((*peer_id, s.clone())); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push((peer_id, message)); } collected_publish } @@ -2877,14 +2828,11 @@ fn test_do_not_flood_publish_to_peer_below_publish_threshold() { // Collect all publish messages let publishes = gs .events - .iter() + .into_iter() .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { event, peer_id, .. } => { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push((*peer_id, s.clone())); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push((peer_id, message)); } collected_publish } @@ -4403,17 +4351,14 @@ fn test_ignore_too_many_iwants_from_same_peer_for_same_message() { assert_eq!( gs.events .iter() - .map(|e| match e { + .filter(|e| matches!( + e, ToSwarm::NotifyHandler { - event: HandlerIn::Message(ref m), + event: HandlerIn::Message(RpcOut::Forward(_)), .. - } => { - let event = proto_to_message(m); - event.messages.len() } - _ => 0, - }) - .sum::(), + )) + .count(), config.gossip_retransimission() as usize, "not more then gossip_retransmission many messages get sent back" ); @@ -4815,11 +4760,8 @@ fn test_publish_to_floodsub_peers_without_flood_publish() { .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { peer_id, event, .. } => { if peer_id == &p1 || peer_id == &p2 { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } } collected_publish @@ -4872,11 +4814,8 @@ fn test_do_not_use_floodsub_in_fanout() { .fold(vec![], |mut collected_publish, e| match e { ToSwarm::NotifyHandler { peer_id, event, .. 
} => { if peer_id == &p1 || peer_id == &p2 { - if let HandlerIn::Message(ref m) = event { - let event = proto_to_message(m); - for s in &event.messages { - collected_publish.push(s.clone()); - } + if let HandlerIn::Message(RpcOut::Publish(message)) = event { + collected_publish.push(message); } } collected_publish @@ -5122,7 +5061,7 @@ fn test_subscribe_and_graft_with_negative_score() { p2, connection_id, HandlerEvent::Message { - rpc: proto_to_message(&message), + rpc: proto_to_message(&message.into_protobuf()), invalid_messages: vec![], }, ); diff --git a/protocols/gossipsub/src/handler.rs b/protocols/gossipsub/src/handler.rs index e2ec681321c..e91f81776e7 100644 --- a/protocols/gossipsub/src/handler.rs +++ b/protocols/gossipsub/src/handler.rs @@ -20,7 +20,7 @@ use crate::protocol::{GossipsubCodec, ProtocolConfig}; use crate::rpc_proto::proto; -use crate::types::{PeerKind, RawMessage, Rpc}; +use crate::types::{PeerKind, RawMessage, Rpc, RpcOut}; use crate::ValidationError; use asynchronous_codec::Framed; use futures::future::Either; @@ -58,10 +58,11 @@ pub enum HandlerEvent { } /// A message sent from the behaviour to the handler. +#[allow(clippy::large_enum_variant)] #[derive(Debug)] pub enum HandlerIn { /// A gossipsub message to send. - Message(proto::RPC), + Message(RpcOut), /// The peer has joined the mesh. JoinedMesh, /// The peer has left the mesh. @@ -408,7 +409,7 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, message: HandlerIn) { match self { Handler::Enabled(handler) => match message { - HandlerIn::Message(m) => handler.send_queue.push(m), + HandlerIn::Message(m) => handler.send_queue.push(m.into_protobuf()), HandlerIn::JoinedMesh => { handler.in_mesh = true; } diff --git a/protocols/gossipsub/src/protocol.rs b/protocols/gossipsub/src/protocol.rs index 42d43c97510..e9600a4d8d8 100644 --- a/protocols/gossipsub/src/protocol.rs +++ b/protocols/gossipsub/src/protocol.rs @@ -577,7 +577,7 @@ mod tests { let message = message.0; let rpc = Rpc { - messages: vec![message], + messages: vec![message.clone()], subscriptions: vec![], control_msgs: vec![], }; @@ -591,7 +591,7 @@ mod tests { HandlerEvent::Message { mut rpc, .. } => { rpc.messages[0].validated = true; - assert_eq!(rpc, rpc); + assert_eq!(vec![message], rpc.messages); } _ => panic!("Must decode a message"), } diff --git a/protocols/gossipsub/src/types.rs b/protocols/gossipsub/src/types.rs index 196468b8d32..d1b92ff0ba8 100644 --- a/protocols/gossipsub/src/types.rs +++ b/protocols/gossipsub/src/types.rs @@ -132,6 +132,19 @@ impl RawMessage { } } +impl From for proto::Message { + fn from(raw: RawMessage) -> Self { + proto::Message { + from: raw.source.map(|m| m.to_bytes()), + data: Some(raw.data), + seqno: raw.sequence_number.map(|s| s.to_be_bytes().to_vec()), + topic: TopicHash::into_string(raw.topic), + signature: raw.signature, + key: raw.key, + } + } +} + /// The message sent to the user after a [`RawMessage`] has been transformed by a /// [`crate::DataTransform`]. #[derive(Clone, PartialEq, Eq, Hash)] @@ -220,6 +233,130 @@ pub enum ControlAction { }, } +/// A Gossipsub RPC message sent. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum RpcOut { + /// Publish a Gossipsub message on network. + Publish(RawMessage), + /// Forward a Gossipsub message to the network. + Forward(RawMessage), + /// Subscribe a topic. + Subscribe(TopicHash), + /// Unsubscribe a topic. + Unsubscribe(TopicHash), + /// List of Gossipsub control messages. 
+ Control(ControlAction), +} + +impl RpcOut { + /// Converts the GossipsubRPC into its protobuf format. + // A convenience function to avoid explicitly specifying types. + pub fn into_protobuf(self) -> proto::RPC { + self.into() + } +} + +impl From for proto::RPC { + /// Converts the RPC into protobuf format. + fn from(rpc: RpcOut) -> Self { + match rpc { + RpcOut::Publish(message) => proto::RPC { + subscriptions: Vec::new(), + publish: vec![message.into()], + control: None, + }, + RpcOut::Forward(message) => proto::RPC { + publish: vec![message.into()], + subscriptions: Vec::new(), + control: None, + }, + RpcOut::Subscribe(topic) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(true), + topic_id: Some(topic.into_string()), + }], + control: None, + }, + RpcOut::Unsubscribe(topic) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![proto::SubOpts { + subscribe: Some(false), + topic_id: Some(topic.into_string()), + }], + control: None, + }, + RpcOut::Control(ControlAction::IHave { + topic_hash, + message_ids, + }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![proto::ControlIHave { + topic_id: Some(topic_hash.into_string()), + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + iwant: vec![], + graft: vec![], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::IWant { message_ids }) => proto::RPC { + publish: Vec::new(), + subscriptions: Vec::new(), + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![proto::ControlIWant { + message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(), + }], + graft: vec![], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::Graft { topic_hash }) => proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![proto::ControlGraft { + topic_id: Some(topic_hash.into_string()), + }], + prune: vec![], + }), + }, + RpcOut::Control(ControlAction::Prune { + topic_hash, + peers, + backoff, + }) => { + proto::RPC { + publish: Vec::new(), + subscriptions: vec![], + control: Some(proto::ControlMessage { + ihave: vec![], + iwant: vec![], + graft: vec![], + prune: vec![proto::ControlPrune { + topic_id: Some(topic_hash.into_string()), + peers: peers + .into_iter() + .map(|info| proto::PeerInfo { + peer_id: info.peer_id.map(|id| id.to_bytes()), + // TODO, see https://github.com/libp2p/specs/pull/217 + signed_peer_record: None, + }) + .collect(), + backoff, + }], + }), + } + } + } + } +} + /// An RPC received/sent. #[derive(Clone, PartialEq, Eq, Hash)] pub struct Rpc { From c718835585379dfd0e72e4359695450ce5100ae9 Mon Sep 17 00:00:00 2001 From: stormshield-frb <144998884+stormshield-frb@users.noreply.github.com> Date: Wed, 22 Nov 2023 21:40:26 +0100 Subject: [PATCH 13/48] fix(mdns): don't suspend task forever upon reading non-mDNS packet Fixes: #4860. Pull-Request: #4861. --- protocols/mdns/CHANGELOG.md | 2 ++ protocols/mdns/src/behaviour/iface.rs | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/protocols/mdns/CHANGELOG.md b/protocols/mdns/CHANGELOG.md index 6f5aa6945fd..cfd02232b07 100644 --- a/protocols/mdns/CHANGELOG.md +++ b/protocols/mdns/CHANGELOG.md @@ -2,6 +2,8 @@ - Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. See [PR 4596](https://github.com/libp2p/rust-libp2p/pull/4596). 
+- Fix a bug in the `Behaviour::poll` method causing missed mdns packets. + See [PR 4861](https://github.com/libp2p/rust-libp2p/pull/4861). ## 0.45.0 diff --git a/protocols/mdns/src/behaviour/iface.rs b/protocols/mdns/src/behaviour/iface.rs index 7fe97c38381..9302065cde2 100644 --- a/protocols/mdns/src/behaviour/iface.rs +++ b/protocols/mdns/src/behaviour/iface.rs @@ -312,14 +312,18 @@ where } Poll::Ready(Err(err)) if err.kind() == std::io::ErrorKind::WouldBlock => { // No more bytes available on the socket to read + continue; } Poll::Ready(Err(err)) => { tracing::error!("failed reading datagram: {}", err); + return Poll::Ready(()); } Poll::Ready(Ok(Err(err))) => { tracing::debug!("Parsing mdns packet failed: {:?}", err); + continue; } - Poll::Ready(Ok(Ok(None))) | Poll::Pending => {} + Poll::Ready(Ok(Ok(None))) => continue, + Poll::Pending => {} } return Poll::Pending; From add1ff6e50b335f2c8cacdb9e0f4dba90ad22f3b Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Thu, 23 Nov 2023 10:36:09 +1100 Subject: [PATCH 14/48] refactor(kad): use `oneshot`s and `async-await` for outbound streams This refactoring addresses several aspects of the current handler implementation: - Remove the manual state machine for outbound streams in favor of using `async-await`. - Use `oneshot`s to track the number of requested outbound streams - Use `futures_bounded::FuturesMap` to track the execution of a stream, thus applying a timeout to the entire request. Resolves: #3130. Related: #3268. Related: #4510. Pull-Request: #4901. --- Cargo.lock | 1 + protocols/kad/Cargo.toml | 1 + protocols/kad/src/behaviour.rs | 1 + protocols/kad/src/handler.rs | 374 ++++++++++++--------------------- 4 files changed, 135 insertions(+), 242 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4a7655a3004..e037dcac20b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2720,6 +2720,7 @@ dependencies = [ "either", "fnv", "futures", + "futures-bounded", "futures-timer", "instant", "libp2p-core", diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 04101d51026..1e4c788cf00 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -19,6 +19,7 @@ asynchronous-codec = { workspace = true } futures = "0.3.29" libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } +futures-bounded = { workspace = true } quick-protobuf = "0.8" quick-protobuf-codec = { workspace = true } libp2p-identity = { workspace = true, features = ["rand"] } diff --git a/protocols/kad/src/behaviour.rs b/protocols/kad/src/behaviour.rs index 5a4b737c998..cde4fbb8536 100644 --- a/protocols/kad/src/behaviour.rs +++ b/protocols/kad/src/behaviour.rs @@ -3173,6 +3173,7 @@ impl QueryInfo { multiaddrs: external_addresses.clone(), connection_ty: crate::protocol::ConnectionType::Connected, }, + query_id, }, }, QueryInfo::GetRecord { key, .. 
} => HandlerIn::GetRecord { diff --git a/protocols/kad/src/handler.rs b/protocols/kad/src/handler.rs index adfb076541c..318261d8d21 100644 --- a/protocols/kad/src/handler.rs +++ b/protocols/kad/src/handler.rs @@ -25,22 +25,22 @@ use crate::protocol::{ use crate::record::{self, Record}; use crate::QueryId; use either::Either; +use futures::channel::oneshot; use futures::prelude::*; use futures::stream::SelectAll; use libp2p_core::{upgrade, ConnectedPoint}; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; +use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound, FullyNegotiatedOutbound}; use libp2p_swarm::{ ConnectionHandler, ConnectionHandlerEvent, Stream, StreamUpgradeError, SubstreamProtocol, SupportedProtocols, }; use std::collections::VecDeque; use std::task::Waker; +use std::time::Duration; use std::{error, fmt, io, marker::PhantomData, pin::Pin, task::Context, task::Poll}; -const MAX_NUM_SUBSTREAMS: usize = 32; +const MAX_NUM_STREAMS: usize = 32; /// Protocol handler that manages substreams for the Kademlia protocol /// on a single connection with a peer. @@ -59,15 +59,16 @@ pub struct Handler { /// Next unique ID of a connection. next_connec_unique_id: UniqueConnecId, - /// List of active outbound substreams with the state they are in. - outbound_substreams: SelectAll, + /// List of active outbound streams. + outbound_substreams: futures_bounded::FuturesMap>>, - /// Number of outbound streams being upgraded right now. - num_requested_outbound_streams: usize, + /// Contains one [`oneshot::Sender`] per outbound stream that we have requested. + pending_streams: + VecDeque, StreamUpgradeError>>>, /// List of outbound substreams that are waiting to become active next. /// Contains the request we want to send, and the user data if we expect an answer. - pending_messages: VecDeque<(KadRequestMsg, Option)>, + pending_messages: VecDeque<(KadRequestMsg, QueryId)>, /// List of active inbound substreams with the state they are in. inbound_substreams: SelectAll, @@ -95,24 +96,6 @@ struct ProtocolStatus { reported: bool, } -/// State of an active outbound substream. -enum OutboundSubstreamState { - /// Waiting to send a message to the remote. - PendingSend(KadOutStreamSink, KadRequestMsg, Option), - /// Waiting to flush the substream so that the data arrives to the remote. - PendingFlush(KadOutStreamSink, Option), - /// Waiting for an answer back from the remote. - // TODO: add timeout - WaitingAnswer(KadOutStreamSink, QueryId), - /// An error happened on the substream and we should report the error to the user. - ReportError(HandlerQueryErr, QueryId), - /// The substream is being closed. - Closing(KadOutStreamSink), - /// The substream is complete and will not perform any more work. - Done, - Poisoned, -} - /// State of an active inbound substream. enum InboundSubstreamState { /// Waiting for a request from the remote. @@ -292,8 +275,6 @@ pub enum HandlerEvent { /// Error that can happen when requesting an RPC query. #[derive(Debug)] pub enum HandlerQueryErr { - /// Error while trying to perform the query. - Upgrade(StreamUpgradeError), /// Received an answer that doesn't correspond to the request. UnexpectedMessage, /// I/O error in the substream. 
@@ -303,9 +284,6 @@ pub enum HandlerQueryErr { impl fmt::Display for HandlerQueryErr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - HandlerQueryErr::Upgrade(err) => { - write!(f, "Error while performing Kademlia query: {err}") - } HandlerQueryErr::UnexpectedMessage => { write!( f, @@ -322,19 +300,12 @@ impl fmt::Display for HandlerQueryErr { impl error::Error for HandlerQueryErr { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { - HandlerQueryErr::Upgrade(err) => Some(err), HandlerQueryErr::UnexpectedMessage => None, HandlerQueryErr::Io(err) => Some(err), } } } -impl From> for HandlerQueryErr { - fn from(err: StreamUpgradeError) -> Self { - HandlerQueryErr::Upgrade(err) - } -} - /// Event to send to the handler. #[derive(Debug)] pub enum HandlerIn { @@ -355,7 +326,7 @@ pub enum HandlerIn { FindNodeReq { /// Identifier of the node. key: Vec, - /// Custom user data. Passed back in the out event when the results arrive. + /// ID of the query that generated this request. query_id: QueryId, }, @@ -374,7 +345,7 @@ pub enum HandlerIn { GetProvidersReq { /// Identifier being searched. key: record::Key, - /// Custom user data. Passed back in the out event when the results arrive. + /// ID of the query that generated this request. query_id: QueryId, }, @@ -399,13 +370,15 @@ pub enum HandlerIn { key: record::Key, /// Known provider for this key. provider: KadPeer, + /// ID of the query that generated this request. + query_id: QueryId, }, /// Request to retrieve a record from the DHT. GetRecord { /// The key of the record. key: record::Key, - /// Custom data. Passed back in the out event when the results arrive. + /// ID of the query that generated this request. query_id: QueryId, }, @@ -422,7 +395,7 @@ pub enum HandlerIn { /// Put a value into the dht records. PutRecord { record: Record, - /// Custom data. Passed back in the out event when the results arrive. + /// ID of the query that generated this request. 
query_id: QueryId, }, @@ -480,8 +453,11 @@ impl Handler { remote_peer_id, next_connec_unique_id: UniqueConnecId(0), inbound_substreams: Default::default(), - outbound_substreams: Default::default(), - num_requested_outbound_streams: 0, + outbound_substreams: futures_bounded::FuturesMap::new( + Duration::from_secs(10), + MAX_NUM_STREAMS, + ), + pending_streams: Default::default(), pending_messages: Default::default(), protocol_status: None, remote_supported_protocols: Default::default(), @@ -490,20 +466,18 @@ impl Handler { fn on_fully_negotiated_outbound( &mut self, - FullyNegotiatedOutbound { protocol, info: () }: FullyNegotiatedOutbound< + FullyNegotiatedOutbound { + protocol: stream, + info: (), + }: FullyNegotiatedOutbound< ::OutboundProtocol, ::OutboundOpenInfo, >, ) { - if let Some((msg, query_id)) = self.pending_messages.pop_front() { - self.outbound_substreams - .push(OutboundSubstreamState::PendingSend(protocol, msg, query_id)); - } else { - debug_assert!(false, "Requested outbound stream without message") + if let Some(sender) = self.pending_streams.pop_front() { + let _ = sender.send(Ok(stream)); } - self.num_requested_outbound_streams -= 1; - if self.protocol_status.is_none() { // Upon the first successfully negotiated substream, we know that the // remote is configured with the same protocol name and we want @@ -539,7 +513,7 @@ impl Handler { }); } - if self.inbound_substreams.len() == MAX_NUM_SUBSTREAMS { + if self.inbound_substreams.len() == MAX_NUM_STREAMS { if let Some(s) = self.inbound_substreams.iter_mut().find(|s| { matches!( s, @@ -573,24 +547,42 @@ impl Handler { }); } - fn on_dial_upgrade_error( - &mut self, - DialUpgradeError { - info: (), error, .. - }: DialUpgradeError< - ::OutboundOpenInfo, - ::OutboundProtocol, - >, - ) { - // TODO: cache the fact that the remote doesn't support kademlia at all, so that we don't - // continue trying + /// Takes the given [`KadRequestMsg`] and composes it into an outbound request-response protocol handshake using a [`oneshot::channel`]. + fn queue_new_stream(&mut self, id: QueryId, msg: KadRequestMsg) { + let (sender, receiver) = oneshot::channel(); + + self.pending_streams.push_back(sender); + let result = self.outbound_substreams.try_push(id, async move { + let mut stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? + .map_err(|e| match e { + StreamUpgradeError::Timeout => io::ErrorKind::TimedOut.into(), + StreamUpgradeError::Apply(e) => e, + StreamUpgradeError::NegotiationFailed => { + io::Error::new(io::ErrorKind::ConnectionRefused, "protocol not supported") + } + StreamUpgradeError::Io(e) => e, + })?; - if let Some((_, Some(query_id))) = self.pending_messages.pop_front() { - self.outbound_substreams - .push(OutboundSubstreamState::ReportError(error.into(), query_id)); - } + let has_answer = !matches!(msg, KadRequestMsg::AddProvider { .. 
}); + + stream.send(msg).await?; + stream.close().await?; + + if !has_answer { + return Ok(None); + } - self.num_requested_outbound_streams -= 1; + let msg = stream.next().await.ok_or(io::ErrorKind::UnexpectedEof)??; + + Ok(Some(msg)) + }); + + debug_assert!( + result.is_ok(), + "Expected to not create more streams than allowed" + ); } } @@ -627,7 +619,7 @@ impl ConnectionHandler for Handler { } HandlerIn::FindNodeReq { key, query_id } => { let msg = KadRequestMsg::FindNode { key }; - self.pending_messages.push_back((msg, Some(query_id))); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::FindNodeRes { closer_peers, @@ -635,7 +627,7 @@ impl ConnectionHandler for Handler { } => self.answer_pending_request(request_id, KadResponseMsg::FindNode { closer_peers }), HandlerIn::GetProvidersReq { key, query_id } => { let msg = KadRequestMsg::GetProviders { key }; - self.pending_messages.push_back((msg, Some(query_id))); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::GetProvidersRes { closer_peers, @@ -648,17 +640,21 @@ impl ConnectionHandler for Handler { provider_peers, }, ), - HandlerIn::AddProvider { key, provider } => { + HandlerIn::AddProvider { + key, + provider, + query_id, + } => { let msg = KadRequestMsg::AddProvider { key, provider }; - self.pending_messages.push_back((msg, None)); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::GetRecord { key, query_id } => { let msg = KadRequestMsg::GetValue { key }; - self.pending_messages.push_back((msg, Some(query_id))); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::PutRecord { record, query_id } => { let msg = KadRequestMsg::PutValue { record }; - self.pending_messages.push_back((msg, Some(query_id))); + self.pending_messages.push_back((msg, query_id)); } HandlerIn::GetRecordRes { record, @@ -712,44 +708,68 @@ impl ConnectionHandler for Handler { ) -> Poll< ConnectionHandlerEvent, > { - match &mut self.protocol_status { - Some(status) if !status.reported => { - status.reported = true; - let event = if status.supported { - HandlerEvent::ProtocolConfirmed { - endpoint: self.endpoint.clone(), - } - } else { - HandlerEvent::ProtocolNotSupported { - endpoint: self.endpoint.clone(), - } - }; + loop { + match &mut self.protocol_status { + Some(status) if !status.reported => { + status.reported = true; + let event = if status.supported { + HandlerEvent::ProtocolConfirmed { + endpoint: self.endpoint.clone(), + } + } else { + HandlerEvent::ProtocolNotSupported { + endpoint: self.endpoint.clone(), + } + }; - return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } + _ => {} } - _ => {} - } - if let Poll::Ready(Some(event)) = self.outbound_substreams.poll_next_unpin(cx) { - return Poll::Ready(event); - } + match self.outbound_substreams.poll_unpin(cx) { + Poll::Ready((query, Ok(Ok(Some(response))))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + process_kad_response(response, query), + )) + } + Poll::Ready((_, Ok(Ok(None)))) => { + continue; + } + Poll::Ready((query_id, Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::QueryError { + error: HandlerQueryErr::Io(e), + query_id, + }, + )) + } + Poll::Ready((query_id, Err(_timeout))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour( + HandlerEvent::QueryError { + error: HandlerQueryErr::Io(io::ErrorKind::TimedOut.into()), + query_id, + }, + )) + } + Poll::Pending => {} + } - if 
let Poll::Ready(Some(event)) = self.inbound_substreams.poll_next_unpin(cx) { - return Poll::Ready(event); - } + if let Poll::Ready(Some(event)) = self.inbound_substreams.poll_next_unpin(cx) { + return Poll::Ready(event); + } - let num_in_progress_outbound_substreams = - self.outbound_substreams.len() + self.num_requested_outbound_streams; - if num_in_progress_outbound_substreams < MAX_NUM_SUBSTREAMS - && self.num_requested_outbound_streams < self.pending_messages.len() - { - self.num_requested_outbound_streams += 1; - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(self.protocol_config.clone(), ()), - }); - } + if self.outbound_substreams.len() < MAX_NUM_STREAMS { + if let Some((msg, id)) = self.pending_messages.pop_front() { + self.queue_new_stream(id, msg); + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(self.protocol_config.clone(), ()), + }); + } + } - Poll::Pending + return Poll::Pending; + } } fn on_connection_event( @@ -768,8 +788,10 @@ impl ConnectionHandler for Handler { ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { self.on_fully_negotiated_inbound(fully_negotiated_inbound) } - ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + ConnectionEvent::DialUpgradeError(ev) => { + if let Some(sender) = self.pending_streams.pop_front() { + let _ = sender.send(Err(ev.error)); + } } ConnectionEvent::RemoteProtocolsChange(change) => { let dirty = self.remote_supported_protocols.on_protocols_change(change); @@ -839,138 +861,6 @@ impl Handler { } } -impl futures::Stream for OutboundSubstreamState { - type Item = ConnectionHandlerEvent; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - loop { - match std::mem::replace(this, OutboundSubstreamState::Poisoned) { - OutboundSubstreamState::PendingSend(mut substream, msg, query_id) => { - match substream.poll_ready_unpin(cx) { - Poll::Ready(Ok(())) => match substream.start_send_unpin(msg) { - Ok(()) => { - *this = OutboundSubstreamState::PendingFlush(substream, query_id); - } - Err(error) => { - *this = OutboundSubstreamState::Done; - let event = query_id.map(|query_id| { - ConnectionHandlerEvent::NotifyBehaviour( - HandlerEvent::QueryError { - error: HandlerQueryErr::Io(error), - query_id, - }, - ) - }); - - return Poll::Ready(event); - } - }, - Poll::Pending => { - *this = OutboundSubstreamState::PendingSend(substream, msg, query_id); - return Poll::Pending; - } - Poll::Ready(Err(error)) => { - *this = OutboundSubstreamState::Done; - let event = query_id.map(|query_id| { - ConnectionHandlerEvent::NotifyBehaviour(HandlerEvent::QueryError { - error: HandlerQueryErr::Io(error), - query_id, - }) - }); - - return Poll::Ready(event); - } - } - } - OutboundSubstreamState::PendingFlush(mut substream, query_id) => { - match substream.poll_flush_unpin(cx) { - Poll::Ready(Ok(())) => { - if let Some(query_id) = query_id { - *this = OutboundSubstreamState::WaitingAnswer(substream, query_id); - } else { - *this = OutboundSubstreamState::Closing(substream); - } - } - Poll::Pending => { - *this = OutboundSubstreamState::PendingFlush(substream, query_id); - return Poll::Pending; - } - Poll::Ready(Err(error)) => { - *this = OutboundSubstreamState::Done; - let event = query_id.map(|query_id| { - ConnectionHandlerEvent::NotifyBehaviour(HandlerEvent::QueryError { - error: HandlerQueryErr::Io(error), - query_id, - }) - 
}); - - return Poll::Ready(event); - } - } - } - OutboundSubstreamState::WaitingAnswer(mut substream, query_id) => { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(msg))) => { - *this = OutboundSubstreamState::Closing(substream); - let event = process_kad_response(msg, query_id); - - return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - event, - ))); - } - Poll::Pending => { - *this = OutboundSubstreamState::WaitingAnswer(substream, query_id); - return Poll::Pending; - } - Poll::Ready(Some(Err(error))) => { - *this = OutboundSubstreamState::Done; - let event = HandlerEvent::QueryError { - error: HandlerQueryErr::Io(error), - query_id, - }; - - return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - event, - ))); - } - Poll::Ready(None) => { - *this = OutboundSubstreamState::Done; - let event = HandlerEvent::QueryError { - error: HandlerQueryErr::Io(io::ErrorKind::UnexpectedEof.into()), - query_id, - }; - - return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour( - event, - ))); - } - } - } - OutboundSubstreamState::ReportError(error, query_id) => { - *this = OutboundSubstreamState::Done; - let event = HandlerEvent::QueryError { error, query_id }; - - return Poll::Ready(Some(ConnectionHandlerEvent::NotifyBehaviour(event))); - } - OutboundSubstreamState::Closing(mut stream) => match stream.poll_close_unpin(cx) { - Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => return Poll::Ready(None), - Poll::Pending => { - *this = OutboundSubstreamState::Closing(stream); - return Poll::Pending; - } - }, - OutboundSubstreamState::Done => { - *this = OutboundSubstreamState::Done; - return Poll::Ready(None); - } - OutboundSubstreamState::Poisoned => unreachable!(), - } - } - } -} - impl futures::Stream for InboundSubstreamState { type Item = ConnectionHandlerEvent; From 3b6b74db7fdb252103699efcbc85cb11605ea893 Mon Sep 17 00:00:00 2001 From: Andreas <104641345+andreasbros@users.noreply.github.com> Date: Thu, 23 Nov 2023 20:52:25 +0000 Subject: [PATCH 15/48] docs(interop-tests): minor fixes to README Pull-Request: #4915. --- interop-tests/README.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/interop-tests/README.md b/interop-tests/README.md index bab98df7987..c2805ddf707 100644 --- a/interop-tests/README.md +++ b/interop-tests/README.md @@ -8,9 +8,11 @@ You can run this test locally by having a local Redis instance and by having another peer that this test can dial or listen for. For example to test that we can dial/listen for ourselves we can do the following: -1. Start redis (needed by the tests): `docker run --rm -p 6379:6379 redis:7-alpine`. -2. In one terminal run the dialer: `redis_addr=localhost:6379 ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="true" cargo run --bin ping` -3. In another terminal, run the listener: `redis_addr=localhost:6379 ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="false" cargo run --bin native_ping` +1. Start redis (needed by the tests): `docker run --rm -p 6379:6379 redis:7-alpine` +2. In one terminal run the dialer: `RUST_LOG=debug redis_addr=localhost:6379 ip="0.0.0.0" transport=tcp security=noise muxer=yamux is_dialer="true" cargo run --bin native_ping` +3. 
In another terminal, run the listener: `RUST_LOG=debug redis_addr=localhost:6379 ip="0.0.0.0" transport=tcp security=noise muxer=yamux is_dialer="false" cargo run --bin native_ping` + +If testing `transport=quic-v1`, then remove `security` and `muxer` variables from command line, because QUIC protocol comes with its own encryption and multiplexing. To test the interop with other versions do something similar, except replace one of these nodes with the other version's interop test. @@ -30,9 +32,9 @@ Firefox is not yet supported as it doesn't support all required features yet To run the webrtc-direct test, you'll need the `chromedriver` in your `$PATH`, compatible with your Chrome browser. 1. Start redis: `docker run --rm -p 6379:6379 redis:7-alpine`. -1. Build the wasm package: `wasm-pack build --target web` -1. With the webrtc-direct listener `RUST_LOG=debug,webrtc=off,webrtc_sctp=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer="false" cargo run --bin native_ping` -1. Run the webrtc-direct dialer: `RUST_LOG=debug,hyper=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer=true cargo run --bin wasm_ping` +2. Build the wasm package: `wasm-pack build --target web` +3. With the webrtc-direct listener `RUST_LOG=debug,webrtc=off,webrtc_sctp=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer="false" cargo run --bin native_ping` +4. Run the webrtc-direct dialer: `RUST_LOG=debug,hyper=off redis_addr="127.0.0.1:6379" ip="0.0.0.0" transport=webrtc-direct is_dialer=true cargo run --bin wasm_ping` # Running all interop tests locally with Compose @@ -41,8 +43,8 @@ To run this test against all released libp2p versions you'll need to have the the following (from the root directory of this repository): 1. Build the image: `docker build -t rust-libp2p-head . -f interop-tests/Dockerfile`. -1. Build the images for all released versions in `libp2p/test-plans`: `(cd /libp2p/test-plans/multidim-interop/ && make)`. -1. Run the test: +2. Build the images for all released versions in `libp2p/test-plans`: `(cd /libp2p/test-plans/multidim-interop/ && make)`. +3. Run the test: ``` RUST_LIBP2P="$PWD"; (cd /libp2p/test-plans/multidim-interop/ && npm run test -- --extra-version=$RUST_LIBP2P/interop-tests/ping-version.json --name-filter="rust-libp2p-head") ``` From d851d1b3f82735555c4b3716266793817728cee8 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 24 Nov 2023 20:28:26 +1100 Subject: [PATCH 16/48] feat(quick-protobuf-codec): reduce allocations during encoding We can reduce the number of allocations during the encoding of messages by not depending on the `Encoder` implementation of `unsigned-varint` but doing the encoding ourselves. This allows us to directly plug the `BytesMut` into the `Writer` and directly encode into the target buffer. Previously, we were allocating each message as an additional buffer that got immediately thrown-away again. Related: #4781. Pull-Request: #4782. 
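
The `Codec` API itself does not change here; only the internals of its `Encoder`/`Decoder` implementations do. As a minimal round-trip sketch of how the codec is used (modelled on the crate's own tests; the message contents and the 1024-byte size limit below are arbitrary choices for illustration):

```rust
use asynchronous_codec::{Decoder, Encoder};
use bytes::BytesMut;
use quick_protobuf_codec::{proto, Codec};

fn main() {
    // Codec for the crate's test message type; the 1024-byte limit is arbitrary.
    let mut codec = Codec::<proto::Message>::new(1024);
    let mut buffer = BytesMut::new();

    // Encoding writes the varint length prefix and the protobuf body into `buffer`.
    codec
        .encode(proto::Message { data: vec![1u8, 2, 3] }, &mut buffer)
        .unwrap();

    // Decoding reads the length prefix back and yields the original message.
    let decoded = codec.decode(&mut buffer).unwrap().unwrap();
    assert_eq!(decoded.data, vec![1u8, 2, 3]);
}
```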
--- Cargo.lock | 5 +- Cargo.toml | 2 +- misc/quick-protobuf-codec/CHANGELOG.md | 5 + misc/quick-protobuf-codec/Cargo.toml | 13 +- misc/quick-protobuf-codec/benches/codec.rs | 28 +++ .../quick-protobuf-codec/src/generated/mod.rs | 2 + .../src/generated/test.proto | 7 + .../src/generated/test.rs | 47 ++++ misc/quick-protobuf-codec/src/lib.rs | 238 ++++++++++++++++-- .../tests/large_message.rs | 16 ++ 10 files changed, 339 insertions(+), 24 deletions(-) create mode 100644 misc/quick-protobuf-codec/benches/codec.rs create mode 100644 misc/quick-protobuf-codec/src/generated/mod.rs create mode 100644 misc/quick-protobuf-codec/src/generated/test.proto create mode 100644 misc/quick-protobuf-codec/src/generated/test.rs create mode 100644 misc/quick-protobuf-codec/tests/large_message.rs diff --git a/Cargo.lock b/Cargo.lock index e037dcac20b..46df5b64478 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4369,11 +4369,14 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" -version = "0.3.0" +version = "0.3.1" dependencies = [ "asynchronous-codec", "bytes", + "criterion", + "futures", "quick-protobuf", + "quickcheck-ext", "thiserror", "unsigned-varint 0.8.0", ] diff --git a/Cargo.toml b/Cargo.toml index 0603a22629b..1dce34011c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,7 +117,7 @@ multiaddr = "0.18.1" multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } prometheus-client = "0.22.0" -quick-protobuf-codec = { version = "0.3.0", path = "misc/quick-protobuf-codec" } +quick-protobuf-codec = { version = "0.3.1", path = "misc/quick-protobuf-codec" } quickcheck = { package = "quickcheck-ext", path = "misc/quickcheck-ext" } rw-stream-sink = { version = "0.4.0", path = "misc/rw-stream-sink" } unsigned-varint = { version = "0.8.0" } diff --git a/misc/quick-protobuf-codec/CHANGELOG.md b/misc/quick-protobuf-codec/CHANGELOG.md index 779dd750abd..a301293621f 100644 --- a/misc/quick-protobuf-codec/CHANGELOG.md +++ b/misc/quick-protobuf-codec/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.1 + +- Reduce allocations during encoding. + See [PR 4782](https://github.com/libp2p/rust-libp2p/pull/4782). + ## 0.3.0 - Update to `asynchronous-codec` `v0.7.0`. diff --git a/misc/quick-protobuf-codec/Cargo.toml b/misc/quick-protobuf-codec/Cargo.toml index fdc65cfa93c..484e2c9bc8b 100644 --- a/misc/quick-protobuf-codec/Cargo.toml +++ b/misc/quick-protobuf-codec/Cargo.toml @@ -3,7 +3,7 @@ name = "quick-protobuf-codec" edition = "2021" rust-version = { workspace = true } description = "Asynchronous de-/encoding of Protobuf structs using asynchronous-codec, unsigned-varint and quick-protobuf." -version = "0.3.0" +version = "0.3.1" authors = ["Max Inden "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" @@ -14,9 +14,18 @@ categories = ["asynchronous"] asynchronous-codec = { workspace = true } bytes = { version = "1" } thiserror = "1.0" -unsigned-varint = { workspace = true, features = ["asynchronous_codec"] } +unsigned-varint = { workspace = true, features = ["std"] } quick-protobuf = "0.8" +[dev-dependencies] +criterion = "0.5.1" +futures = "0.3.28" +quickcheck = { workspace = true } + +[[bench]] +name = "codec" +harness = false + # Passing arguments to the docsrs builder in order to properly document cfg's. 
# More information: https://docs.rs/about/builds#cross-compiling [package.metadata.docs.rs] diff --git a/misc/quick-protobuf-codec/benches/codec.rs b/misc/quick-protobuf-codec/benches/codec.rs new file mode 100644 index 00000000000..0f6ce9469c5 --- /dev/null +++ b/misc/quick-protobuf-codec/benches/codec.rs @@ -0,0 +1,28 @@ +use asynchronous_codec::Encoder; +use bytes::BytesMut; +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use quick_protobuf_codec::{proto, Codec}; + +pub fn benchmark(c: &mut Criterion) { + for size in [1000, 10_000, 100_000, 1_000_000, 10_000_000] { + c.bench_with_input(BenchmarkId::new("encode", size), &size, |b, i| { + b.iter_batched( + || { + let mut out = BytesMut::new(); + out.reserve(i + 100); + let codec = Codec::::new(i + 100); + let msg = proto::Message { + data: vec![0; size], + }; + + (codec, out, msg) + }, + |(mut codec, mut out, msg)| codec.encode(msg, &mut out).unwrap(), + BatchSize::SmallInput, + ); + }); + } +} + +criterion_group!(benches, benchmark); +criterion_main!(benches); diff --git a/misc/quick-protobuf-codec/src/generated/mod.rs b/misc/quick-protobuf-codec/src/generated/mod.rs new file mode 100644 index 00000000000..b9f982f8dfd --- /dev/null +++ b/misc/quick-protobuf-codec/src/generated/mod.rs @@ -0,0 +1,2 @@ +// Automatically generated mod.rs +pub mod test; diff --git a/misc/quick-protobuf-codec/src/generated/test.proto b/misc/quick-protobuf-codec/src/generated/test.proto new file mode 100644 index 00000000000..5b1f46c0bfa --- /dev/null +++ b/misc/quick-protobuf-codec/src/generated/test.proto @@ -0,0 +1,7 @@ +syntax = "proto3"; + +package test; + +message Message { + bytes data = 1; +} diff --git a/misc/quick-protobuf-codec/src/generated/test.rs b/misc/quick-protobuf-codec/src/generated/test.rs new file mode 100644 index 00000000000..b353e6d9183 --- /dev/null +++ b/misc/quick-protobuf-codec/src/generated/test.rs @@ -0,0 +1,47 @@ +// Automatically generated rust module for 'test.proto' file + +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_imports)] +#![allow(unknown_lints)] +#![allow(clippy::all)] +#![cfg_attr(rustfmt, rustfmt_skip)] + + +use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result}; +use quick_protobuf::sizeofs::*; +use super::*; + +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Debug, Default, PartialEq, Clone)] +pub struct Message { + pub data: Vec, +} + +impl<'a> MessageRead<'a> for Message { + fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result { + let mut msg = Self::default(); + while !r.is_eof() { + match r.next_tag(bytes) { + Ok(10) => msg.data = r.read_bytes(bytes)?.to_owned(), + Ok(t) => { r.read_unknown(bytes, t)?; } + Err(e) => return Err(e), + } + } + Ok(msg) + } +} + +impl MessageWrite for Message { + fn get_size(&self) -> usize { + 0 + + if self.data.is_empty() { 0 } else { 1 + sizeof_len((&self.data).len()) } + } + + fn write_message(&self, w: &mut Writer) -> Result<()> { + if !self.data.is_empty() { w.write_with_tag(10, |w| w.write_bytes(&**&self.data))?; } + Ok(()) + } +} + diff --git a/misc/quick-protobuf-codec/src/lib.rs b/misc/quick-protobuf-codec/src/lib.rs index 2d1fda99a70..c50b1264af6 100644 --- a/misc/quick-protobuf-codec/src/lib.rs +++ b/misc/quick-protobuf-codec/src/lib.rs @@ -1,16 +1,21 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use asynchronous_codec::{Decoder, Encoder}; -use bytes::{Bytes, BytesMut}; -use 
quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer}; +use bytes::{Buf, BufMut, BytesMut}; +use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer, WriterBackend}; +use std::io; use std::marker::PhantomData; -use unsigned_varint::codec::UviBytes; + +mod generated; + +#[doc(hidden)] // NOT public API. Do not use. +pub use generated::test as proto; /// [`Codec`] implements [`Encoder`] and [`Decoder`], uses [`unsigned_varint`] /// to prefix messages with their length and uses [`quick_protobuf`] and a provided /// `struct` implementing [`MessageRead`] and [`MessageWrite`] to do the encoding. pub struct Codec { - uvi: UviBytes, + max_message_len_bytes: usize, phantom: PhantomData<(In, Out)>, } @@ -21,10 +26,8 @@ impl Codec { /// Protobuf message. The limit does not include the bytes needed for the /// [`unsigned_varint`]. pub fn new(max_message_len_bytes: usize) -> Self { - let mut uvi = UviBytes::default(); - uvi.set_max_len(max_message_len_bytes); Self { - uvi, + max_message_len_bytes, phantom: PhantomData, } } @@ -35,16 +38,32 @@ impl Encoder for Codec { type Error = Error; fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> { - let mut encoded_msg = Vec::new(); - let mut writer = Writer::new(&mut encoded_msg); - item.write_message(&mut writer) - .expect("Encoding to succeed"); - self.uvi.encode(Bytes::from(encoded_msg), dst)?; + write_length(&item, dst); + write_message(&item, dst)?; Ok(()) } } +/// Write the message's length (i.e. `size`) to `dst` as a variable-length integer. +fn write_length(message: &impl MessageWrite, dst: &mut BytesMut) { + let message_length = message.get_size(); + + let mut uvi_buf = unsigned_varint::encode::usize_buffer(); + let encoded_length = unsigned_varint::encode::usize(message_length, &mut uvi_buf); + + dst.extend_from_slice(encoded_length); +} + +/// Write the message itself to `dst`. +fn write_message(item: &impl MessageWrite, dst: &mut BytesMut) -> io::Result<()> { + let mut writer = Writer::new(BytesMutWriterBackend::new(dst)); + item.write_message(&mut writer) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + + Ok(()) +} + impl Decoder for Codec where Out: for<'a> MessageRead<'a>, @@ -53,24 +72,203 @@ where type Error = Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - let msg = match self.uvi.decode(src)? { - None => return Ok(None), - Some(msg) => msg, + let (message_length, remaining) = match unsigned_varint::decode::usize(src) { + Ok((len, remaining)) => (len, remaining), + Err(unsigned_varint::decode::Error::Insufficient) => return Ok(None), + Err(e) => return Err(Error(io::Error::new(io::ErrorKind::InvalidData, e))), }; - let mut reader = BytesReader::from_bytes(&msg); - let message = Self::Item::from_reader(&mut reader, &msg) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + if message_length > self.max_message_len_bytes { + return Err(Error(io::Error::new( + io::ErrorKind::PermissionDenied, + format!( + "message with {message_length}b exceeds maximum of {}b", + self.max_message_len_bytes + ), + ))); + } + + // Compute how many bytes the varint itself consumed. + let varint_length = src.len() - remaining.len(); + + // Ensure we can read an entire message. + if src.len() < (message_length + varint_length) { + return Ok(None); + } + + // Safe to advance buffer now. 
+        src.advance(varint_length);
+
+        let message = src.split_to(message_length);
+
+        let mut reader = BytesReader::from_bytes(&message);
+        let message = Self::Item::from_reader(&mut reader, &message)
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
+        Ok(Some(message))
     }
 }
+struct BytesMutWriterBackend<'a> {
+    dst: &'a mut BytesMut,
+}
+
+impl<'a> BytesMutWriterBackend<'a> {
+    fn new(dst: &'a mut BytesMut) -> Self {
+        Self { dst }
+    }
+}
+
+impl<'a> WriterBackend for BytesMutWriterBackend<'a> {
+    fn pb_write_u8(&mut self, x: u8) -> quick_protobuf::Result<()> {
+        self.dst.put_u8(x);
+
+        Ok(())
+    }
+
+    fn pb_write_u32(&mut self, x: u32) -> quick_protobuf::Result<()> {
+        self.dst.put_u32_le(x);
+
+        Ok(())
+    }
+
+    fn pb_write_i32(&mut self, x: i32) -> quick_protobuf::Result<()> {
+        self.dst.put_i32_le(x);
+
+        Ok(())
+    }
+
+    fn pb_write_f32(&mut self, x: f32) -> quick_protobuf::Result<()> {
+        self.dst.put_f32_le(x);
+
+        Ok(())
+    }
+
+    fn pb_write_u64(&mut self, x: u64) -> quick_protobuf::Result<()> {
+        self.dst.put_u64_le(x);
+
+        Ok(())
+    }
+
+    fn pb_write_i64(&mut self, x: i64) -> quick_protobuf::Result<()> {
+        self.dst.put_i64_le(x);
+
+        Ok(())
+    }
+
+    fn pb_write_f64(&mut self, x: f64) -> quick_protobuf::Result<()> {
+        self.dst.put_f64_le(x);
+
+        Ok(())
+    }
+
+    fn pb_write_all(&mut self, buf: &[u8]) -> quick_protobuf::Result<()> {
+        self.dst.put_slice(buf);
+
+        Ok(())
+    }
+}
+
 #[derive(thiserror::Error, Debug)]
 #[error("Failed to encode/decode message")]
-pub struct Error(#[from] std::io::Error);
+pub struct Error(#[from] io::Error);
-impl From<Error> for std::io::Error {
+impl From<Error> for io::Error {
     fn from(e: Error) -> Self {
         e.0
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::proto;
+    use asynchronous_codec::FramedRead;
+    use futures::io::Cursor;
+    use futures::{FutureExt, StreamExt};
+    use quickcheck::{Arbitrary, Gen, QuickCheck};
+    use std::error::Error;
+
+    #[test]
+    fn honors_max_message_length() {
+        let codec = Codec::<proto::Message>::new(1);
+        let mut src = varint_zeroes(100);
+
+        let mut read = FramedRead::new(Cursor::new(&mut src), codec);
+        let err = read.next().now_or_never().unwrap().unwrap().unwrap_err();
+
+        assert_eq!(
+            err.source().unwrap().to_string(),
+            "message with 100b exceeds maximum of 1b"
+        )
+    }
+
+    #[test]
+    fn empty_bytes_mut_does_not_panic() {
+        let mut codec = Codec::<proto::Message>::new(100);
+
+        let mut src = varint_zeroes(100);
+        src.truncate(50);
+
+        let result = codec.decode(&mut src);
+
+        assert!(result.unwrap().is_none());
+        assert_eq!(
+            src.len(),
+            50,
+            "to not modify `src` if we cannot read a full message"
+        )
+    }
+
+    #[test]
+    fn only_partial_message_in_bytes_mut_does_not_panic() {
+        let mut codec = Codec::<proto::Message>::new(100);
+
+        let result = codec.decode(&mut BytesMut::new());
+
+        assert!(result.unwrap().is_none());
+    }
+
+    #[test]
+    fn handles_arbitrary_initial_capacity() {
+        fn prop(message: proto::Message, initial_capacity: u16) {
+            let mut buffer = BytesMut::with_capacity(initial_capacity as usize);
+            let mut codec = Codec::<proto::Message>::new(u32::MAX as usize);
+
+            codec.encode(message.clone(), &mut buffer).unwrap();
+            let decoded = codec.decode(&mut buffer).unwrap().unwrap();
+
+            assert_eq!(message, decoded);
+        }
+
+        QuickCheck::new().quickcheck(prop as fn(_, _) -> _)
+    }
+
+    /// Constructs a [`BytesMut`] of the provided length where the message is all zeros.
+    fn varint_zeroes(length: usize) -> BytesMut {
+        let mut buf = unsigned_varint::encode::usize_buffer();
+        let encoded_length = unsigned_varint::encode::usize(length, &mut buf);
+
+        let mut src = BytesMut::new();
+        src.extend_from_slice(encoded_length);
+        src.extend(std::iter::repeat(0).take(length));
+        src
+    }
+
+    impl Arbitrary for proto::Message {
+        fn arbitrary(g: &mut Gen) -> Self {
+            Self {
+                data: Vec::arbitrary(g),
+            }
+        }
+    }
+
+    #[derive(Debug)]
+    struct Dummy;
+
+    impl<'a> MessageRead<'a> for Dummy {
+        fn from_reader(_: &mut BytesReader, _: &'a [u8]) -> quick_protobuf::Result<Self> {
+            todo!()
+        }
+    }
+}
diff --git a/misc/quick-protobuf-codec/tests/large_message.rs b/misc/quick-protobuf-codec/tests/large_message.rs
new file mode 100644
index 00000000000..65dafe065d1
--- /dev/null
+++ b/misc/quick-protobuf-codec/tests/large_message.rs
@@ -0,0 +1,16 @@
+use asynchronous_codec::Encoder;
+use bytes::BytesMut;
+use quick_protobuf_codec::proto;
+use quick_protobuf_codec::Codec;
+
+#[test]
+fn encode_large_message() {
+    let mut codec = Codec::<proto::Message>::new(1_001_000);
+    let mut dst = BytesMut::new();
+    dst.reserve(1_001_000);
+    let message = proto::Message {
+        data: vec![0; 1_000_000],
+    };
+
+    codec.encode(message, &mut dst).unwrap();
+}
From d1d90c4e042fe7c2010024eae5006687ca4a6b79 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 27 Nov 2023 09:01:33 +0000
Subject: [PATCH 17/48] deps: bump if-watch from 3.1.0 to 3.2.0

Pull-Request: #4918.

---
 Cargo.lock                   | 12 ++++++------
 protocols/mdns/Cargo.toml    |  2 +-
 transports/quic/Cargo.toml   |  2 +-
 transports/tcp/Cargo.toml    |  2 +-
 transports/webrtc/Cargo.toml |  2 +-
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 46df5b64478..b2df530a894 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2134,21 +2134,21 @@ dependencies = [
 
 [[package]]
 name = "if-addrs"
-version = "0.7.0"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9"
+checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a"
 dependencies = [
  "libc",
- "winapi",
+ "windows-sys",
 ]
 
 [[package]]
 name = "if-watch"
-version = "3.1.0"
+version = "3.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbb892e5777fe09e16f3d44de7802f4daa7267ecbe8c466f19d94e25bb0c303e"
+checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e"
 dependencies = [
- "async-io 1.13.0",
+ "async-io 2.2.0",
 "core-foundation",
 "fnv",
 "futures",
diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml
index 5c495e7cb15..b314aead811 100644
--- a/protocols/mdns/Cargo.toml
+++ b/protocols/mdns/Cargo.toml
@@ -15,7 +15,7 @@ async-std = { version = "1.12.0", optional = true }
 async-io = { version = "2.2.0", optional = true }
 data-encoding = "2.4.0"
 futures = "0.3.29"
-if-watch = "3.1.0"
+if-watch = "3.2.0"
 libp2p-core = { workspace = true }
 libp2p-swarm = { workspace = true }
 libp2p-identity = { workspace = true }
diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml
index c59c9b64f1c..5f3fd5cfb43 100644
--- a/transports/quic/Cargo.toml
+++ b/transports/quic/Cargo.toml
@@ -13,7 +13,7 @@ async-std = { version = "1.12.0", optional = true }
 bytes = "1.5.0"
 futures = "0.3.29"
 futures-timer = "3.0.2"
-if-watch = "3.1.0"
+if-watch = "3.2.0"
 libp2p-core = { workspace = true }
 libp2p-tls = { workspace = true }
 libp2p-identity = { workspace = true }
diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index 3974459cbe4..d124d5fd748 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -14,7 +14,7 @@ categories = ["network-programming", "asynchronous"] async-io = { version = "2.2.0", optional = true } futures = "0.3.29" futures-timer = "3.0" -if-watch = "3.1.0" +if-watch = "3.2.0" libc = "0.2.150" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index 7578c9d5257..8aa43652392 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -16,7 +16,7 @@ bytes = "1" futures = "0.3" futures-timer = "3" hex = "0.4" -if-watch = "3.1" +if-watch = "3.2" libp2p-core = { workspace = true } libp2p-noise = { workspace = true } libp2p-identity = { workspace = true } From ebdc2a580cc11c33995c86812c1b7cd51b2033ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 10:47:15 +0000 Subject: [PATCH 18/48] deps: bump sysinfo from 0.29.10 to 0.29.11 Pull-Request: #4923. --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b2df530a894..e6f6c9bf249 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5521,9 +5521,9 @@ dependencies = [ [[package]] name = "sysinfo" -version = "0.29.10" +version = "0.29.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a18d114d420ada3a891e6bc8e96a2023402203296a47cdd65083377dad18ba5" +checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666" dependencies = [ "cfg-if", "core-foundation-sys", From 04eced792ea6bf703ac67c84249254c796e8c40a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 10:57:33 +0000 Subject: [PATCH 19/48] deps: bump serde from 1.0.192 to 1.0.193 Pull-Request: #4926. 
--- Cargo.lock | 8 ++++---- hole-punching-tests/Cargo.toml | 2 +- misc/keygen/Cargo.toml | 2 +- misc/server/Cargo.toml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e6f6c9bf249..05320fa31fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5141,18 +5141,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.192" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", diff --git a/hole-punching-tests/Cargo.toml b/hole-punching-tests/Cargo.toml index a1a191d2e36..a6f1bd6571c 100644 --- a/hole-punching-tests/Cargo.toml +++ b/hole-punching-tests/Cargo.toml @@ -13,6 +13,6 @@ libp2p = { path = "../libp2p", features = ["tokio", "dcutr", "identify", "macros tracing = "0.1.37" redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] } tokio = { version = "1.34.0", features = ["full"] } -serde = { version = "1.0.192", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" either = "1.9.0" diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index 044a093e991..82619a1035d 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -15,7 +15,7 @@ release = false [dependencies] clap = { version = "4.4.8", features = ["derive"] } zeroize = "1" -serde = { version = "1.0.192", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" libp2p-core = { workspace = true } base64 = "0.21.5" diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml index 5c197c6506b..9cc77b09d18 100644 --- a/misc/server/Cargo.toml +++ b/misc/server/Cargo.toml @@ -18,7 +18,7 @@ futures-timer = "3" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic"] } prometheus-client = { workspace = true } -serde = "1.0.192" +serde = "1.0.193" serde_derive = "1.0.125" serde_json = "1.0" tokio = { version = "1", features = ["rt-multi-thread", "macros"] } From 8812e090edf31ce1471b97dea124b8d15d569e77 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 11:07:22 +0000 Subject: [PATCH 20/48] deps: bump data-encoding from 2.4.0 to 2.5.0 Pull-Request: #4928. 
--- Cargo.lock | 4 ++-- protocols/mdns/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 05320fa31fb..f633f5a3a19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1161,9 +1161,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "data-encoding-macro" diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index b314aead811..8a1475fdd40 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-std = { version = "1.12.0", optional = true } async-io = { version = "2.2.0", optional = true } -data-encoding = "2.4.0" +data-encoding = "2.5.0" futures = "0.3.29" if-watch = "3.2.0" libp2p-core = { workspace = true } From dde6db12d344f94d145ca89a9d883c26fc1333de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 11:17:19 +0000 Subject: [PATCH 21/48] deps: bump url from 2.4.1 to 2.5.0 Pull-Request: #4929. --- Cargo.lock | 26 ++++++++++++++++++-------- transports/websocket/Cargo.toml | 2 +- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f633f5a3a19..c1cd8133851 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1549,9 +1549,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -1901,7 +1901,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.4.0", "ipnet", "once_cell", "rand 0.8.5", @@ -2132,6 +2132,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.10.2" @@ -4108,9 +4118,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" @@ -6108,12 +6118,12 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna", + "idna 0.5.0", "percent-encoding", ] diff --git a/transports/websocket/Cargo.toml b/transports/websocket/Cargo.toml index b4c56539139..31f91874f68 100644 --- a/transports/websocket/Cargo.toml +++ b/transports/websocket/Cargo.toml @@ -21,7 +21,7 @@ pin-project-lite = 
"0.2.13" rw-stream-sink = { workspace = true } soketto = "0.7.0" tracing = "0.1.37" -url = "2.4" +url = "2.5" webpki-roots = "0.25" [dev-dependencies] From 4759ba8244242628164b7ec9e7dc99d67269e39f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 11:27:57 +0000 Subject: [PATCH 22/48] deps: bump lru from 0.12.0 to 0.12.1 Pull-Request: #4931. --- Cargo.lock | 4 ++-- protocols/dcutr/Cargo.toml | 2 +- protocols/identify/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1cd8133851..601d14941b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3469,9 +3469,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60" +checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" dependencies = [ "hashbrown 0.14.0", ] diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index bf70e3b7868..1c2b0b52d5f 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -24,7 +24,7 @@ quick-protobuf-codec = { workspace = true } thiserror = "1.0" tracing = "0.1.37" void = "1" -lru = "0.12.0" +lru = "0.12.1" futures-bounded = { workspace = true } [dev-dependencies] diff --git a/protocols/identify/Cargo.toml b/protocols/identify/Cargo.toml index c5f8b87e85e..8168a3a3fc6 100644 --- a/protocols/identify/Cargo.toml +++ b/protocols/identify/Cargo.toml @@ -18,7 +18,7 @@ futures-bounded = { workspace = true } libp2p-core = { workspace = true } libp2p-swarm = { workspace = true } libp2p-identity = { workspace = true } -lru = "0.12.0" +lru = "0.12.1" quick-protobuf-codec = { workspace = true } quick-protobuf = "0.8" smallvec = "1.11.2" From dfce3ccecaab40c09b83d67beb914a6a37bc86ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 12:33:01 +0000 Subject: [PATCH 23/48] deps: bump yamux from 0.12.0 to 0.12.1 Pull-Request: #4930. --- Cargo.lock | 4 ++-- muxers/yamux/CHANGELOG.md | 7 +++++++ muxers/yamux/src/lib.rs | 2 ++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 601d14941b4..168ffb2034c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6774,9 +6774,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0329ef377816896f014435162bb3711ea7a07729c23d0960e6f8048b21b8fe91" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" dependencies = [ "futures", "log", diff --git a/muxers/yamux/CHANGELOG.md b/muxers/yamux/CHANGELOG.md index a2983b31572..c8534166ea6 100644 --- a/muxers/yamux/CHANGELOG.md +++ b/muxers/yamux/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.45.1 + +- Deprecate `WindowUpdateMode::on_receive`. + It does not enforce flow-control, i.e. breaks backpressure. + Use `WindowUpdateMode::on_read` instead. + See `yamux` crate version `v0.12.1` and [Yamux PR #177](https://github.com/libp2p/rust-yamux/pull/177). + ## 0.45.0 - Migrate to `{In,Out}boundConnectionUpgrade` traits. 
diff --git a/muxers/yamux/src/lib.rs b/muxers/yamux/src/lib.rs index d10cdfa244c..fc7ff430396 100644 --- a/muxers/yamux/src/lib.rs +++ b/muxers/yamux/src/lib.rs @@ -231,7 +231,9 @@ impl WindowUpdateMode { /// > size must be tuned appropriately for the desired /// > throughput and level of tolerance for (temporarily) /// > slow receivers. + #[deprecated(note = "Use `WindowUpdateMode::on_read` instead.")] pub fn on_receive() -> Self { + #[allow(deprecated)] WindowUpdateMode(yamux::WindowUpdateMode::OnReceive) } From 2797df4c1e7b7b0f3cc1f60d1067bb546044cf86 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 00:38:57 +0000 Subject: [PATCH 24/48] deps: bump cbor4ii from 0.3.1 to 0.3.2 Pull-Request: #4922. --- Cargo.lock | 4 ++-- protocols/request-response/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 168ffb2034c..57d490c2fda 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -740,9 +740,9 @@ dependencies = [ [[package]] name = "cbor4ii" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13e8c816014cad3f58c2f0607677e8d2c6f76754dd8e735461a440b27b95199c" +checksum = "59b4c883b9cc4757b061600d39001d4d0232bece4a3174696cf8f58a14db107d" dependencies = [ "serde", ] diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 1bfd03e1520..2c7692fb9b4 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] async-trait = "0.1" -cbor4ii = { version = "0.3.1", features = ["serde1", "use_std"], optional = true } +cbor4ii = { version = "0.3.2", features = ["serde1", "use_std"], optional = true } futures = "0.3.29" instant = "0.1.12" libp2p-core = { workspace = true } From 7ac74bad204b554980a20d8f61a82a2087c4e070 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 00:51:47 +0000 Subject: [PATCH 25/48] deps: bump async-io from 2.2.0 to 2.2.1 Pull-Request: #4919. 
--- Cargo.lock | 141 ++++++++++++++++++++++++++++---------- protocols/mdns/Cargo.toml | 2 +- transports/tcp/Cargo.toml | 2 +- 3 files changed, 105 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57d490c2fda..7356f080099 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -128,7 +128,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -138,7 +138,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -300,9 +300,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41ed9d5715c2d329bf1b4da8d60455b99b187f27ba726df2883799af9af60997" +checksum = "d6d3b15875ba253d1110c740755e246537483f152fa334f91abd7fe84c88b3ff" dependencies = [ "async-lock 3.1.0", "cfg-if", @@ -314,8 +314,7 @@ dependencies = [ "rustix 0.38.21", "slab", "tracing", - "waker-fn", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -365,7 +364,7 @@ dependencies = [ "futures-lite 1.13.0", "rustix 0.37.25", "signal-hook", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1418,7 +1417,7 @@ checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2149,7 +2148,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2158,7 +2157,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io 2.2.0", + "async-io 2.2.1", "core-foundation", "fnv", "futures", @@ -2294,7 +2293,7 @@ checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ "hermit-abi", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2305,7 +2304,7 @@ checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ "socket2 0.5.5", "widestring", - "windows-sys", + "windows-sys 0.48.0", "winreg", ] @@ -2351,7 +2350,7 @@ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", "rustix 0.38.21", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2758,7 +2757,7 @@ dependencies = [ name = "libp2p-mdns" version = "0.45.1" dependencies = [ - "async-io 2.2.0", + "async-io 2.2.1", "async-std", "data-encoding", "futures", @@ -3169,7 +3168,7 @@ dependencies = [ name = "libp2p-tcp" version = "0.41.0" dependencies = [ - "async-io 2.2.0", + "async-io 2.2.1", "async-std", "futures", "futures-timer", @@ -3605,7 +3604,7 @@ checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4088,7 +4087,7 @@ dependencies = [ "libc", "redox_syscall 0.3.5", "smallvec", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -4228,7 +4227,7 @@ dependencies = [ "libc", "log", 
"pin-project-lite", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4242,7 +4241,7 @@ dependencies = [ "pin-project-lite", "rustix 0.38.21", "tracing", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4457,7 +4456,7 @@ dependencies = [ "libc", "socket2 0.5.5", "tracing", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4786,7 +4785,7 @@ dependencies = [ "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4937,7 +4936,7 @@ dependencies = [ "io-lifetimes", "libc", "linux-raw-sys 0.3.8", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -4950,7 +4949,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.10", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5054,7 +5053,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5394,7 +5393,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5575,7 +5574,7 @@ dependencies = [ "fastrand 2.0.0", "redox_syscall 0.4.1", "rustix 0.38.21", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -5723,7 +5722,7 @@ dependencies = [ "signal-hook-registry", "socket2 0.5.5", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -6639,7 +6638,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ "windows-core", - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -6648,7 +6647,7 @@ version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", ] [[package]] @@ -6657,7 +6656,16 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.0", ] [[package]] @@ -6666,13 +6674,28 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + 
"windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] [[package]] @@ -6681,42 +6704,84 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + [[package]] name = "winreg" version = "0.50.0" @@ -6724,7 +6789,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] diff --git a/protocols/mdns/Cargo.toml b/protocols/mdns/Cargo.toml index 8a1475fdd40..97e7204cd63 100644 --- a/protocols/mdns/Cargo.toml +++ b/protocols/mdns/Cargo.toml @@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"] 
[dependencies] async-std = { version = "1.12.0", optional = true } -async-io = { version = "2.2.0", optional = true } +async-io = { version = "2.2.1", optional = true } data-encoding = "2.5.0" futures = "0.3.29" if-watch = "3.2.0" diff --git a/transports/tcp/Cargo.toml b/transports/tcp/Cargo.toml index d124d5fd748..13eeb01786d 100644 --- a/transports/tcp/Cargo.toml +++ b/transports/tcp/Cargo.toml @@ -11,7 +11,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"] categories = ["network-programming", "asynchronous"] [dependencies] -async-io = { version = "2.2.0", optional = true } +async-io = { version = "2.2.1", optional = true } futures = "0.3.29" futures-timer = "3.0" if-watch = "3.2.0" From 9d2bfa1c79a9f71f51d8e26499a498e83ea0b6f6 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 28 Nov 2023 12:10:53 +1100 Subject: [PATCH 26/48] chore(yamux): bump version in `Cargo.toml` Related: #4930. Pull-Request: #4939. --- Cargo.lock | 2 +- Cargo.toml | 2 +- muxers/yamux/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7356f080099..ee687e4b665 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3370,7 +3370,7 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.45.0" +version = "0.45.1" dependencies = [ "async-std", "futures", diff --git a/Cargo.toml b/Cargo.toml index 1dce34011c8..346b316d4dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -112,7 +112,7 @@ libp2p-webrtc-websys = { version = "0.2.0-alpha", path = "transports/webrtc-webs libp2p-websocket = { version = "0.43.0", path = "transports/websocket" } libp2p-websocket-websys = { version = "0.3.1", path = "transports/websocket-websys" } libp2p-webtransport-websys = { version = "0.2.0", path = "transports/webtransport-websys" } -libp2p-yamux = { version = "0.45.0", path = "muxers/yamux" } +libp2p-yamux = { version = "0.45.1", path = "muxers/yamux" } multiaddr = "0.18.1" multihash = "0.19.1" multistream-select = { version = "0.13.0", path = "misc/multistream-select" } diff --git a/muxers/yamux/Cargo.toml b/muxers/yamux/Cargo.toml index ec3d4b85c5b..1456238121b 100644 --- a/muxers/yamux/Cargo.toml +++ b/muxers/yamux/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-yamux" edition = "2021" rust-version = { workspace = true } description = "Yamux multiplexing protocol for libp2p" -version = "0.45.0" +version = "0.45.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" From 89fe06876014245f3e91f15c8408754d032d6a8d Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 28 Nov 2023 12:20:39 +1100 Subject: [PATCH 27/48] ci: remove redundant comparison of features Pull-Request: #4940. --- .github/workflows/ci.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f0c852367a2..bd260eb741a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -325,8 +325,6 @@ jobs: ALL_FEATURES=$(cargo metadata --format-version=1 --no-deps | jq -r '.packages[] | select(.name == "libp2p") | .features | keys | map(select(. 
!= "full")) | sort | join(" ")') FULL_FEATURE=$(cargo metadata --format-version=1 --no-deps | jq -r '.packages[] | select(.name == "libp2p") | .features["full"] | sort | join(" ")') - test "$ALL_FEATURES = $FULL_FEATURE" - echo "$ALL_FEATURES"; echo "$FULL_FEATURE"; From edc557cabb3b3b6643642f008ddabe77e4ab0442 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 28 Nov 2023 14:48:22 +1100 Subject: [PATCH 28/48] ci: bump `axum`, `tower` and `tower-http` together Pull-Request: #4938. --- .github/dependabot.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b40857107c9..a0656e6162c 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -17,6 +17,11 @@ updates: patterns: - "opentelemetry*" - "tracing-opentelemetry" + axum: + patterns: + - "axum" + - "tower" + - "tower-http" - package-ecosystem: "github-actions" directory: "/" schedule: From 69fc1ac497f4a6788b86730591a0189d7e955a6f Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 28 Nov 2023 14:58:41 +1100 Subject: [PATCH 29/48] ci: always compare version in `CHANGELOG.md` and `Cargo.toml` Ensuring that the version numbers within the changelog and the manifest are the same is useful independent of the kind of change. Related: #4939. Pull-Request: #4941. --- .github/workflows/ci.yml | 22 +++++++++++++++++--- scripts/ensure-version-bump-and-changelog.sh | 13 ++---------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd260eb741a..04dab309902 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -61,16 +61,32 @@ jobs: with: tool: tomlq + - name: Extract version from manifest + run: | + CRATE_VERSION=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .version') + + echo "CRATE_VERSION=$CRATE_VERSION" >> $GITHUB_ENV + - name: Enforce version in `workspace.dependencies` matches latest version if: env.CRATE != 'libp2p' run: | - PACKAGE_VERSION=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .version') SPECIFIED_VERSION=$(tomlq "workspace.dependencies.$CRATE.version" --file ./Cargo.toml) - echo "Package version: $PACKAGE_VERSION"; + echo "Package version: $CRATE_VERSION"; echo "Specified version: $SPECIFIED_VERSION"; - test "$PACKAGE_VERSION" = "$SPECIFIED_VERSION" || test "=$PACKAGE_VERSION" = "$SPECIFIED_VERSION" + test "$CRATE_VERSION" = "$SPECIFIED_VERSION" || test "=$CRATE_VERSION" = "$SPECIFIED_VERSION" + + - name: Enforce version in CHANGELOG.md matches version in manifest + run: | + MANIFEST_PATH=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .manifest_path') + DIR_TO_CRATE=$(dirname "$MANIFEST_PATH") + VERSION_IN_CHANGELOG=$(awk -F' ' '/^## [0-9]+\.[0-9]+\.[0-9]+/{print $2; exit}' "$DIR_TO_CRATE/CHANGELOG.md") + + echo "Package version: $CRATE_VERSION"; + echo "Changelog version: $VERSION_IN_CHANGELOG"; + + test "$CRATE_VERSION" = "$VERSION_IN_CHANGELOG" - name: Ensure manifest and CHANGELOG are properly updated if: > diff --git a/scripts/ensure-version-bump-and-changelog.sh b/scripts/ensure-version-bump-and-changelog.sh index 1470ec3a5e4..a7a0992005a 100755 --- a/scripts/ensure-version-bump-and-changelog.sh +++ b/scripts/ensure-version-bump-and-changelog.sh @@ -10,15 +10,6 @@ MERGE_BASE=$(git merge-base "$HEAD_SHA" "$PR_BASE") # Find the merge base. 
This SRC_DIFF_TO_BASE=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-status -- "$DIR_TO_CRATE/src" "$DIR_TO_CRATE/Cargo.toml") CHANGELOG_DIFF=$(git diff "$HEAD_SHA".."$MERGE_BASE" --name-only -- "$DIR_TO_CRATE/CHANGELOG.md") -VERSION_IN_CHANGELOG=$(awk -F' ' '/^## [0-9]+\.[0-9]+\.[0-9]+/{print $2; exit}' "$DIR_TO_CRATE/CHANGELOG.md") -VERSION_IN_MANIFEST=$(cargo metadata --format-version=1 --no-deps | jq -e -r '.packages[] | select(.name == "'"$CRATE"'") | .version') - -# First, ensure version in Cargo.toml and CHANGELOG are in sync. This should always hold, regardless of whether the code in the crate was modified. -if [[ "$VERSION_IN_CHANGELOG" != "$VERSION_IN_MANIFEST" ]]; then - echo "Version in Cargo.toml ($VERSION_IN_MANIFEST) does not match version in CHANGELOG ($VERSION_IN_CHANGELOG)" - exit 1 -fi - # If the source files of this crate weren't touched in this PR, exit early. if [ -z "$SRC_DIFF_TO_BASE" ]; then exit 0; @@ -31,7 +22,7 @@ if [ -z "$CHANGELOG_DIFF" ]; then fi # Code was touched, ensure the version used in the manifest hasn't been released yet. -if git tag | grep -q "^$CRATE-v${VERSION_IN_MANIFEST}$"; then - echo "v$VERSION_IN_MANIFEST of '$CRATE' has already been released, please bump the version." +if git tag | grep -q "^$CRATE-v${CRATE_VERSION}$"; then + echo "v$CRATE_VERSION of '$CRATE' has already been released, please bump the version." exit 1 fi From f05a81eebe70861ee8fee35f0b1d433655a02c84 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 08:59:44 +0000 Subject: [PATCH 30/48] deps: bump wasm-bindgen from 0.2.88 to 0.2.89 Pull-Request: #4944. --- Cargo.lock | 20 ++++++++++---------- examples/browser-webrtc/Cargo.toml | 2 +- transports/webrtc-websys/Cargo.toml | 2 +- transports/websocket-websys/Cargo.toml | 2 +- transports/webtransport-websys/Cargo.toml | 2 +- wasm-tests/webtransport-tests/Cargo.toml | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee687e4b665..3adda0fe337 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6225,9 +6225,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -6235,9 +6235,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", @@ -6262,9 +6262,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6272,9 +6272,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", @@ -6285,9 +6285,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.88" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "wasm-bindgen-test" diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index 38407c8b667..9f69274f241 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -37,7 +37,7 @@ mime_guess = "2.0.4" js-sys = "0.3.65" libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen"] } libp2p-webrtc-websys = { workspace = true } -wasm-bindgen = "0.2.88" +wasm-bindgen = "0.2.89" wasm-bindgen-futures = "0.4.38" wasm-logger = { version = "0.2.0" } web-sys = { version = "0.3", features = ['Document', 'Element', 'HtmlElement', 'Node', 'Response', 'Window'] } diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 5e74b926b19..44607de30c0 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -26,7 +26,7 @@ send_wrapper = { version = "0.6.0", features = ["futures"] } serde = { version = "1.0", features = ["derive"] } thiserror = "1" tracing = "0.1.37" -wasm-bindgen = { version = "0.2.88" } +wasm-bindgen = { version = "0.2.89" } wasm-bindgen-futures = { version = "0.4.38" } web-sys = { version = "0.3.65", features = ["Document", "Location", "MessageEvent", "Navigator", "RtcCertificate", "RtcConfiguration", "RtcDataChannel", "RtcDataChannelEvent", "RtcDataChannelInit", "RtcDataChannelState", "RtcDataChannelType", "RtcPeerConnection", "RtcSdpType", "RtcSessionDescription", "RtcSessionDescriptionInit", "Window"] } diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index ffa2892e838..80fbe28f25b 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -19,7 +19,7 @@ tracing = "0.1.37" parking_lot = "0.12.1" send_wrapper = "0.6.0" thiserror = "1.0.50" -wasm-bindgen = "0.2.88" +wasm-bindgen = "0.2.89" web-sys = { version = "0.3.65", features = ["BinaryType", "CloseEvent", "MessageEvent", "WebSocket", "Window", "WorkerGlobalScope"] } # Passing arguments to the docsrs builder in order to properly document cfg's. 
diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 1eddd8e0cc5..1ca98209e78 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -24,7 +24,7 @@ multihash = { workspace = true } send_wrapper = { version = "0.6.0", features = ["futures"] } thiserror = "1.0.50" tracing = "0.1.37" -wasm-bindgen = "0.2.88" +wasm-bindgen = "0.2.89" wasm-bindgen-futures = "0.4.38" web-sys = { version = "0.3.65", features = [ "ReadableStreamDefaultReader", diff --git a/wasm-tests/webtransport-tests/Cargo.toml b/wasm-tests/webtransport-tests/Cargo.toml index 11d03f654fc..8d775b6c2fb 100644 --- a/wasm-tests/webtransport-tests/Cargo.toml +++ b/wasm-tests/webtransport-tests/Cargo.toml @@ -17,7 +17,7 @@ libp2p-noise = { workspace = true } libp2p-webtransport-websys = { workspace = true } multiaddr = { workspace = true } multihash = { workspace = true } -wasm-bindgen = "0.2.88" +wasm-bindgen = "0.2.89" wasm-bindgen-futures = "0.4.38" wasm-bindgen-test = "0.3.38" web-sys = { version = "0.3.65", features = ["Response", "Window"] } From 3fc1d18766b758d5b71989056e84f8257a8654cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 10:19:58 +0000 Subject: [PATCH 31/48] deps: bump clap from 4.4.8 to 4.4.10 Pull-Request: #4945. --- Cargo.lock | 8 ++++---- examples/autonat/Cargo.toml | 2 +- examples/dcutr/Cargo.toml | 2 +- examples/file-sharing/Cargo.toml | 2 +- examples/ipfs-kad/Cargo.toml | 2 +- examples/relay-server/Cargo.toml | 2 +- misc/keygen/Cargo.toml | 2 +- misc/server/Cargo.toml | 2 +- protocols/dcutr/Cargo.toml | 2 +- protocols/perf/Cargo.toml | 2 +- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3adda0fe337..a00cdcf021e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -849,9 +849,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.8" +version = "4.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64" +checksum = "41fffed7514f420abec6d183b1d3acfd9099c79c3a10a06ade4f8203f1411272" dependencies = [ "clap_builder", "clap_derive", @@ -859,9 +859,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.8" +version = "4.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc" +checksum = "63361bae7eef3771745f02d8d892bec2fee5f6e34af316ba556e7f97a7069ff1" dependencies = [ "anstream", "anstyle", diff --git a/examples/autonat/Cargo.toml b/examples/autonat/Cargo.toml index 5cfa11c0aa6..cca3b5e326e 100644 --- a/examples/autonat/Cargo.toml +++ b/examples/autonat/Cargo.toml @@ -10,7 +10,7 @@ release = false [dependencies] tokio = { version = "1.34", features = ["full"] } -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = "0.3.29" libp2p = { path = "../../libp2p", features = ["tokio", "tcp", "noise", "yamux", "autonat", "identify", "macros"] } tracing = "0.1.37" diff --git a/examples/dcutr/Cargo.toml b/examples/dcutr/Cargo.toml index c65e2ca91e6..8dcb403218e 100644 --- a/examples/dcutr/Cargo.toml +++ b/examples/dcutr/Cargo.toml @@ -9,7 +9,7 @@ license = "MIT" release = false [dependencies] -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = "0.3.29" futures-timer = "3.0" libp2p 
= { path = "../../libp2p", features = [ "dns", "dcutr", "identify", "macros", "noise", "ping", "quic", "relay", "rendezvous", "tcp", "tokio", "yamux"] } diff --git a/examples/file-sharing/Cargo.toml b/examples/file-sharing/Cargo.toml index d4c0ad13fed..52d1b3b9b20 100644 --- a/examples/file-sharing/Cargo.toml +++ b/examples/file-sharing/Cargo.toml @@ -11,7 +11,7 @@ release = false [dependencies] serde = { version = "1.0", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } either = "1.9" futures = "0.3.29" libp2p = { path = "../../libp2p", features = [ "async-std", "cbor", "dns", "kad", "noise", "macros", "request-response", "tcp", "websocket", "yamux"] } diff --git a/examples/ipfs-kad/Cargo.toml b/examples/ipfs-kad/Cargo.toml index 8c1ecebf022..7870d3adb79 100644 --- a/examples/ipfs-kad/Cargo.toml +++ b/examples/ipfs-kad/Cargo.toml @@ -11,7 +11,7 @@ release = false [dependencies] tokio = { version = "1.34", features = ["rt-multi-thread", "macros"] } async-trait = "0.1" -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } env_logger = "0.10" futures = "0.3.29" anyhow = "1.0.75" diff --git a/examples/relay-server/Cargo.toml b/examples/relay-server/Cargo.toml index 8cab5bedeab..fe18106e60c 100644 --- a/examples/relay-server/Cargo.toml +++ b/examples/relay-server/Cargo.toml @@ -9,7 +9,7 @@ license = "MIT" release = false [dependencies] -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } async-std = { version = "1.12", features = ["attributes"] } async-trait = "0.1" futures = "0.3.2" diff --git a/misc/keygen/Cargo.toml b/misc/keygen/Cargo.toml index 82619a1035d..9c1c37bd428 100644 --- a/misc/keygen/Cargo.toml +++ b/misc/keygen/Cargo.toml @@ -13,7 +13,7 @@ publish = false release = false [dependencies] -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } zeroize = "1" serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml index 9cc77b09d18..ba848eaeadb 100644 --- a/misc/server/Cargo.toml +++ b/misc/server/Cargo.toml @@ -12,7 +12,7 @@ license = "MIT" [dependencies] base64 = "0.21" -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = "0.3" futures-timer = "3" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } diff --git a/protocols/dcutr/Cargo.toml b/protocols/dcutr/Cargo.toml index 1c2b0b52d5f..1fc4b367dc8 100644 --- a/protocols/dcutr/Cargo.toml +++ b/protocols/dcutr/Cargo.toml @@ -29,7 +29,7 @@ futures-bounded = { workspace = true } [dev-dependencies] async-std = { version = "1.12.0", features = ["attributes"] } -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } libp2p-dns = { workspace = true, features = ["async-std"] } libp2p-identify = { workspace = true } libp2p-noise = { workspace = true } diff --git a/protocols/perf/Cargo.toml b/protocols/perf/Cargo.toml index 4d001e87d30..c2fc146f231 100644 --- a/protocols/perf/Cargo.toml +++ b/protocols/perf/Cargo.toml @@ -12,7 +12,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] anyhow = "1" -clap = { version = "4.4.8", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = "0.3.29" 
futures-bounded = { workspace = true } futures-timer = "3.0" From c796200df55d2ca3ff8fa8e99e530b63f9851e09 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 10:30:35 +0000 Subject: [PATCH 32/48] deps: bump js-sys from 0.3.65 to 0.3.66 Pull-Request: #4947. --- Cargo.lock | 4 ++-- examples/browser-webrtc/Cargo.toml | 2 +- transports/websocket-websys/Cargo.toml | 2 +- transports/webtransport-websys/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a00cdcf021e..e91391a515a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2370,9 +2370,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" -version = "0.3.65" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index 9f69274f241..2607d0c1056 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -34,7 +34,7 @@ tower-http = { version = "0.4.0", features = ["cors"] } mime_guess = "2.0.4" [target.'cfg(target_arch = "wasm32")'.dependencies] -js-sys = "0.3.65" +js-sys = "0.3.66" libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen"] } libp2p-webrtc-websys = { workspace = true } wasm-bindgen = "0.2.89" diff --git a/transports/websocket-websys/Cargo.toml b/transports/websocket-websys/Cargo.toml index 80fbe28f25b..b0493533e26 100644 --- a/transports/websocket-websys/Cargo.toml +++ b/transports/websocket-websys/Cargo.toml @@ -13,7 +13,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] bytes = "1.4.0" futures = "0.3.29" -js-sys = "0.3.65" +js-sys = "0.3.66" libp2p-core = { workspace = true } tracing = "0.1.37" parking_lot = "0.12.1" diff --git a/transports/webtransport-websys/Cargo.toml b/transports/webtransport-websys/Cargo.toml index 1ca98209e78..d21730e42c4 100644 --- a/transports/webtransport-websys/Cargo.toml +++ b/transports/webtransport-websys/Cargo.toml @@ -15,7 +15,7 @@ categories = ["network-programming", "asynchronous"] [dependencies] futures = "0.3.29" -js-sys = "0.3.65" +js-sys = "0.3.66" libp2p-core = { workspace = true } libp2p-identity = { workspace = true } libp2p-noise = { workspace = true } From ee17df9c5324a27dab9edbbe9f6deed1e36b9f45 Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Tue, 28 Nov 2023 23:04:26 +1100 Subject: [PATCH 33/48] chore(identity): don't inherit rust-version from workspace Another data point in favor of #4742 just popped up. With `libp2p-identity` being part of the workspace, we accidentally published a patch release that bumps the MSRV which is something we normally consider a breaking change. This PR freezes the MSRV of `libp2p-identity` to avoid this problem in the future. Long-term, we should do #4742 to avoid these kind of mistakes instead of special-casing crates within the repository. Pull-Request: #4866. 
--- identity/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/identity/Cargo.toml b/identity/Cargo.toml index f70db830d3f..e7aa15fef07 100644 --- a/identity/Cargo.toml +++ b/identity/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-identity" version = "0.2.8" edition = "2021" description = "Data structures and algorithms for identifying peers in libp2p." -rust-version = { workspace = true } +rust-version = "1.73.0" # MUST NOT inherit from workspace because we don't want to publish breaking changes to `libp2p-identity`. license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" keywords = ["peer-to-peer", "libp2p", "networking", "cryptography"] From 4d7a535c7fa750ba88622e7d7cbbbbbb6884985c Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Wed, 29 Nov 2023 00:42:17 +1100 Subject: [PATCH 34/48] refactor(relay): use oneshot's to track requested streams This is much cleaner as it allows us to construct a single `Future` that expresses the entire outbound protocol from stream opening to finish. Pull-Request: #4900. --- protocols/relay/src/priv_client/handler.rs | 220 ++++++++----------- protocols/relay/src/protocol/outbound_hop.rs | 4 +- 2 files changed, 99 insertions(+), 125 deletions(-) diff --git a/protocols/relay/src/priv_client/handler.rs b/protocols/relay/src/priv_client/handler.rs index 1925d6f6ab4..662d63cc742 100644 --- a/protocols/relay/src/priv_client/handler.rs +++ b/protocols/relay/src/priv_client/handler.rs @@ -18,9 +18,12 @@ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. +use crate::client::Connection; use crate::priv_client::transport; +use crate::priv_client::transport::ToListenerMsg; use crate::protocol::{self, inbound_stop, outbound_hop}; use crate::{priv_client, proto, HOP_PROTOCOL_NAME, STOP_PROTOCOL_NAME}; +use futures::channel::mpsc::Sender; use futures::channel::{mpsc, oneshot}; use futures::future::FutureExt; use futures_timer::Delay; @@ -28,17 +31,16 @@ use libp2p_core::multiaddr::Protocol; use libp2p_core::upgrade::ReadyUpgrade; use libp2p_core::Multiaddr; use libp2p_identity::PeerId; -use libp2p_swarm::handler::{ - ConnectionEvent, DialUpgradeError, FullyNegotiatedInbound, FullyNegotiatedOutbound, -}; +use libp2p_swarm::handler::{ConnectionEvent, FullyNegotiatedInbound}; use libp2p_swarm::{ - ConnectionHandler, ConnectionHandlerEvent, StreamProtocol, StreamUpgradeError, + ConnectionHandler, ConnectionHandlerEvent, Stream, StreamProtocol, StreamUpgradeError, SubstreamProtocol, }; use std::collections::VecDeque; use std::task::{Context, Poll}; use std::time::Duration; use std::{fmt, io}; +use void::Void; /// The maximum number of circuits being denied concurrently. /// @@ -104,8 +106,7 @@ pub struct Handler { >, >, - /// We issue a stream upgrade for each pending request. - pending_requests: VecDeque, + pending_streams: VecDeque>>>, inflight_reserve_requests: futures_bounded::FuturesTupleSet< Result, @@ -133,7 +134,7 @@ impl Handler { remote_peer_id, remote_addr, queued_events: Default::default(), - pending_requests: Default::default(), + pending_streams: Default::default(), inflight_reserve_requests: futures_bounded::FuturesTupleSet::new( STREAM_TIMEOUT, MAX_CONCURRENT_STREAMS_PER_CONNECTION, @@ -154,57 +155,6 @@ impl Handler { } } - fn on_dial_upgrade_error( - &mut self, - DialUpgradeError { error, .. 
}: DialUpgradeError< - ::OutboundOpenInfo, - ::OutboundProtocol, - >, - ) { - let pending_request = self - .pending_requests - .pop_front() - .expect("got a stream error without a pending request"); - - match pending_request { - PendingRequest::Reserve { mut to_listener } => { - let error = match error { - StreamUpgradeError::Timeout => { - outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()) - } - StreamUpgradeError::Apply(never) => void::unreachable(never), - StreamUpgradeError::NegotiationFailed => { - outbound_hop::ReserveError::Unsupported - } - StreamUpgradeError::Io(e) => outbound_hop::ReserveError::Io(e), - }; - - if let Err(e) = - to_listener.try_send(transport::ToListenerMsg::Reservation(Err(error))) - { - tracing::debug!("Unable to send error to listener: {}", e.into_send_error()) - } - self.reservation.failed(); - } - PendingRequest::Connect { - to_dial: send_back, .. - } => { - let error = match error { - StreamUpgradeError::Timeout => { - outbound_hop::ConnectError::Io(io::ErrorKind::TimedOut.into()) - } - StreamUpgradeError::NegotiationFailed => { - outbound_hop::ConnectError::Unsupported - } - StreamUpgradeError::Io(e) => outbound_hop::ConnectError::Io(e), - StreamUpgradeError::Apply(v) => void::unreachable(v), - }; - - let _ = send_back.send(Err(error)); - } - } - } - fn insert_to_deny_futs(&mut self, circuit: inbound_stop::Circuit) { let src_peer_id = circuit.src_peer_id(); @@ -219,6 +169,62 @@ impl Handler { ) } } + + fn make_new_reservation(&mut self, to_listener: Sender) { + let (sender, receiver) = oneshot::channel(); + + self.pending_streams.push_back(sender); + self.queued_events + .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), + }); + let result = self.inflight_reserve_requests.try_push( + async move { + let stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? + .map_err(into_reserve_error)?; + + let reservation = outbound_hop::make_reservation(stream).await?; + + Ok(reservation) + }, + to_listener, + ); + + if result.is_err() { + tracing::warn!("Dropping in-flight reservation request because we are at capacity"); + } + } + + fn establish_new_circuit( + &mut self, + to_dial: oneshot::Sender>, + dst_peer_id: PeerId, + ) { + let (sender, receiver) = oneshot::channel(); + + self.pending_streams.push_back(sender); + self.queued_events + .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), + }); + let result = self.inflight_outbound_connect_requests.try_push( + async move { + let stream = receiver + .await + .map_err(|_| io::Error::from(io::ErrorKind::BrokenPipe))? 
+ .map_err(into_connect_error)?; + + outbound_hop::open_circuit(stream, dst_peer_id).await + }, + to_dial, + ); + + if result.is_err() { + tracing::warn!("Dropping in-flight connect request because we are at capacity") + } + } } impl ConnectionHandler for Handler { @@ -236,25 +242,13 @@ impl ConnectionHandler for Handler { fn on_behaviour_event(&mut self, event: Self::FromBehaviour) { match event { In::Reserve { to_listener } => { - self.pending_requests - .push_back(PendingRequest::Reserve { to_listener }); - self.queued_events - .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), - }); + self.make_new_reservation(to_listener); } In::EstablishCircuit { - to_dial: send_back, + to_dial, dst_peer_id, } => { - self.pending_requests.push_back(PendingRequest::Connect { - dst_peer_id, - to_dial: send_back, - }); - self.queued_events - .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), - }); + self.establish_new_circuit(to_dial, dst_peer_id); } } } @@ -402,12 +396,8 @@ impl ConnectionHandler for Handler { } if let Poll::Ready(Some(to_listener)) = self.reservation.poll(cx) { - self.pending_requests - .push_back(PendingRequest::Reserve { to_listener }); - - return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(ReadyUpgrade::new(HOP_PROTOCOL_NAME), ()), - }); + self.make_new_reservation(to_listener); + continue; } // Deny incoming circuit requests. @@ -450,42 +440,16 @@ impl ConnectionHandler for Handler { tracing::warn!("Dropping inbound stream because we are at capacity") } } - ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound { - protocol: stream, - .. - }) => { - let pending_request = self.pending_requests.pop_front().expect( - "opened a stream without a pending connection command or a reserve listener", - ); - match pending_request { - PendingRequest::Reserve { to_listener } => { - if self - .inflight_reserve_requests - .try_push(outbound_hop::make_reservation(stream), to_listener) - .is_err() - { - tracing::warn!("Dropping outbound stream because we are at capacity"); - } - } - PendingRequest::Connect { - dst_peer_id, - to_dial: send_back, - } => { - if self - .inflight_outbound_connect_requests - .try_push(outbound_hop::open_circuit(stream, dst_peer_id), send_back) - .is_err() - { - tracing::warn!("Dropping outbound stream because we are at capacity"); - } - } + ConnectionEvent::FullyNegotiatedOutbound(ev) => { + if let Some(next) = self.pending_streams.pop_front() { + let _ = next.send(Ok(ev.protocol)); } } - ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { - void::unreachable(listen_upgrade_error.error) - } - ConnectionEvent::DialUpgradeError(dial_upgrade_error) => { - self.on_dial_upgrade_error(dial_upgrade_error) + ConnectionEvent::ListenUpgradeError(ev) => void::unreachable(ev.error), + ConnectionEvent::DialUpgradeError(ev) => { + if let Some(next) = self.pending_streams.pop_front() { + let _ = next.send(Err(ev.error)); + } } _ => {} } @@ -614,14 +578,24 @@ impl Reservation { } } -pub(crate) enum PendingRequest { - Reserve { - /// A channel into the [`Transport`](priv_client::Transport). - to_listener: mpsc::Sender, - }, - Connect { - dst_peer_id: PeerId, - /// A channel into the future returned by [`Transport::dial`](libp2p_core::Transport::dial). 
- to_dial: oneshot::Sender>, - }, +fn into_reserve_error(e: StreamUpgradeError) -> outbound_hop::ReserveError { + match e { + StreamUpgradeError::Timeout => { + outbound_hop::ReserveError::Io(io::ErrorKind::TimedOut.into()) + } + StreamUpgradeError::Apply(never) => void::unreachable(never), + StreamUpgradeError::NegotiationFailed => outbound_hop::ReserveError::Unsupported, + StreamUpgradeError::Io(e) => outbound_hop::ReserveError::Io(e), + } +} + +fn into_connect_error(e: StreamUpgradeError) -> outbound_hop::ConnectError { + match e { + StreamUpgradeError::Timeout => { + outbound_hop::ConnectError::Io(io::ErrorKind::TimedOut.into()) + } + StreamUpgradeError::Apply(never) => void::unreachable(never), + StreamUpgradeError::NegotiationFailed => outbound_hop::ConnectError::Unsupported, + StreamUpgradeError::Io(e) => outbound_hop::ConnectError::Io(e), + } } diff --git a/protocols/relay/src/protocol/outbound_hop.rs b/protocols/relay/src/protocol/outbound_hop.rs index e5f9a6a0a52..3ae824be167 100644 --- a/protocols/relay/src/protocol/outbound_hop.rs +++ b/protocols/relay/src/protocol/outbound_hop.rs @@ -47,7 +47,7 @@ pub enum ConnectError { #[error("Remote does not support the `{HOP_PROTOCOL_NAME}` protocol")] Unsupported, #[error("IO error")] - Io(#[source] io::Error), + Io(#[from] io::Error), #[error("Protocol error")] Protocol(#[from] ProtocolViolation), } @@ -61,7 +61,7 @@ pub enum ReserveError { #[error("Remote does not support the `{HOP_PROTOCOL_NAME}` protocol")] Unsupported, #[error("IO error")] - Io(#[source] io::Error), + Io(#[from] io::Error), #[error("Protocol error")] Protocol(#[from] ProtocolViolation), } From ab9e7a0283207c9084aaf32012e3189bef03db4e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Nov 2023 21:48:33 +0000 Subject: [PATCH 35/48] deps: bump openssl from 0.10.55 to 0.10.60 Pull-Request: #4951. --- Cargo.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e91391a515a..0aea788d220 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3891,11 +3891,11 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.55" +version = "0.10.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" +checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", @@ -3923,9 +3923,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.90" +version = "0.9.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" dependencies = [ "cc", "libc", From bca3a264ff684d21c0bc92c7e793942c94e2e093 Mon Sep 17 00:00:00 2001 From: Doug A Date: Tue, 28 Nov 2023 22:52:51 -0500 Subject: [PATCH 36/48] fix(browser-webrtc-example): use `tracing-wasm` logger Upgrade `wasm-logger` to `tracing-wasm`. Related: #4950. Pull-Request: #4952. 
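In practice the upgrade is a one-line change in the wasm entry point. A minimal sketch, assuming a `wasm-bindgen` entry function like the one in this example crate (the function name and log line here are illustrative):

```rust
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub async fn run() -> Result<(), JsError> {
    // Previously: wasm_logger::init(wasm_logger::Config::default());
    // Now route `tracing` events to the browser console instead.
    tracing_wasm::set_as_global_default();

    tracing::info!("tracing initialised in the browser");

    Ok(())
}
```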
--- Cargo.lock | 13 ++++++++++++- examples/browser-webrtc/Cargo.toml | 4 ++-- examples/browser-webrtc/src/lib.rs | 2 +- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0aea788d220..e11c6ba5460 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -676,9 +676,9 @@ dependencies = [ "tower-http", "tracing", "tracing-subscriber", + "tracing-wasm", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-logger", "web-sys", ] @@ -5967,6 +5967,17 @@ dependencies = [ "tracing-log 0.2.0", ] +[[package]] +name = "tracing-wasm" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4575c663a174420fa2d78f4108ff68f65bf2fbb7dd89f33749b6e826b3626e07" +dependencies = [ + "tracing", + "tracing-subscriber", + "wasm-bindgen", +] + [[package]] name = "try-lock" version = "0.2.4" diff --git a/examples/browser-webrtc/Cargo.toml b/examples/browser-webrtc/Cargo.toml index 2607d0c1056..57232abeb5e 100644 --- a/examples/browser-webrtc/Cargo.toml +++ b/examples/browser-webrtc/Cargo.toml @@ -24,7 +24,7 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] axum = "0.6.19" -libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen", "tokio"] } +libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "tokio"] } libp2p-webrtc = { workspace = true, features = ["tokio"] } rust-embed = { version = "8.0.0", features = ["include-exclude", "interpolate-folder-path"] } tokio = { version = "1.34", features = ["macros", "net", "rt", "signal"] } @@ -37,9 +37,9 @@ mime_guess = "2.0.4" js-sys = "0.3.66" libp2p = { path = "../../libp2p", features = [ "ed25519", "macros", "ping", "wasm-bindgen"] } libp2p-webrtc-websys = { workspace = true } +tracing-wasm = "0.2.1" wasm-bindgen = "0.2.89" wasm-bindgen-futures = "0.4.38" -wasm-logger = { version = "0.2.0" } web-sys = { version = "0.3", features = ['Document', 'Element', 'HtmlElement', 'Node', 'Response', 'Window'] } [lints] diff --git a/examples/browser-webrtc/src/lib.rs b/examples/browser-webrtc/src/lib.rs index 609d72479c4..2112919c6de 100644 --- a/examples/browser-webrtc/src/lib.rs +++ b/examples/browser-webrtc/src/lib.rs @@ -13,7 +13,7 @@ use web_sys::{Document, HtmlElement}; #[wasm_bindgen] pub async fn run(libp2p_endpoint: String) -> Result<(), JsError> { - wasm_logger::init(wasm_logger::Config::default()); + tracing_wasm::set_as_global_default(); let body = Body::from_current_window()?; body.append_p("Let's ping the WebRTC Server!")?; From 0810672d0fb98d0109f4dcf9be00d6a10e2d2e6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Nov 2023 04:04:45 +0000 Subject: [PATCH 37/48] deps: bump aes-gcm from 0.10.2 to 0.10.3 Pull-Request: #4955. 
--- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e11c6ba5460..617d54e9b2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -40,9 +40,9 @@ dependencies = [ [[package]] name = "aes-gcm" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ "aead", "aes", From dfd66f918d6298af535e14367d7a901a0717284f Mon Sep 17 00:00:00 2001 From: Jiyuan Zheng Date: Tue, 28 Nov 2023 22:17:50 -0600 Subject: [PATCH 38/48] fix(tutorial): import dependencies via meta crate Pull-Request: #4949. --- libp2p/src/tutorials/ping.rs | 54 +++++++++++++++++------------------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/libp2p/src/tutorials/ping.rs b/libp2p/src/tutorials/ping.rs index db515595bfc..1413531cd72 100644 --- a/libp2p/src/tutorials/ping.rs +++ b/libp2p/src/tutorials/ping.rs @@ -55,7 +55,7 @@ //! edition = "2021" //! //! [dependencies] -//! libp2p = { version = "0.52", features = ["tcp", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } +//! libp2p = { version = "0.52", features = ["tcp", "tls", "dns", "async-std", "noise", "yamux", "websocket", "ping", "macros"] } //! futures = "0.3.21" //! async-std = { version = "1.12.0", features = ["attributes"] } //! tracing-subscriber = { version = "0.3", features = ["env-filter"] } @@ -95,7 +95,6 @@ //! trait. //! //! ```rust -//! use libp2p::{identity, PeerId}; //! use std::error::Error; //! use tracing_subscriber::EnvFilter; //! @@ -106,9 +105,9 @@ //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! .with_tcp( -//! libp2p_tcp::Config::default(), -//! libp2p_tls::Config::new, -//! libp2p_yamux::Config::default, +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, //! )?; //! //! Ok(()) @@ -138,8 +137,7 @@ //! With the above in mind, let's extend our example, creating a [`ping::Behaviour`](crate::ping::Behaviour) at the end: //! //! ```rust -//! use libp2p::swarm::NetworkBehaviour; -//! use libp2p::{identity, ping, PeerId}; +//! use libp2p::ping; //! use tracing_subscriber::EnvFilter; //! use std::error::Error; //! @@ -150,9 +148,9 @@ //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! .with_tcp( -//! libp2p_tcp::Config::default(), -//! libp2p_tls::Config::new, -//! libp2p_yamux::Config::default, +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())?; //! @@ -168,8 +166,7 @@ //! to the [`Transport`] as well as events from the [`Transport`] to the [`NetworkBehaviour`]. //! //! ```rust -//! use libp2p::swarm::NetworkBehaviour; -//! use libp2p::{identity, ping, PeerId}; +//! use libp2p::ping; //! use std::error::Error; //! use tracing_subscriber::EnvFilter; //! @@ -180,9 +177,9 @@ //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! .with_tcp( -//! libp2p_tcp::Config::default(), -//! libp2p_tls::Config::new, -//! libp2p_yamux::Config::default, +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? //! .build(); @@ -202,8 +199,7 @@ //! 
Thus, without any other behaviour in place, we would not be able to observe the pings. //! //! ```rust -//! use libp2p::swarm::NetworkBehaviour; -//! use libp2p::{identity, ping, PeerId}; +//! use libp2p::ping; //! use std::error::Error; //! use std::time::Duration; //! use tracing_subscriber::EnvFilter; @@ -215,9 +211,9 @@ //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! .with_tcp( -//! libp2p_tcp::Config::default(), -//! libp2p_tls::Config::new, -//! libp2p_yamux::Config::default, +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? //! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. @@ -254,7 +250,7 @@ //! remote peer. //! //! ```rust -//! use libp2p::{identity, ping, Multiaddr, PeerId}; +//! use libp2p::{ping, Multiaddr}; //! use std::error::Error; //! use std::time::Duration; //! use tracing_subscriber::EnvFilter; @@ -266,9 +262,9 @@ //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! .with_tcp( -//! libp2p_tcp::Config::default(), -//! libp2p_tls::Config::new, -//! libp2p_yamux::Config::default, +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? //! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. @@ -298,8 +294,8 @@ //! //! ```no_run //! use futures::prelude::*; -//! use libp2p::swarm::{NetworkBehaviour, SwarmEvent}; -//! use libp2p::{identity, ping, Multiaddr, PeerId}; +//! use libp2p::swarm::SwarmEvent; +//! use libp2p::{ping, Multiaddr}; //! use std::error::Error; //! use std::time::Duration; //! use tracing_subscriber::EnvFilter; @@ -311,9 +307,9 @@ //! let mut swarm = libp2p::SwarmBuilder::with_new_identity() //! .with_async_std() //! .with_tcp( -//! libp2p_tcp::Config::default(), -//! libp2p_tls::Config::new, -//! libp2p_yamux::Config::default, +//! libp2p::tcp::Config::default(), +//! libp2p::tls::Config::new, +//! libp2p::yamux::Config::default, //! )? //! .with_behaviour(|_| ping::Behaviour::default())? //! .with_swarm_config(|cfg| cfg.with_idle_connection_timeout(Duration::from_secs(30))) // Allows us to observe pings for 30 seconds. From 543408bec68c6d8fdef352a1a2a41994af9a4ca3 Mon Sep 17 00:00:00 2001 From: Predrag Gruevski <2348618+obi1kenobi@users.noreply.github.com> Date: Wed, 29 Nov 2023 00:37:30 -0500 Subject: [PATCH 39/48] ci: unset `RUSTFLAGS` value in semver job Don't fail semver-checking if a dependency version has warnings, such as deprecation notices. Related: https://github.com/libp2p/rust-libp2p/pull/4932#issuecomment-1829014527. Related: https://github.com/obi1kenobi/cargo-semver-checks/issues/589. Pull-Request: #4942. --- .github/workflows/ci.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 04dab309902..f8770a29bc9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -307,6 +307,15 @@ jobs: semver: runs-on: ubuntu-latest + env: + # Unset the global `RUSTFLAGS` env to allow warnings. + # cargo-semver-checks intentionally re-locks dependency versions + # before checking, and we shouldn't fail here if a dep has a warning. 
+ # + # More context: + # https://github.com/libp2p/rust-libp2p/pull/4932#issuecomment-1829014527 + # https://github.com/obi1kenobi/cargo-semver-checks/issues/589 + RUSTFLAGS: '' steps: - uses: actions/checkout@v4 - run: wget -q -O- https://github.com/obi1kenobi/cargo-semver-checks/releases/download/v0.25.0/cargo-semver-checks-x86_64-unknown-linux-gnu.tar.gz | tar -xz -C ~/.cargo/bin From e889e2e446e32e29a6c979d4b37775181bb55455 Mon Sep 17 00:00:00 2001 From: Doug A Date: Wed, 29 Nov 2023 20:59:15 -0500 Subject: [PATCH 40/48] deps(webrtc): bump alpha versions Bumps versions of `libp2p-webrtc` and `libp2p-webrtc-websys` up one minor version. Fixes: #4953. Pull-Request: #4959. --- Cargo.lock | 4 ++-- Cargo.toml | 4 ++-- transports/webrtc-websys/CHANGELOG.md | 5 +++++ transports/webrtc-websys/Cargo.toml | 2 +- transports/webrtc/CHANGELOG.md | 5 +++++ transports/webrtc/Cargo.toml | 2 +- 6 files changed, 16 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 617d54e9b2b..3d4acf65eef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3232,7 +3232,7 @@ dependencies = [ [[package]] name = "libp2p-webrtc" -version = "0.6.1-alpha" +version = "0.7.0-alpha" dependencies = [ "async-trait", "bytes", @@ -3283,7 +3283,7 @@ dependencies = [ [[package]] name = "libp2p-webrtc-websys" -version = "0.2.0-alpha" +version = "0.3.0-alpha" dependencies = [ "bytes", "futures", diff --git a/Cargo.toml b/Cargo.toml index 346b316d4dc..c773d56a2a9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,9 +106,9 @@ libp2p-tcp = { version = "0.41.0", path = "transports/tcp" } libp2p-tls = { version = "0.3.0", path = "transports/tls" } libp2p-uds = { version = "0.40.0", path = "transports/uds" } libp2p-upnp = { version = "0.2.0", path = "protocols/upnp" } -libp2p-webrtc = { version = "0.6.1-alpha", path = "transports/webrtc" } +libp2p-webrtc = { version = "0.7.0-alpha", path = "transports/webrtc" } libp2p-webrtc-utils = { version = "0.1.0", path = "misc/webrtc-utils" } -libp2p-webrtc-websys = { version = "0.2.0-alpha", path = "transports/webrtc-websys" } +libp2p-webrtc-websys = { version = "0.3.0-alpha", path = "transports/webrtc-websys" } libp2p-websocket = { version = "0.43.0", path = "transports/websocket" } libp2p-websocket-websys = { version = "0.3.1", path = "transports/websocket-websys" } libp2p-webtransport-websys = { version = "0.2.0", path = "transports/webtransport-websys" } diff --git a/transports/webrtc-websys/CHANGELOG.md b/transports/webrtc-websys/CHANGELOG.md index 3cc263e2ce5..727faa2561b 100644 --- a/transports/webrtc-websys/CHANGELOG.md +++ b/transports/webrtc-websys/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.3.0-alpha + +- Bump version in order to publish a new version dependent on latest `libp2p-core`. + See [PR 4959](https://github.com/libp2p/rust-libp2p/pull/4959). + ## 0.2.0-alpha - Rename `Error::JsError` to `Error::Js`. 
diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 44607de30c0..5aa0a90200e 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -8,7 +8,7 @@ license = "MIT" name = "libp2p-webrtc-websys" repository = "https://github.com/libp2p/rust-libp2p" rust-version = { workspace = true } -version = "0.2.0-alpha" +version = "0.3.0-alpha" publish = true [dependencies] diff --git a/transports/webrtc/CHANGELOG.md b/transports/webrtc/CHANGELOG.md index 710c2e315d9..7cc2b2b63bc 100644 --- a/transports/webrtc/CHANGELOG.md +++ b/transports/webrtc/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.7.0-alpha + +- Bump version in order to publish a new version dependent on latest `libp2p-core`. + See [PR 4959](https://github.com/libp2p/rust-libp2p/pull/4959). + ## 0.6.1-alpha - Move common dependencies to `libp2p-webrtc-utils` crate. diff --git a/transports/webrtc/Cargo.toml b/transports/webrtc/Cargo.toml index 8aa43652392..214d6778bf0 100644 --- a/transports/webrtc/Cargo.toml +++ b/transports/webrtc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-webrtc" -version = "0.6.1-alpha" +version = "0.7.0-alpha" authors = ["Parity Technologies "] description = "WebRTC transport for libp2p" repository = "https://github.com/libp2p/rust-libp2p" From 7b7cf5f5c1e49cbb7ae7f09b7f2203890f4aa664 Mon Sep 17 00:00:00 2001 From: Darius Clark Date: Thu, 30 Nov 2023 02:20:25 -0500 Subject: [PATCH 41/48] feat(request-response): derive `PartialOrd`,`Ord` for `{Out,In}RequestId` Pull-Request: #4956. --- Cargo.lock | 2 +- Cargo.toml | 2 +- protocols/request-response/CHANGELOG.md | 5 +++++ protocols/request-response/Cargo.toml | 2 +- protocols/request-response/src/lib.rs | 4 ++-- 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d4acf65eef..349d0e9a82a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3057,7 +3057,7 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.26.0" +version = "0.26.1" dependencies = [ "anyhow", "async-std", diff --git a/Cargo.toml b/Cargo.toml index c773d56a2a9..b48088f6218 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -97,7 +97,7 @@ libp2p-pnet = { version = "0.24.0", path = "transports/pnet" } libp2p-quic = { version = "0.10.1", path = "transports/quic" } libp2p-relay = { version = "0.17.1", path = "protocols/relay" } libp2p-rendezvous = { version = "0.14.0", path = "protocols/rendezvous" } -libp2p-request-response = { version = "0.26.0", path = "protocols/request-response" } +libp2p-request-response = { version = "0.26.1", path = "protocols/request-response" } libp2p-server = { version = "0.12.4", path = "misc/server" } libp2p-swarm = { version = "0.44.0", path = "swarm" } libp2p-swarm-derive = { version = "=0.34.0", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. diff --git a/protocols/request-response/CHANGELOG.md b/protocols/request-response/CHANGELOG.md index 30fc700da3c..d53ff479ee2 100644 --- a/protocols/request-response/CHANGELOG.md +++ b/protocols/request-response/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.26.1 + +- Derive `PartialOrd` and `Ord` for `{Out,In}boundRequestId`. 
+ See [PR 4956](https://github.com/libp2p/rust-libp2p/pull/4956). + ## 0.26.0 - Remove `request_response::Config::set_connection_keep_alive` in favor of `SwarmBuilder::idle_connection_timeout`. diff --git a/protocols/request-response/Cargo.toml b/protocols/request-response/Cargo.toml index 2c7692fb9b4..6da25e24862 100644 --- a/protocols/request-response/Cargo.toml +++ b/protocols/request-response/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-request-response" edition = "2021" rust-version = { workspace = true } description = "Generic Request/Response Protocols" -version = "0.26.0" +version = "0.26.1" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/request-response/src/lib.rs b/protocols/request-response/src/lib.rs index 824839ebac8..fc68bd6cf1f 100644 --- a/protocols/request-response/src/lib.rs +++ b/protocols/request-response/src/lib.rs @@ -274,7 +274,7 @@ impl ResponseChannel { /// /// Note: [`InboundRequestId`]'s uniqueness is only guaranteed between /// inbound requests of the same originating [`Behaviour`]. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct InboundRequestId(u64); impl fmt::Display for InboundRequestId { @@ -287,7 +287,7 @@ impl fmt::Display for InboundRequestId { /// /// Note: [`OutboundRequestId`]'s uniqueness is only guaranteed between /// outbound requests of the same originating [`Behaviour`]. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct OutboundRequestId(u64); impl fmt::Display for OutboundRequestId { From ec2258e36e20dc8e933ddc68a0f83ef3f3b48545 Mon Sep 17 00:00:00 2001 From: zhiqiangxu <652732310@qq.com> Date: Thu, 30 Nov 2023 15:38:58 +0800 Subject: [PATCH 42/48] refactor(connection-limits): make `check_limit` a free-function Pull-Request: #4958. --- misc/connection-limits/src/lib.rs | 37 +++++++++++++------------------ 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/misc/connection-limits/src/lib.rs b/misc/connection-limits/src/lib.rs index af76e9a57d9..cb12599ad79 100644 --- a/misc/connection-limits/src/lib.rs +++ b/misc/connection-limits/src/lib.rs @@ -79,22 +79,17 @@ impl Behaviour { established_per_peer: Default::default(), } } +} - fn check_limit( - &mut self, - limit: Option, - current: usize, - kind: Kind, - ) -> Result<(), ConnectionDenied> { - let limit = limit.unwrap_or(u32::MAX); - let current = current as u32; +fn check_limit(limit: Option, current: usize, kind: Kind) -> Result<(), ConnectionDenied> { + let limit = limit.unwrap_or(u32::MAX); + let current = current as u32; - if current >= limit { - return Err(ConnectionDenied::new(Exceeded { limit, kind })); - } - - Ok(()) + if current >= limit { + return Err(ConnectionDenied::new(Exceeded { limit, kind })); } + + Ok(()) } /// A connection limit has been exceeded. 
@@ -210,7 +205,7 @@ impl NetworkBehaviour for Behaviour { _: &Multiaddr, _: &Multiaddr, ) -> Result<(), ConnectionDenied> { - self.check_limit( + check_limit( self.limits.max_pending_incoming, self.pending_inbound_connections.len(), Kind::PendingIncoming, @@ -230,12 +225,12 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { self.pending_inbound_connections.remove(&connection_id); - self.check_limit( + check_limit( self.limits.max_established_incoming, self.established_inbound_connections.len(), Kind::EstablishedIncoming, )?; - self.check_limit( + check_limit( self.limits.max_established_per_peer, self.established_per_peer .get(&peer) @@ -243,7 +238,7 @@ impl NetworkBehaviour for Behaviour { .unwrap_or(0), Kind::EstablishedPerPeer, )?; - self.check_limit( + check_limit( self.limits.max_established_total, self.established_inbound_connections.len() + self.established_outbound_connections.len(), @@ -260,7 +255,7 @@ impl NetworkBehaviour for Behaviour { _: &[Multiaddr], _: Endpoint, ) -> Result, ConnectionDenied> { - self.check_limit( + check_limit( self.limits.max_pending_outgoing, self.pending_outbound_connections.len(), Kind::PendingOutgoing, @@ -280,12 +275,12 @@ impl NetworkBehaviour for Behaviour { ) -> Result, ConnectionDenied> { self.pending_outbound_connections.remove(&connection_id); - self.check_limit( + check_limit( self.limits.max_established_outgoing, self.established_outbound_connections.len(), Kind::EstablishedOutgoing, )?; - self.check_limit( + check_limit( self.limits.max_established_per_peer, self.established_per_peer .get(&peer) @@ -293,7 +288,7 @@ impl NetworkBehaviour for Behaviour { .unwrap_or(0), Kind::EstablishedPerPeer, )?; - self.check_limit( + check_limit( self.limits.max_established_total, self.established_inbound_connections.len() + self.established_outbound_connections.len(), From 6d21e6ed7f208c1ed2ba840a739f2445faaa5e5a Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 1 Dec 2023 17:51:26 +1100 Subject: [PATCH 43/48] chore(webrtc-utils): bump version to allow for new release We didn't bump this crate's version despite it depending on `libp2p_noise`. As such, we can't release `libp2p-webrtc-websys` at the moment because it needs a new release of this crate. Pull-Request: #4968. 
--- Cargo.lock | 2 +- Cargo.toml | 2 +- misc/webrtc-utils/CHANGELOG.md | 5 +++++ misc/webrtc-utils/Cargo.toml | 2 +- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 349d0e9a82a..825217393f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3261,7 +3261,7 @@ dependencies = [ [[package]] name = "libp2p-webrtc-utils" -version = "0.1.0" +version = "0.2.0" dependencies = [ "asynchronous-codec", "bytes", diff --git a/Cargo.toml b/Cargo.toml index b48088f6218..1e243c418eb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,7 +107,7 @@ libp2p-tls = { version = "0.3.0", path = "transports/tls" } libp2p-uds = { version = "0.40.0", path = "transports/uds" } libp2p-upnp = { version = "0.2.0", path = "protocols/upnp" } libp2p-webrtc = { version = "0.7.0-alpha", path = "transports/webrtc" } -libp2p-webrtc-utils = { version = "0.1.0", path = "misc/webrtc-utils" } +libp2p-webrtc-utils = { version = "0.2.0", path = "misc/webrtc-utils" } libp2p-webrtc-websys = { version = "0.3.0-alpha", path = "transports/webrtc-websys" } libp2p-websocket = { version = "0.43.0", path = "transports/websocket" } libp2p-websocket-websys = { version = "0.3.1", path = "transports/websocket-websys" } diff --git a/misc/webrtc-utils/CHANGELOG.md b/misc/webrtc-utils/CHANGELOG.md index c3485aa1dbf..6949113a377 100644 --- a/misc/webrtc-utils/CHANGELOG.md +++ b/misc/webrtc-utils/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.2.0 + +- Update to latest version of `libp2p-noise`. + See [PR 4968](https://github.com/libp2p/rust-libp2p/pull/4968). + ## 0.1.0 - Initial release. diff --git a/misc/webrtc-utils/Cargo.toml b/misc/webrtc-utils/Cargo.toml index 8ae36754d0f..7173dedae7b 100644 --- a/misc/webrtc-utils/Cargo.toml +++ b/misc/webrtc-utils/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT" name = "libp2p-webrtc-utils" repository = "https://github.com/libp2p/rust-libp2p" rust-version = { workspace = true } -version = "0.1.0" +version = "0.2.0" publish = true [dependencies] From c1925e5e612bd609e57d6ac50a7b28cf16a1aa2c Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Fri, 1 Dec 2023 18:01:27 +1100 Subject: [PATCH 44/48] feat(webrtc-websys): hide `libp2p_noise` from the public API Currently, `libp2p-webrtc-websys` exposes the `libp2p_noise` dependency in its public API. It should really be a private dependency of the crate. By wrapping it in a new-type, we can achieve this. Pull-Request: #4969. 
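The core of the change is the wrapper type itself, shown here as a sketch of the pattern (the actual definition lands in `transports/webrtc-websys/src/error.rs` in the diff below):

```rust
/// New-type wrapper so the noise error type does not leak into this crate's
/// public API; callers only ever see `AuthenticationError`.
#[derive(thiserror::Error, Debug)]
#[error(transparent)]
pub struct AuthenticationError(pub(crate) libp2p_webrtc_utils::noise::Error);
```

Because the inner field is `pub(crate)`, the wrapped error type can change without that being a breaking change to `libp2p-webrtc-websys`.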
--- Cargo.lock | 1 - misc/webrtc-utils/src/noise.rs | 6 ++++-- transports/webrtc-websys/CHANGELOG.md | 2 ++ transports/webrtc-websys/Cargo.toml | 1 - transports/webrtc-websys/src/error.rs | 9 +++++++-- transports/webrtc-websys/src/upgrade.rs | 5 ++++- 6 files changed, 17 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 825217393f2..3d6031af9cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3294,7 +3294,6 @@ dependencies = [ "js-sys", "libp2p-core", "libp2p-identity", - "libp2p-noise", "libp2p-ping", "libp2p-swarm", "libp2p-webrtc-utils", diff --git a/misc/webrtc-utils/src/noise.rs b/misc/webrtc-utils/src/noise.rs index ac2e58c9163..9180acfc1ca 100644 --- a/misc/webrtc-utils/src/noise.rs +++ b/misc/webrtc-utils/src/noise.rs @@ -27,12 +27,14 @@ use libp2p_noise as noise; use crate::fingerprint::Fingerprint; +pub use noise::Error; + pub async fn inbound( id_keys: identity::Keypair, stream: T, client_fingerprint: Fingerprint, server_fingerprint: Fingerprint, -) -> Result +) -> Result where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { @@ -54,7 +56,7 @@ pub async fn outbound( stream: T, server_fingerprint: Fingerprint, client_fingerprint: Fingerprint, -) -> Result +) -> Result where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { diff --git a/transports/webrtc-websys/CHANGELOG.md b/transports/webrtc-websys/CHANGELOG.md index 727faa2561b..634120c53c3 100644 --- a/transports/webrtc-websys/CHANGELOG.md +++ b/transports/webrtc-websys/CHANGELOG.md @@ -2,6 +2,8 @@ - Bump version in order to publish a new version dependent on latest `libp2p-core`. See [PR 4959](https://github.com/libp2p/rust-libp2p/pull/4959). +- Remove `libp2p_noise` from the public API. + See [PR 4969](https://github.com/libp2p/rust-libp2p/pull/4969). ## 0.2.0-alpha diff --git a/transports/webrtc-websys/Cargo.toml b/transports/webrtc-websys/Cargo.toml index 5aa0a90200e..a5eb8f0b14d 100644 --- a/transports/webrtc-websys/Cargo.toml +++ b/transports/webrtc-websys/Cargo.toml @@ -20,7 +20,6 @@ hex = "0.4.3" js-sys = { version = "0.3" } libp2p-core = { workspace = true } libp2p-identity = { workspace = true } -libp2p-noise = { workspace = true } libp2p-webrtc-utils = { workspace = true } send_wrapper = { version = "0.6.0", features = ["futures"] } serde = { version = "1.0", features = ["derive"] } diff --git a/transports/webrtc-websys/src/error.rs b/transports/webrtc-websys/src/error.rs index c95b1caf49b..a2df1a182ea 100644 --- a/transports/webrtc-websys/src/error.rs +++ b/transports/webrtc-websys/src/error.rs @@ -20,9 +20,14 @@ pub enum Error { Connection(String), #[error("Authentication error")] - Authentication(#[from] libp2p_noise::Error), + Authentication(#[from] AuthenticationError), } +/// New-type wrapper to hide `libp2p_noise` from the public API. 
+#[derive(thiserror::Error, Debug)] +#[error(transparent)] +pub struct AuthenticationError(pub(crate) libp2p_webrtc_utils::noise::Error); + impl Error { pub(crate) fn from_js_value(value: JsValue) -> Self { let s = if value.is_instance_of::() { @@ -38,7 +43,7 @@ impl Error { } } -impl std::convert::From for Error { +impl From for Error { fn from(value: JsValue) -> Self { Error::from_js_value(value) } diff --git a/transports/webrtc-websys/src/upgrade.rs b/transports/webrtc-websys/src/upgrade.rs index cc053835041..d42f2e3ae18 100644 --- a/transports/webrtc-websys/src/upgrade.rs +++ b/transports/webrtc-websys/src/upgrade.rs @@ -1,5 +1,6 @@ use super::Error; use crate::connection::RtcPeerConnection; +use crate::error::AuthenticationError; use crate::sdp; use crate::Connection; use libp2p_identity::{Keypair, PeerId}; @@ -48,7 +49,9 @@ async fn outbound_inner( tracing::trace!(?local_fingerprint); tracing::trace!(?remote_fingerprint); - let peer_id = noise::outbound(id_keys, channel, remote_fingerprint, local_fingerprint).await?; + let peer_id = noise::outbound(id_keys, channel, remote_fingerprint, local_fingerprint) + .await + .map_err(AuthenticationError)?; tracing::debug!(peer=%peer_id, "Remote peer identified"); From d8d548250092752958a96ae7fd83b8ab553839be Mon Sep 17 00:00:00 2001 From: maqi Date: Sat, 2 Dec 2023 02:05:23 +0800 Subject: [PATCH 45/48] fix(kad): iterator progress to be decided by any of new peers Pull-Request: #4932. --- Cargo.lock | 2 +- Cargo.toml | 2 +- protocols/kad/CHANGELOG.md | 5 +++++ protocols/kad/Cargo.toml | 2 +- protocols/kad/src/query/peers/closest.rs | 17 +++++++++-------- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d6031af9cf..c26c1f83958 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2720,7 +2720,7 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.45.2" +version = "0.45.3" dependencies = [ "arrayvec", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 1e243c418eb..8366148650c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,7 @@ libp2p-floodsub = { version = "0.44.0", path = "protocols/floodsub" } libp2p-gossipsub = { version = "0.46.1", path = "protocols/gossipsub" } libp2p-identify = { version = "0.44.1", path = "protocols/identify" } libp2p-identity = { version = "0.2.8" } -libp2p-kad = { version = "0.45.2", path = "protocols/kad" } +libp2p-kad = { version = "0.45.3", path = "protocols/kad" } libp2p-mdns = { version = "0.45.1", path = "protocols/mdns" } libp2p-memory-connection-limits = { version = "0.2.0", path = "misc/memory-connection-limits" } libp2p-metrics = { version = "0.14.1", path = "misc/metrics" } diff --git a/protocols/kad/CHANGELOG.md b/protocols/kad/CHANGELOG.md index 4740e4b1f95..b27a6943659 100644 --- a/protocols/kad/CHANGELOG.md +++ b/protocols/kad/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.45.3 + +- The progress of the close query iterator shall be decided by ANY of the new peers. + See [PR 4932](https://github.com/libp2p/rust-libp2p/pull/4932). + ## 0.45.2 - Ensure `Multiaddr` handled and returned by `Behaviour` are `/p2p` terminated. 
diff --git a/protocols/kad/Cargo.toml b/protocols/kad/Cargo.toml index 1e4c788cf00..f4ad83972b4 100644 --- a/protocols/kad/Cargo.toml +++ b/protocols/kad/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-kad" edition = "2021" rust-version = { workspace = true } description = "Kademlia protocol for libp2p" -version = "0.45.2" +version = "0.45.3" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/protocols/kad/src/query/peers/closest.rs b/protocols/kad/src/query/peers/closest.rs index 01155b7f010..dc913f1bbca 100644 --- a/protocols/kad/src/query/peers/closest.rs +++ b/protocols/kad/src/query/peers/closest.rs @@ -175,10 +175,14 @@ impl ClosestPeersIter { }, } - let num_closest = self.closest_peers.len(); - let mut progress = false; - // Incorporate the reported closer peers into the iterator. + // + // The iterator makes progress if: + // 1, the iterator did not yet accumulate enough closest peers. + // OR + // 2, any of the new peers is closer to the target than any peer seen so far + // (i.e. is the first entry after being incorporated) + let mut progress = self.closest_peers.len() < self.config.num_results.get(); for peer in closer_peers { let key = peer.into(); let distance = self.target.distance(&key); @@ -187,11 +191,8 @@ impl ClosestPeersIter { state: PeerState::NotContacted, }; self.closest_peers.entry(distance).or_insert(peer); - // The iterator makes progress if the new peer is either closer to the target - // than any peer seen so far (i.e. is the first entry), or the iterator did - // not yet accumulate enough closest peers. - progress = self.closest_peers.keys().next() == Some(&distance) - || num_closest < self.config.num_results.get(); + + progress = self.closest_peers.keys().next() == Some(&distance) || progress; } // Update the iterator state. From 9584ee3e5110b8a931ada19b61feb597ce494491 Mon Sep 17 00:00:00 2001 From: Darius Clark Date: Fri, 1 Dec 2023 16:34:11 -0500 Subject: [PATCH 46/48] chore(quic): set `max_idle_timeout` to quinn default timeout Resolves #4917. Pull-Request: #4965. --- Cargo.lock | 2 +- Cargo.toml | 2 +- transports/quic/CHANGELOG.md | 5 +++++ transports/quic/Cargo.toml | 2 +- transports/quic/src/config.rs | 4 ++-- 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c26c1f83958..075198af86a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2969,7 +2969,7 @@ dependencies = [ [[package]] name = "libp2p-quic" -version = "0.10.1" +version = "0.10.2" dependencies = [ "async-std", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 8366148650c..1182a0ecff8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,7 +94,7 @@ libp2p-perf = { version = "0.3.0", path = "protocols/perf" } libp2p-ping = { version = "0.44.0", path = "protocols/ping" } libp2p-plaintext = { version = "0.41.0", path = "transports/plaintext" } libp2p-pnet = { version = "0.24.0", path = "transports/pnet" } -libp2p-quic = { version = "0.10.1", path = "transports/quic" } +libp2p-quic = { version = "0.10.2", path = "transports/quic" } libp2p-relay = { version = "0.17.1", path = "protocols/relay" } libp2p-rendezvous = { version = "0.14.0", path = "protocols/rendezvous" } libp2p-request-response = { version = "0.26.1", path = "protocols/request-response" } diff --git a/transports/quic/CHANGELOG.md b/transports/quic/CHANGELOG.md index 2283c41262b..538db11d79e 100644 --- a/transports/quic/CHANGELOG.md +++ b/transports/quic/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.10.2 + +- Change `max_idle_timeout`to 10s. 
+ See [PR XXXX](https://github.com/libp2p/rust-libp2p/pull/XXXX). + ## 0.10.1 - Allow disabling path MTU discovery. diff --git a/transports/quic/Cargo.toml b/transports/quic/Cargo.toml index 5f3fd5cfb43..3bf57dddb51 100644 --- a/transports/quic/Cargo.toml +++ b/transports/quic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-quic" -version = "0.10.1" +version = "0.10.2" authors = ["Parity Technologies "] edition = "2021" rust-version = { workspace = true } diff --git a/transports/quic/src/config.rs b/transports/quic/src/config.rs index 4e4bb94cd7a..540f13e726b 100644 --- a/transports/quic/src/config.rs +++ b/transports/quic/src/config.rs @@ -78,9 +78,9 @@ impl Config { server_tls_config, support_draft_29: false, handshake_timeout: Duration::from_secs(5), - max_idle_timeout: 30 * 1000, + max_idle_timeout: 10 * 1000, max_concurrent_stream_limit: 256, - keep_alive_interval: Duration::from_secs(15), + keep_alive_interval: Duration::from_secs(5), max_connection_data: 15_000_000, // Ensure that one stream is not consuming the whole connection. From a83c30a869b6cd9459944e428f758629bb1a6549 Mon Sep 17 00:00:00 2001 From: stormshield-frb <144998884+stormshield-frb@users.noreply.github.com> Date: Fri, 1 Dec 2023 22:43:35 +0100 Subject: [PATCH 47/48] feat(core): impl Display on ListenerId Fixes: #4935. Pull-Request: #4936. --- Cargo.lock | 2 +- Cargo.toml | 2 +- core/CHANGELOG.md | 5 +++++ core/Cargo.toml | 2 +- core/src/transport.rs | 6 ++++++ 5 files changed, 14 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 075198af86a..0a9ff52f9de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2527,7 +2527,7 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.41.1" +version = "0.41.2" dependencies = [ "async-std", "either", diff --git a/Cargo.toml b/Cargo.toml index 1182a0ecff8..dbb89afaed6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ libp2p = { version = "0.53.0", path = "libp2p" } libp2p-allow-block-list = { version = "0.3.0", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.12.0", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.3.0", path = "misc/connection-limits" } -libp2p-core = { version = "0.41.1", path = "core" } +libp2p-core = { version = "0.41.2", path = "core" } libp2p-dcutr = { version = "0.11.0", path = "protocols/dcutr" } libp2p-dns = { version = "0.41.1", path = "transports/dns" } libp2p-floodsub = { version = "0.44.0", path = "protocols/floodsub" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 833af07982c..a7cd7fd46b4 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.41.2 + +- Implement `std::fmt::Display` on `ListenerId`. + See [PR 4936](https://github.com/libp2p/rust-libp2p/pull/4936). + ## 0.41.1 - Implement `{In,Out}boundConnectionUpgrade` for `SelectUpgrade`. 
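With `Display` implemented, a `ListenerId` can be interpolated directly into log messages. A small hypothetical usage sketch (the address and log text are illustrative):

```rust
// Assumes an already-built `swarm` in a function returning
// `Result<_, Box<dyn std::error::Error>>`.
let listener_id = swarm.listen_on("/ip4/0.0.0.0/tcp/0".parse()?)?;
tracing::info!("started listener {listener_id}");
```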
diff --git a/core/Cargo.toml b/core/Cargo.toml index 7380695023b..7ead34b69d9 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p-core" edition = "2021" rust-version = { workspace = true } description = "Core traits and structs of libp2p" -version = "0.41.1" +version = "0.41.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/core/src/transport.rs b/core/src/transport.rs index 8656b89228c..22e7a0532fa 100644 --- a/core/src/transport.rs +++ b/core/src/transport.rs @@ -256,6 +256,12 @@ impl ListenerId { } } +impl std::fmt::Display for ListenerId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + /// Event produced by [`Transport`]s. pub enum TransportEvent { /// A new address is being listened on. From 4f0013cc3fcf85f1434987d8821115801f612bcc Mon Sep 17 00:00:00 2001 From: Max Inden Date: Sat, 2 Dec 2023 00:33:47 +0100 Subject: [PATCH 48/48] feat(server): support websocket Pull-Request: #4937. --- Cargo.lock | 4 ++-- Cargo.toml | 4 ++-- libp2p/CHANGELOG.md | 5 +++++ libp2p/Cargo.toml | 2 +- libp2p/src/builder/phase/relay.rs | 19 ++++++++++++++++++- misc/server/CHANGELOG.md | 7 +++++++ misc/server/Cargo.toml | 4 ++-- misc/server/src/main.rs | 2 ++ 8 files changed, 39 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0a9ff52f9de..b69a700f7ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2422,7 +2422,7 @@ checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libp2p" -version = "0.53.1" +version = "0.53.2" dependencies = [ "async-std", "async-trait", @@ -3086,7 +3086,7 @@ dependencies = [ [[package]] name = "libp2p-server" -version = "0.12.4" +version = "0.12.5" dependencies = [ "base64 0.21.5", "clap", diff --git a/Cargo.toml b/Cargo.toml index dbb89afaed6..8fca61c365e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,7 +72,7 @@ rust-version = "1.73.0" [workspace.dependencies] asynchronous-codec = { version = "0.7.0" } futures-bounded = { version = "0.2.3", path = "misc/futures-bounded" } -libp2p = { version = "0.53.0", path = "libp2p" } +libp2p = { version = "0.53.2", path = "libp2p" } libp2p-allow-block-list = { version = "0.3.0", path = "misc/allow-block-list" } libp2p-autonat = { version = "0.12.0", path = "protocols/autonat" } libp2p-connection-limits = { version = "0.3.0", path = "misc/connection-limits" } @@ -98,7 +98,7 @@ libp2p-quic = { version = "0.10.2", path = "transports/quic" } libp2p-relay = { version = "0.17.1", path = "protocols/relay" } libp2p-rendezvous = { version = "0.14.0", path = "protocols/rendezvous" } libp2p-request-response = { version = "0.26.1", path = "protocols/request-response" } -libp2p-server = { version = "0.12.4", path = "misc/server" } +libp2p-server = { version = "0.12.5", path = "misc/server" } libp2p-swarm = { version = "0.44.0", path = "swarm" } libp2p-swarm-derive = { version = "=0.34.0", path = "swarm-derive" } # `libp2p-swarm-derive` may not be compatible with different `libp2p-swarm` non-breaking releases. E.g. `libp2p-swarm` might introduce a new enum variant `FromSwarm` (which is `#[non-exhaustive]`) in a non-breaking release. Older versions of `libp2p-swarm-derive` would not forward this enum variant within the `NetworkBehaviour` hierarchy. Thus the version pinning is required. 
libp2p-swarm-test = { version = "0.3.0", path = "swarm-test" } diff --git a/libp2p/CHANGELOG.md b/libp2p/CHANGELOG.md index bb6d8211bd5..80b32c35643 100644 --- a/libp2p/CHANGELOG.md +++ b/libp2p/CHANGELOG.md @@ -1,3 +1,8 @@ +## 0.53.2 + +- Allow `SwarmBuilder::with_bandwidth_metrics` after `SwarmBuilder::with_websocket`. + See [PR 4937](https://github.com/libp2p/rust-libp2p/pull/4937). + ## 0.53.1 - Allow `SwarmBuilder::with_quic_config` to be called without `with_tcp` first. diff --git a/libp2p/Cargo.toml b/libp2p/Cargo.toml index 6ced40e1dfe..9dc9667be10 100644 --- a/libp2p/Cargo.toml +++ b/libp2p/Cargo.toml @@ -3,7 +3,7 @@ name = "libp2p" edition = "2021" rust-version = { workspace = true } description = "Peer-to-peer networking library" -version = "0.53.1" +version = "0.53.2" authors = ["Parity Technologies "] license = "MIT" repository = "https://github.com/libp2p/rust-libp2p" diff --git a/libp2p/src/builder/phase/relay.rs b/libp2p/src/builder/phase/relay.rs index 2d47810ca9e..f8305f9d246 100644 --- a/libp2p/src/builder/phase/relay.rs +++ b/libp2p/src/builder/phase/relay.rs @@ -116,11 +116,28 @@ impl SwarmBuilder> { } // Shortcuts +#[cfg(feature = "metrics")] +impl SwarmBuilder> { + pub fn with_bandwidth_metrics( + self, + registry: &mut libp2p_metrics::Registry, + ) -> SwarmBuilder< + Provider, + BehaviourPhase, + > { + self.without_relay() + .without_bandwidth_logging() + .with_bandwidth_metrics(registry) + } +} impl SwarmBuilder> { pub fn with_behaviour>( self, constructor: impl FnOnce(&libp2p_identity::Keypair) -> R, ) -> Result>, R::Error> { - self.without_relay().with_behaviour(constructor) + self.without_relay() + .without_bandwidth_logging() + .without_bandwidth_metrics() + .with_behaviour(constructor) } } diff --git a/misc/server/CHANGELOG.md b/misc/server/CHANGELOG.md index d476a8722eb..e4c5dd4a103 100644 --- a/misc/server/CHANGELOG.md +++ b/misc/server/CHANGELOG.md @@ -1,3 +1,10 @@ +## 0.12.5 + +### Added + +- Add `/wss` support. + See [PR 4937](https://github.com/libp2p/rust-libp2p/pull/4937). + ## 0.12.4 ### Added diff --git a/misc/server/Cargo.toml b/misc/server/Cargo.toml index ba848eaeadb..87bdc20b169 100644 --- a/misc/server/Cargo.toml +++ b/misc/server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libp2p-server" -version = "0.12.4" +version = "0.12.5" authors = ["Max Inden "] edition = "2021" repository = "https://github.com/libp2p/rust-libp2p" @@ -16,7 +16,7 @@ clap = { version = "4.4.10", features = ["derive"] } futures = "0.3" futures-timer = "3" hyper = { version = "0.14", features = ["server", "tcp", "http1"] } -libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic"] } +libp2p = { workspace = true, features = ["autonat", "dns", "tokio", "noise", "tcp", "yamux", "identify", "kad", "ping", "relay", "metrics", "rsa", "macros", "quic", "websocket"] } prometheus-client = { workspace = true } serde = "1.0.193" serde_derive = "1.0.125" diff --git a/misc/server/src/main.rs b/misc/server/src/main.rs index 2349ebf6485..16e6530e946 100644 --- a/misc/server/src/main.rs +++ b/misc/server/src/main.rs @@ -82,6 +82,8 @@ async fn main() -> Result<(), Box> { )? .with_quic() .with_dns()? + .with_websocket(noise::Config::new, yamux::Config::default) + .await? .with_bandwidth_metrics(&mut metric_registry) .with_behaviour(|key| { behaviour::Behaviour::new(key.public(), opt.enable_kademlia, opt.enable_autonat)