diff --git a/.github/workflows/build-release-artifacts.yml b/.github/workflows/build-release-artifacts.yml index 6139ddc413..b30d4e1803 100644 --- a/.github/workflows/build-release-artifacts.yml +++ b/.github/workflows/build-release-artifacts.yml @@ -1,21 +1,27 @@ -name: build release artifacts +# This workflow builds and packages the release artifacts, without actually running a release. +# +# It can sometimes be useful to obtain these binaries built from other branches or from a tag. When +# the release process is being updated, this workflow can also be used to test some of the changes. +# +# The built and packaged binaries will be attached to the workflow run as artifacts, available for +# download. +name: build and package release artifacts on: workflow_dispatch: inputs: branch: - description: The branch to build. + description: Set to build a particular branch type: string tag: - description: The tag to build. + description: Set to build a particular tag type: string -# Copied from `release.yml` -# During the build step, the env variable has to be manually sent to the containers for cross platform builds. -# Update the Justfile as well. +# The key variables also need to be passed to `cross`, which runs in a container and does not +# inherit variables from the parent environment. The `cross` tool is used in the `build` +# job. If any keys are added, the `build-release-artifacts` target in the Justfile must +# also be updated. env: - JUST_BIN_URL: https://github.com/casey/just/releases/download/1.25.2/just-1.25.2-x86_64-unknown-linux-musl.tar.gz - WORKFLOW_URL: https://github.com/maidsafe/safe_network/actions/runs GENESIS_PK: ${{ secrets.STABLE_GENESIS_PK }} GENESIS_SK: ${{ secrets.STABLE_GENESIS_SK }} FOUNDATION_PK: ${{ secrets.STABLE_FOUNDATION_PK }} @@ -48,8 +54,6 @@ jobs: with: ref: ${{ inputs.tag || inputs.branch }} - uses: dtolnay/rust-toolchain@stable - # cargo-binstall will try and use pre-built binaries if they are available and also speeds up - # installing `cross` - uses: cargo-bins/cargo-binstall@main - shell: bash run: cargo binstall --no-confirm just @@ -63,8 +67,6 @@ jobs: artifacts !artifacts/.cargo-lock - # This job isn't necessary, but it's useful for debugging the packaging process for the real release - # workflow, just in case any issues are ever encountered there.
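As context for the `cross` comment above: `cross` runs the compiler inside a container, so host environment variables are only visible if they are forwarded explicitly. A minimal sketch of the mechanism, mirroring the `build-release-artifacts` Justfile target that appears later in this diff (the `cross build` invocation and target triple are illustrative, not part of this PR):

```bash
# cross spawns a container that does not inherit the host environment,
# so each key is forwarded with an --env flag via CROSS_CONTAINER_OPTS.
cross_container_opts="--env \"GENESIS_PK=$GENESIS_PK\" --env \"GENESIS_SK=$GENESIS_SK\""
cross_container_opts="$cross_container_opts --env \"FOUNDATION_PK=$FOUNDATION_PK\""
cross_container_opts="$cross_container_opts --env \"NETWORK_ROYALTIES_PK=$NETWORK_ROYALTIES_PK\""
cross_container_opts="$cross_container_opts --env \"PAYMENT_FORWARD_PK=$PAYMENT_FORWARD_PK\""
export CROSS_CONTAINER_OPTS=$cross_container_opts

# Illustrative cross-compile for one of the targets in the build matrix.
cross build --release --target aarch64-unknown-linux-musl
```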
package: name: package artifacts runs-on: ubuntu-latest @@ -104,19 +106,21 @@ jobs: - uses: cargo-bins/cargo-binstall@main - shell: bash run: cargo binstall --no-confirm just - - name: package artifacts + + - name: package binaries shell: bash run: | - just package-release-assets "faucet" - just package-release-assets "nat-detection" - just package-release-assets "node-launchpad" - just package-release-assets "safe" - just package-release-assets "safenode" - just package-release-assets "safenode_rpc_client" - just package-release-assets "safenode-manager" - just package-release-assets "safenodemand" - just package-release-assets "sn_auditor" + just package-all-bins - uses: actions/upload-artifact@main with: name: packaged_binaries - path: deploy + path: packaged_bins + + - name: package architectures + shell: bash + run: | + just package-all-architectures + - uses: actions/upload-artifact@main + with: + name: packaged_architectures + path: packaged_architectures diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml index 011fd53640..7f9f79a5ab 100644 --- a/.github/workflows/merge.yml +++ b/.github/workflows/merge.yml @@ -5,9 +5,9 @@ on: # on main, we want to know that all commits are passing at a glance, any deviation should help bisecting errors # the merge run checks should show on master and enable this clear test/passing history merge_group: - branches: [ main, alpha*, beta*, rc* ] + branches: [main, alpha*, beta*, rc*] pull_request: - branches: [ "*" ] + branches: ["*"] env: CARGO_INCREMENTAL: 0 # bookkeeping for incremental builds has overhead, not useful in CI. @@ -95,7 +95,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ ubuntu-latest, windows-latest, macos-latest ] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 @@ -342,7 +342,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ ubuntu-latest, windows-latest, macos-latest ] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 @@ -415,72 +415,72 @@ jobs: log_file_prefix: safe_test_logs_spend platform: ${{ matrix.os }} - # runs with increased node count - spend_simulation: - if: "!startsWith(github.event.head_commit.message, 'chore(release):')" - name: spend simulation - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [ ubuntu-latest, windows-latest, macos-latest ] - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: dtolnay/rust-toolchain@stable - - - uses: Swatinem/rust-cache@v2 - - - name: Build binaries - run: cargo build --release --features=local-discovery --bin safenode - timeout-minutes: 30 - - - name: Build faucet binary - run: cargo build --release --bin faucet --features="local-discovery,gifting" - timeout-minutes: 30 - - - name: Build testing executable - run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run - env: - # only set the target dir for windows to bypass the linker issue. - # happens if we build the node manager via testnet action - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} - timeout-minutes: 30 - - - name: Start a local network - uses: maidsafe/sn-local-testnet-action@main - with: - action: start - interval: 2000 - node-count: 50 - node-path: target/release/safenode - faucet-path: target/release/faucet - platform: ${{ matrix.os }} - build: true - - - name: Check SAFE_PEERS was set - shell: bash - run: | - if [[ -z "$SAFE_PEERS" ]]; then - echo "The SAFE_PEERS variable has not been set" - exit 1 - else - echo "SAFE_PEERS has been set to $SAFE_PEERS" - fi - - - name: execute the spend simulation - run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture - env: - CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} - timeout-minutes: 25 - - - name: Stop the local network and upload logs - if: always() - uses: maidsafe/sn-local-testnet-action@main - with: - action: stop - log_file_prefix: safe_test_logs_spend_simulation - platform: ${{ matrix.os }} + # # runs with increased node count + # spend_simulation: + # if: "!startsWith(github.event.head_commit.message, 'chore(release):')" + # name: spend simulation + # runs-on: ${{ matrix.os }} + # strategy: + # matrix: + # os: [ ubuntu-latest, windows-latest, macos-latest ] + # steps: + # - uses: actions/checkout@v4 + + # - name: Install Rust + # uses: dtolnay/rust-toolchain@stable + + # - uses: Swatinem/rust-cache@v2 + + # - name: Build binaries + # run: cargo build --release --features=local-discovery --bin safenode + # timeout-minutes: 30 + + # - name: Build faucet binary + # run: cargo build --release --bin faucet --features="local-discovery,gifting" + # timeout-minutes: 30 + + # - name: Build testing executable + # run: cargo test --release -p sn_node --features=local-discovery --test spend_simulation --no-run + # env: + # # only set the target dir for windows to bypass the linker issue. + # # happens if we build the node manager via testnet action + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' }} + # timeout-minutes: 30 + + # - name: Start a local network + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: start + # interval: 2000 + # node-count: 50 + # node-path: target/release/safenode + # faucet-path: target/release/faucet + # platform: ${{ matrix.os }} + # build: true + + # - name: Check SAFE_PEERS was set + # shell: bash + # run: | + # if [[ -z "$SAFE_PEERS" ]]; then + # echo "The SAFE_PEERS variable has not been set" + # exit 1 + # else + # echo "SAFE_PEERS has been set to $SAFE_PEERS" + # fi + + # - name: execute the spend simulation + # run: cargo test --release -p sn_node --features="local-discovery" --test spend_simulation -- --nocapture + # env: + # CARGO_TARGET_DIR: ${{ matrix.os == 'windows-latest' && './test-target' || '.' 
}} + # timeout-minutes: 25 + + # - name: Stop the local network and upload logs + # if: always() + # uses: maidsafe/sn-local-testnet-action@main + # with: + # action: stop + # log_file_prefix: safe_test_logs_spend_simulation + # platform: ${{ matrix.os }} token_distribution_test: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" @@ -488,7 +488,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ ubuntu-latest, windows-latest, macos-latest ] + os: [ubuntu-latest, windows-latest, macos-latest] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 338cab6eb0..edb068d2a2 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,25 +1,13 @@ -# Will automatically generate relases for version bumped code. -# Can optionally be run as workflow action to generate a soft release (no publish steps, just s3 and github release) - name: release -# prevent concurrent version bumps + releases from running at the same time -concurrency: - group: "version-bump-release-${{ github.ref }}" - on: workflow_dispatch: - inputs: - network_version_mode: - description: "Set NETWORK_VERSION_MODE if desired. (This restricts the network from contacting any other network mode. If left empty, the default protocol is used)" - required: false - default: "" - -# During the build step, the env variable has to be manually sent to the containers for cross platform builds. -# Update the Justfile as well. + +# The key variables also need to be passed to `cross`, which runs in a container and does not +# inherit variables from the parent environment. The `cross` tool is used in the `build` +# job. If any keys are added, the `build-release-artifacts` target in the Justfile must +# also be updated. 
env: - RELEASE_PLZ_BIN_URL: https://github.com/MarcoIeni/release-plz/releases/download/release-plz-v0.3.43/release-plz-x86_64-unknown-linux-gnu.tar.gz - JUST_BIN_URL: https://github.com/casey/just/releases/download/1.25.2/just-1.25.2-x86_64-unknown-linux-musl.tar.gz WORKFLOW_URL: https://github.com/maidsafe/safe_network/actions/runs GENESIS_PK: ${{ secrets.STABLE_GENESIS_PK }} GENESIS_SK: ${{ secrets.STABLE_GENESIS_SK }} @@ -29,10 +17,10 @@ env: jobs: build: - if: | - github.repository_owner == 'maidsafe' && - startsWith(github.event.head_commit.message, 'chore(release):') || - github.event_name == 'workflow_dispatch' + if: ${{ + github.repository_owner == 'maidsafe' && + (github.ref == 'refs/heads/stable' || startsWith(github.ref, 'refs/heads/rc')) + }} name: build runs-on: ${{ matrix.os }} strategy: @@ -42,6 +30,8 @@ jobs: target: x86_64-pc-windows-msvc - os: macos-latest target: x86_64-apple-darwin + - os: macos-latest + target: aarch64-apple-darwin - os: ubuntu-latest target: x86_64-unknown-linux-musl - os: ubuntu-latest @@ -53,18 +43,10 @@ jobs: steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - # cargo-binstall will try and use pre-built binaries if they are available and also speeds up - # installing `cross` - uses: cargo-bins/cargo-binstall@main - shell: bash run: cargo binstall --no-confirm just - # Set the network versioning based on our branch or workflow input - - name: provide network versioning - shell: bash - run: | - echo "NETWORK_VERSION_MODE=${{ github.event.inputs.network_version_mode || '' }}" >> $GITHUB_ENV - - name: build release artifacts shell: bash run: | @@ -76,6 +58,7 @@ jobs: path: | artifacts !artifacts/.cargo-lock + - name: post notification to slack on failure if: ${{ failure() }} uses: bryannice/gitactions-slack-notification@2.0.0 @@ -84,26 +67,20 @@ jobs: SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" SLACK_TITLE: "Release Failed" - release: - if: | - github.repository_owner == 'maidsafe' && - startsWith(github.event.head_commit.message, 'chore(release):') || - github.event_name == 'workflow_dispatch' - name: publish flows and release creation + s3-release: + if: ${{ + github.repository_owner == 'maidsafe' && + (github.ref == 'refs/heads/stable' || startsWith(github.ref, 'refs/heads/rc')) + }} + name: s3 release runs-on: ubuntu-latest needs: [ build ] env: AWS_ACCESS_KEY_ID: ${{ secrets.S3_DEPLOY_AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.S3_DEPLOY_AWS_SECRET_ACCESS_KEY }} AWS_DEFAULT_REGION: eu-west-2 - GH_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} - steps: - uses: actions/checkout@v4 - with: - fetch-depth: "0" - token: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} - - uses: actions/download-artifact@master with: name: safe_network-x86_64-pc-windows-msvc @@ -132,42 +109,13 @@ jobs: with: name: safe_network-aarch64-unknown-linux-musl path: artifacts/aarch64-unknown-linux-musl/release - - shell: bash - run: | - git config --local user.email "action@github.com" - git config --local user.name "GitHub Action" - # It's possible to `cargo install` these tools, but it's very slow to compile on GHA infra. - # Therefore we just pull some binaries from the Github Releases. 
- - name: install tools + + - uses: cargo-bins/cargo-binstall@main + - name: install just shell: bash - run: | - curl -L -O $RELEASE_PLZ_BIN_URL - tar xvf release-plz-x86_64-unknown-linux-gnu.tar.gz - rm release-plz-x86_64-unknown-linux-gnu.tar.gz - sudo mv release-plz /usr/local/bin - - curl -L -O $JUST_BIN_URL - mkdir just - tar xvf just-1.25.2-x86_64-unknown-linux-musl.tar.gz -C just - rm just-1.25.2-x86_64-unknown-linux-musl.tar.gz - sudo mv just/just /usr/local/bin - rm -rf just - - # only publish if we're on the stable branch - - name: Conditionally remove 'publish = false' from workspace in release-plz.toml on stable branch - if: startsWith(github.ref_name, 'stable') - run: | - ls -la - sed -i '/^\[workspace\]/,/^\[/ {/^publish = false$/d;}' ./release-plz.toml - sed -i '/^\[workspace\]/,/^\[/ {/^git_release_draft = true$/d;}' ./release-plz.toml - sed -i '/^\[workspace\]/,/^\[/ {/^git_tag_enable = false$/d;}' ./release-plz.toml - - # only publish if we're on the stable branch - - name: Conditionally remove 'git_release_draft = true' from workspace in release-plz.toml on stable branch - if: startsWith(github.ref_name, 'stable') - run: | + run: cargo binstall --no-confirm just - - name: upload to s3 + - name: upload binaries to S3 shell: bash run: | # Package versioned assets as tar.gz and zip archives, and upload them to S3. @@ -180,68 +128,125 @@ jobs: # because the process gets the latest version from `crates.io` then downloads the binaries # from S3, using that version number. Uploading the binaries to S3 before publishing # ensures that they will exist after the new crate has been published. + just package-all-bins + just upload-all-packaged-bins-to-s3 + + github-release: + if: ${{ + github.repository_owner == 'maidsafe' && + (github.ref == 'refs/heads/stable' || startsWith(github.ref, 'refs/heads/rc')) + }} + name: github release + runs-on: ubuntu-latest + needs: [ build ] + + steps: + - uses: actions/checkout@v4 + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-pc-windows-msvc + path: artifacts/x86_64-pc-windows-msvc/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-unknown-linux-musl + path: artifacts/x86_64-unknown-linux-musl/release + - uses: actions/download-artifact@master + with: + name: safe_network-x86_64-apple-darwin + path: artifacts/x86_64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-apple-darwin + path: artifacts/aarch64-apple-darwin/release + - uses: actions/download-artifact@master + with: + name: safe_network-arm-unknown-linux-musleabi + path: artifacts/arm-unknown-linux-musleabi/release + - uses: actions/download-artifact@master + with: + name: safe_network-armv7-unknown-linux-musleabihf + path: artifacts/armv7-unknown-linux-musleabihf/release + - uses: actions/download-artifact@master + with: + name: safe_network-aarch64-unknown-linux-musl + path: artifacts/aarch64-unknown-linux-musl/release + + - uses: cargo-bins/cargo-binstall@main + - name: install just + shell: bash + run: cargo binstall --no-confirm just - just package-release-assets "faucet" - just package-release-assets "nat-detection" - just package-release-assets "node-launchpad" - just package-release-assets "safe" - just package-release-assets "safenode" - just package-release-assets "safenode_rpc_client" - just package-release-assets "safenode-manager" - just package-release-assets "safenodemand" - just package-release-assets "sn_auditor" - just upload-release-assets-to-s3 
"faucet" - just upload-release-assets-to-s3 "nat-detection" - just upload-release-assets-to-s3 "node-launchpad" - just upload-release-assets-to-s3 "safe" - just upload-release-assets-to-s3 "safenode" - just upload-release-assets-to-s3 "safenode-manager" - just upload-release-assets-to-s3 "safenodemand" - just upload-release-assets-to-s3 "safenode_rpc_client" - just upload-release-assets-to-s3 "sn_auditor" - - # unless release plz toml is changed (as above removing certain limits) - # github releases are drafts, and we do not publish to crates.io - - name: publish and release + - name: set package version shell: bash run: | - # The `release-plz` command publishes crates which had their versions bumped, and also - # creates Github releases. The binaries are then attached to the releases in the - # `upload-github-release-assets` target. - cargo login "${{ secrets.CRATES_IO_TOKEN }}" - # The use of 'awk' suppresses the annoying instrumentation output - # that makes the log difficult to read. - release-plz release --git-token ${{ secrets.VERSION_BUMP_COMMIT_PAT }} | \ - awk '{ if (!/^\s*in release with input/ && !/^\s{4}/) print }' + current_date=$(date +%Y.%m) + release_cycle=$(grep 'release-cycle:' release-cycle-info | awk '{print $2}') + release_cycle_counter=$(grep 'release-cycle-counter:' release-cycle-info | awk '{print $2}') + version="$current_date.$release_cycle.$release_cycle_counter" + echo "PACKAGE_VERSION=$version" >> $GITHUB_ENV + + - name: package release artifacts + shell: bash + run: just package-all-architectures - - name: create github release assets + # For the next two steps, it seems to be necessary to set `GITHUB_TOKEN` on the step rather + # than the job level. + - name: create release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + with: + tag_name: ${{ env.PACKAGE_VERSION }} + release_name: ${{ env.PACKAGE_VERSION }} + draft: false + prerelease: ${{ startsWith(github.ref, 'refs/heads/rc') && true || false }} + + - name: upload artifacts as assets + env: + GITHUB_TOKEN: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} shell: bash - run: just upload-github-release-assets + run: | + ( + cd packaged_architectures + ls | xargs gh release upload ${{ env.PACKAGE_VERSION }} + ) + + - name: post notification to slack on failure + if: ${{ failure() }} + uses: bryannice/gitactions-slack-notification@2.0.0 + env: + SLACK_INCOMING_WEBHOOK: ${{ secrets.SLACK_GH_ACTIONS_WEBHOOK_URL }} + SLACK_MESSAGE: "Please check the logs for the run at ${{ env.WORKFLOW_URL }}/${{ github.run_id }}" + SLACK_TITLE: "Release Failed" + + publish-crates: + if: ${{ github.repository_owner == 'maidsafe' && github.ref == 'refs/heads/stable' }} + needs: [ build, s3-release ] + runs-on: self-hosted + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: "0" + token: ${{ secrets.VERSION_BUMP_COMMIT_PAT }} + + # Required for the creation of tags + - shell: bash + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" - - name: upload as latest release + - uses: cargo-bins/cargo-binstall@main + - shell: bash + run: cargo binstall --no-confirm release-plz + + - name: publish crates shell: bash - if: github.event_name != 'workflow_dispatch' run: | - # Now upload the 'latest' versions to S3. This can be done later because the node manager - # does not depend on these existing. 
- just package-release-assets "faucet" "latest" - just package-release-assets "nat-detection" "latest" - just package-release-assets "node-launchpad" "latest" - just package-release-assets "safe" "latest" - just package-release-assets "safenode" "latest" - just package-release-assets "safenode_rpc_client" "latest" - just package-release-assets "safenode-manager" "latest" - just package-release-assets "safenodemand" "latest" - just package-release-assets "sn_auditor" "latest" - just upload-release-assets-to-s3 "faucet" - just upload-release-assets-to-s3 "nat-detection" - just upload-release-assets-to-s3 "node-launchpad" - just upload-release-assets-to-s3 "safe" - just upload-release-assets-to-s3 "safenode" - just upload-release-assets-to-s3 "safenode-manager" - just upload-release-assets-to-s3 "safenodemand" - just upload-release-assets-to-s3 "safenode_rpc_client" - just upload-release-assets-to-s3 "sn_auditor" + cargo login "${{ secrets.CRATES_IO_TOKEN }}" + # The use of 'awk' suppresses the annoying instrumentation output that makes the log + # difficult to read. + release-plz release --git-token ${{ secrets.VERSION_BUMP_COMMIT_PAT }} | \ + awk '{ if (!/^\s*in release with input/ && !/^\s{4}/) print }' - name: post notification to slack on failure if: ${{ failure() }} diff --git a/.gitignore b/.gitignore index b19247f664..99b9fcf479 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,8 @@ /target/ /artifacts/ /deploy/ +/packaged_architectures/ +/packaged_bins/ # These are backup files generated by rustfmt **/*.rs.bk diff --git a/CHANGELOG.md b/CHANGELOG.md index 9ce23135f3..2d2915d56e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,175 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 *When editing this file, please respect a line length of 100.* +## 2024-07-25 + +### Binaries + +* `faucet` v0.4.31 +* `nat-detection` v0.2.1 +* `node-launchpad` v0.3.11 +* `safe` v0.94.0 +* `safenode` v0.110.0 +* `safenode-manager` v0.10.1 +* `safenodemand` v0.10.1 +* `safenode_rpc_client` v0.6.26 +* `sn_auditor` v0.2.3 + +### 🔦 Highlights + +* The introduction of a record-store cache has significantly reduced the node's disk IO. As a side + effect, the CPU does less work, and performance improves. RAM usage has increased by around 25MB per + node, but we view this as a reasonable trade-off. +* The node's relay server now supports more connections: when running with `--home-network`, up to + 256 will be supported; otherwise, it will be 1024. Along with minor tweaks to utilize the + relay server properly, this should hopefully result in fewer connections being dropped. +* Reward forwarding is more robust. +* Chunk verification is now probabilistic, which should reduce messaging. In combination with + replication messages also being reduced, this should result in a bandwidth usage reduction of + ~20%. +* Replication messages are less frequent, reducing bandwidth by ~20% per node. +* Bad nodes and nodes with a mismatched protocol are now added to a block list. This reduces the + chance of network interference and the impact of a bad node on the network. +* For the time being, hole punching has been removed. It was causing handshake timeouts, resulting + in home nodes being less stable. It will be re-enabled in the future. +* Wallet password encryption enhances security, and in the case of secret key leakage, prevents + unauthorized access.
+* Native Apple Silicon (M-series) binaries have been added to our releases, meaning M-series Mac + users do not have to rely on running Intel binaries with Rosetta. + +### Merged Pull Requests + +2024-07-11 [#1945](https://github.com/maidsafe/safe_network/pull/1945) -- feat: double spend spam protection + +2024-07-11 [#1952](https://github.com/maidsafe/safe_network/pull/1952) -- fix(auditor): create auditor directory if it doesn't exist + +2024-07-11 [#1951](https://github.com/maidsafe/safe_network/pull/1951) -- test(spend_simulation): add more attacks + +2024-07-11 [#1953](https://github.com/maidsafe/safe_network/pull/1953) -- chore/fix(resources): use more portable shebang + +2024-07-12 [#1959](https://github.com/maidsafe/safe_network/pull/1959) -- refactor outdated conn removal + +2024-07-12 [#1964](https://github.com/maidsafe/safe_network/pull/1964) -- refactor(cli)!: `wallet address` and `wallet create` changes + +2024-07-15 [#1946](https://github.com/maidsafe/safe_network/pull/1946) -- docs(sn_client): Basic documentation + +2024-07-15 [#1966](https://github.com/maidsafe/safe_network/pull/1966) -- fix(network): do not add bootstrap peer as relay candidate + +2024-07-16 [#1969](https://github.com/maidsafe/safe_network/pull/1969) -- chore(network): force close connection if there is a protocol mistmatch + +2024-07-16 [#1972](https://github.com/maidsafe/safe_network/pull/1972) -- feat(safenode_rpc_client): added `--version` flag + +2024-07-17 [#1973](https://github.com/maidsafe/safe_network/pull/1973) -- Auditor supplement features + +2024-07-17 [#1975](https://github.com/maidsafe/safe_network/pull/1975) -- feat(networking): remove self.close_group and checks there as unused + +2024-07-18 [#1976](https://github.com/maidsafe/safe_network/pull/1976) -- chore(networking): make ChunkVerification probabalistic + +2024-07-18 [#1949](https://github.com/maidsafe/safe_network/pull/1949) -- feat(wallet): wallet secret key file encryption + +2024-07-18 [#1977](https://github.com/maidsafe/safe_network/pull/1977) -- Reduce replication msg processing + +2024-07-18 [#1983](https://github.com/maidsafe/safe_network/pull/1983) -- fix(node): remove cn from disk and flush to confirmed_spends during forwarding + +2024-07-18 [#1980](https://github.com/maidsafe/safe_network/pull/1980) -- feat(networking): add small record cache + +2024-07-18 [#1982](https://github.com/maidsafe/safe_network/pull/1982) -- feat(network): implement blocklist behaviour + +2024-07-18 [#1984](https://github.com/maidsafe/safe_network/pull/1984) -- chore(node): move sn_client to dev deps + +2024-07-18 [#1985](https://github.com/maidsafe/safe_network/pull/1985) -- Fix Nano count disappearing from Launchpad after restart + +2024-07-19 [#1971](https://github.com/maidsafe/safe_network/pull/1971) -- feat!: limit error surface + +2024-07-19 [#1986](https://github.com/maidsafe/safe_network/pull/1986) -- Add native Apple Silicon binaries to the release artifacts + +2024-07-19 [#1955](https://github.com/maidsafe/safe_network/pull/1955) -- feat(networking): relax relay limits + +2024-07-24 [#1990](https://github.com/maidsafe/safe_network/pull/1990) -- chore: implement new process in release workflow + +### Detailed Changes + +#### Network + +##### Added + +- Protection against an attack allowing bad nodes or clients to shadow a spend (make it disappear) + through spamming. +- Nodes allow more relayed connections through them. Also, home nodes will relay through 4 nodes + instead of 2. 
Without these changes, relays were denying new connections to home nodes, making them + difficult to reach. +- Auditor tracks forwarded payments using the default key. +- Auditor tracks burnt spend attempts and only credits them once. +- Auditor collects balance of UTXOs. +- Added different attack types to the spend simulation test to ensure spend validation is solid. +- Bad nodes and nodes with a mismatched protocol are now added to a block list. This reduces the + chance of network interference and the impact of a bad node on the network. +- The introduction of a record-store cache has significantly reduced the node's disk IO. As a side + effect, the CPU does less work, and performance improves. RAM usage has increased by around 25MB per + node, but we view this as a reasonable trade-off. + +##### Changed + +- For the time being, hole punching has been removed. It was causing handshake timeouts, resulting + in home nodes being less stable. It will be re-enabled in the future. +- Force connection closure if a peer is using a different protocol. +- Reserve trace-level logs for tracking event statistics. Now you can use `SN_LOG=v` to get more + relevant logs without being overwhelmed by event handling stats. +- Chunk verification is now probabilistic, which should reduce messaging. In combination with + replication messages also being reduced, this should result in a bandwidth usage reduction of + ~20%. + +##### Fixed + +- During payment forwarding, CashNotes are removed from disk and confirmed spends are stored to + disk. This is necessary for resolving burnt spend attempts for forwarded payments. +- Fixed a bug where the auditor was not storing data to disk because of a missing directory. +- Bootstrap peers are not added as relay candidates, as we do not want to overwhelm them. + +#### Client + +##### Added + +- Basic global documentation for the `sn_client` crate. +- Option to encrypt the wallet private key with a password, in a file called + `main_secret_key.encrypted`, inside the wallet directory. +- Option to load a wallet from an encrypted secret-key file using a password. +- The `wallet create` command provides a `--password` argument to encrypt the wallet. +- The `wallet create` command provides a `--no-password` argument to skip encryption. +- The `wallet create` command provides a `--no-replace` argument to suppress a prompt to replace an + existing wallet. +- The `wallet create` command provides a `--key` argument to create a wallet from a hex-encoded + private key. +- The `wallet create` command provides a `--derivation` argument to set a derivation passphrase to + be used with the mnemonic to create a new private key. +- A new `wallet encrypt` command encrypts an existing wallet. + +##### Changed + +- The `wallet address` command no longer creates a new wallet if no wallet exists. +- The `wallet create` command creates a wallet using the account mnemonic instead of requiring a + hex-encoded secret key. +- The `wallet create` `--key` and `--derivation` arguments are mutually exclusive. + +#### Launchpad + +##### Fixed + +- The `Total Nanos Earned` stat no longer resets on restart. + +#### RPC Client + +##### Added + +- A `--version` argument shows the binary version. + +#### Other + +##### Added + +- Native Apple Silicon (M-series) binaries have been added to our releases, meaning M-series Mac + users do not have to rely on running Intel binaries with Rosetta.
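To make the new wallet options in the changelog above concrete, here are some illustrative invocations. Only the command and flag names come from the changelog entries; the password and key values are placeholders, and whether `--password` accepts an inline value or triggers a prompt is an assumption here:

```bash
# Create a wallet from the account mnemonic and encrypt it with a password.
safe wallet create --password "<your-password>"

# Create an unencrypted wallet from a hex-encoded private key.
# Note: --key and --derivation are mutually exclusive.
safe wallet create --no-password --key "<hex-encoded-secret-key>"

# Encrypt a wallet that already exists on disk.
safe wallet encrypt
```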
+ ## 2024-07-10 ### Binaries diff --git a/Cargo.lock b/Cargo.lock index a95719c1a5..03451177cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4525,7 +4525,7 @@ dependencies = [ [[package]] name = "nat-detection" -version = "0.2.0" +version = "0.2.1" dependencies = [ "clap", "clap-verbosity-flag", @@ -4640,7 +4640,7 @@ dependencies = [ [[package]] name = "node-launchpad" -version = "0.3.10" +version = "0.3.11" dependencies = [ "atty", "better-panic", @@ -6912,7 +6912,7 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "sn-node-manager" -version = "0.10.0" +version = "0.10.1" dependencies = [ "assert_cmd", "assert_fs", @@ -6974,7 +6974,7 @@ dependencies = [ [[package]] name = "sn_auditor" -version = "0.2.2" +version = "0.2.3" dependencies = [ "blsttc", "clap", @@ -7008,14 +7008,14 @@ dependencies = [ [[package]] name = "sn_build_info" -version = "0.1.9" +version = "0.1.10" dependencies = [ "vergen", ] [[package]] name = "sn_cli" -version = "0.93.9" +version = "0.94.0" dependencies = [ "aes 0.7.5", "base64 0.22.1", @@ -7057,7 +7057,7 @@ dependencies = [ [[package]] name = "sn_client" -version = "0.108.0" +version = "0.109.0" dependencies = [ "assert_matches", "async-trait", @@ -7140,7 +7140,7 @@ dependencies = [ [[package]] name = "sn_faucet" -version = "0.4.30" +version = "0.4.31" dependencies = [ "assert_fs", "base64 0.22.1", @@ -7172,7 +7172,7 @@ dependencies = [ [[package]] name = "sn_logging" -version = "0.2.30" +version = "0.2.31" dependencies = [ "chrono", "color-eyre", @@ -7197,7 +7197,7 @@ dependencies = [ [[package]] name = "sn_metrics" -version = "0.1.10" +version = "0.1.11" dependencies = [ "clap", "color-eyre", @@ -7211,7 +7211,7 @@ dependencies = [ [[package]] name = "sn_networking" -version = "0.17.0" +version = "0.17.1" dependencies = [ "aes-gcm-siv", "async-trait", @@ -7254,7 +7254,7 @@ dependencies = [ [[package]] name = "sn_node" -version = "0.109.0" +version = "0.110.0" dependencies = [ "assert_fs", "assert_matches", @@ -7308,7 +7308,7 @@ dependencies = [ [[package]] name = "sn_node_rpc_client" -version = "0.6.25" +version = "0.6.26" dependencies = [ "assert_fs", "async-trait", @@ -7335,7 +7335,7 @@ dependencies = [ [[package]] name = "sn_peers_acquisition" -version = "0.4.0" +version = "0.4.1" dependencies = [ "clap", "lazy_static", @@ -7351,7 +7351,7 @@ dependencies = [ [[package]] name = "sn_protocol" -version = "0.17.5" +version = "0.17.6" dependencies = [ "blsttc", "bytes", @@ -7378,7 +7378,7 @@ dependencies = [ [[package]] name = "sn_registers" -version = "0.3.15" +version = "0.3.16" dependencies = [ "blsttc", "crdts", @@ -7395,7 +7395,7 @@ dependencies = [ [[package]] name = "sn_service_management" -version = "0.3.8" +version = "0.3.9" dependencies = [ "async-trait", "dirs-next", @@ -7421,7 +7421,7 @@ dependencies = [ [[package]] name = "sn_transfers" -version = "0.18.8" +version = "0.18.9" dependencies = [ "assert_fs", "blsttc", @@ -7754,7 +7754,7 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test_utils" -version = "0.4.1" +version = "0.4.2" dependencies = [ "color-eyre", "dirs-next", @@ -7886,7 +7886,7 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "token_supplies" -version = "0.1.48" +version = "0.1.49" dependencies = [ "dirs-next", "reqwest 0.11.27", diff --git a/Justfile b/Justfile index 84a4b0980b..450ea0ca71 100644 --- a/Justfile +++ b/Justfile @@ -107,12 +107,7 @@ build-release-artifacts arch: mkdir 
artifacts cargo clean - echo "Using the keys: GENESIS_PK=$GENESIS_PK, FOUNDATION_PK=$FOUNDATION_PK, NETWORK_ROYALTIES_PK=$NETWORK_ROYALTIES_PK, PAYMENT_FORWARD_PK=$PAYMENT_FORWARD_PK" cross_container_opts="--env \"GENESIS_PK=$GENESIS_PK\" --env \"GENESIS_SK=$GENESIS_SK\" --env \"FOUNDATION_PK=$FOUNDATION_PK\" --env \"NETWORK_ROYALTIES_PK=$NETWORK_ROYALTIES_PK\" --env \"PAYMENT_FORWARD_PK=$PAYMENT_FORWARD_PK\"" - if [[ -n "${NETWORK_VERSION_MODE+x}" ]]; then - echo "The NETWORK_VERSION_MODE variable is set to $NETWORK_VERSION_MODE" - cross_container_opts="$cross_container_opts --env NETWORK_VERSION_MODE=$NETWORK_VERSION_MODE" - fi export CROSS_CONTAINER_OPTS=$cross_container_opts if [[ $arch == arm* || $arch == armv7* || $arch == aarch64* ]]; then @@ -166,7 +161,20 @@ make-artifacts-directory: rm safe_network-$arch.zip done -package-release-assets bin version="": +package-all-bins: + #!/usr/bin/env bash + set -e + just package-bin "faucet" + just package-bin "nat-detection" + just package-bin "node-launchpad" + just package-bin "safe" + just package-bin "safenode" + just package-bin "safenode_rpc_client" + just package-bin "safenode-manager" + just package-bin "safenodemand" + just package-bin "sn_auditor" + +package-bin bin version="": #!/usr/bin/env bash set -e @@ -225,7 +233,6 @@ package-release-assets bin version="": sn_auditor) crate_dir_name="sn_auditor" ;; - *) echo "The $bin binary is not supported" exit 1 @@ -244,7 +251,7 @@ package-release-assets bin version="": exit 1 fi - rm -rf deploy/$bin + rm -rf packaged_bins/$bin find artifacts/ -name "$bin" -exec chmod +x '{}' \; for arch in "${architectures[@]}" ; do echo "Packaging for $arch..." @@ -253,95 +260,30 @@ package-release-assets bin version="": tar -C artifacts/$arch/release -zcvf $bin-$version-$arch.tar.gz $bin_name done - mkdir -p deploy/$bin - mv *.tar.gz deploy/$bin - mv *.zip deploy/$bin + mkdir -p packaged_bins/$bin + mv *.tar.gz packaged_bins/$bin + mv *.zip packaged_bins/$bin -upload-github-release-assets: +upload-all-packaged-bins-to-s3: #!/usr/bin/env bash set -e - binary_crates=( - "sn_faucet" - "node-launchpad" - "sn_cli" - "sn_node" - "sn-node-manager" - "sn_node_rpc_client" - "sn_auditor" - "nat-detection" + binaries=( + faucet + nat-detection + node-launchpad + safe + safenode + safenode-manager + safenode_rpc_client + safenodemand + sn_auditor ) - - commit_msg=$(git log -1 --pretty=%B) - commit_msg=${commit_msg#*: } # Remove 'chore(release): ' prefix - - IFS='/' read -ra crates_with_versions <<< "$commit_msg" - declare -a crate_names - for crate_with_version in "${crates_with_versions[@]}"; do - crate=$(echo "$crate_with_version" | awk -F'-v' '{print $1}') - crates+=("$crate") - done - - for crate in "${crates[@]}"; do - for binary_crate in "${binary_crates[@]}"; do - if [[ "$crate" == "$binary_crate" ]]; then - case "$crate" in - sn_faucet) - bin_name="faucet" - bucket="sn-faucet" - ;; - node-launchpad) - bin_name="node-launchpad" - bucket="node-launchpad" - ;; - sn_cli) - bin_name="safe" - bucket="sn-cli" - ;; - sn_node) - bin_name="safenode" - bucket="sn-node" - ;; - sn-node-manager) - bin_name="safenode-manager" - bucket="sn-node-manager" - ;; - sn_node_rpc_client) - bin_name="safenode_rpc_client" - bucket="sn-node-rpc-client" - ;; - sn_auditor) - bin_name="sn_auditor" - bucket="sn-auditor" - ;; - nat-detection) - bin_name="nat-detection" - bucket="nat-detection" - ;; - *) - echo "The $crate crate is not supported" - exit 1 - ;; - esac - # The crate_with_version variable will correspond to the tag name of 
the release. - # However, only binary crates have releases, so we need to skip any tags that don't - # correspond to a binary. - for crate_with_version in "${crates_with_versions[@]}"; do - if [[ $crate_with_version == $crate-v* ]]; then - ( - cd deploy/$bin_name - if [[ "$crate" == "node-launchpad" || "$crate" == "sn_cli" || "$crate" == "sn_node" || "$crate" == "sn-node-manager" || "$crate" == "sn_auditor" ]]; then - echo "Uploading $bin_name assets to $crate_with_version release..." - ls | xargs gh release upload $crate_with_version --repo {{release_repo}} - fi - ) - fi - done - fi - done + for binary in "${binaries[@]}"; do + just upload-packaged-bin-to-s3 "$binary" done -upload-release-assets-to-s3 bin_name: +upload-packaged-bin-to-s3 bin_name: #!/usr/bin/env bash set -e @@ -379,7 +321,7 @@ upload-release-assets-to-s3 bin_name: ;; esac - cd deploy/{{bin_name}} + cd packaged_bins/{{bin_name}} for file in *.zip *.tar.gz; do dest="s3://$bucket/$file" if [[ "$file" == *latest* ]]; then @@ -387,16 +329,77 @@ aws s3 cp "$file" "$dest" --acl public-read else if aws s3 ls "$dest" > /dev/null 2>&1; then - echo "$dest already exists. This suggests an error somewhere." - echo "If you intentionally want to overwrite, remove the file and run the workflow again." - exit 1 + echo "$dest already exists. Will not overwrite." else - aws s3 cp "$file" "$dest" --acl public-read - echo "$dest uploaded." + # This command outputs a lot of text which makes the build log difficult to read, so we will + # suppress it. + aws s3 cp "$file" "$dest" --acl public-read > /dev/null 2>&1 + echo "$dest uploaded" fi fi done +package-all-architectures: + #!/usr/bin/env bash + set -e + + architectures=( + "x86_64-pc-windows-msvc" + "x86_64-apple-darwin" + "aarch64-apple-darwin" + "x86_64-unknown-linux-musl" + "arm-unknown-linux-musleabi" + "armv7-unknown-linux-musleabihf" + "aarch64-unknown-linux-musl" + ) + + rm -rf packaged_architectures + for arch in "${architectures[@]}" ; do + echo "Packaging artifacts for $arch..." + just package-arch "$arch" + done + +package-arch arch: + #!/usr/bin/env bash + set -e + + if [[ -n $PACKAGE_VERSION ]]; then + version="$PACKAGE_VERSION" + else + current_date=$(date +%Y.%m) + release_cycle=$(grep 'release-cycle:' release-cycle-info | awk '{print $2}') + release_cycle_counter=$(grep 'release-cycle-counter:' release-cycle-info | awk '{print $2}') + version="$current_date.$release_cycle.$release_cycle_counter" + fi + architecture="{{arch}}" + zip_filename="${version}.autonomi.${architecture}.zip" + + mkdir -p packaged_architectures + cd artifacts/$architecture/release + + binaries=( + faucet + nat-detection + node-launchpad + safe + safenode + safenode-manager + safenode_rpc_client + safenodemand + sn_auditor + ) + + if [[ "$architecture" == *"windows"* ]]; then + for binary in "${binaries[@]}"; do + binaries_with_extension+=("$binary.exe") + done + zip "../../../packaged_architectures/$zip_filename" "${binaries_with_extension[@]}" + else + zip "../../../packaged_architectures/$zip_filename" "${binaries[@]}" + fi + + cd ../../..
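As a worked example of the version derivation used by `package-arch`: the logic below is lifted straight from the recipe above, and the concrete output assumes the `release-cycle-info` values introduced later in this diff together with a July 2024 build date:

```bash
#!/usr/bin/env bash
# Derive the collective package version used in the zip filename.
current_date=$(date +%Y.%m)   # e.g. "2024.07"
release_cycle=$(grep 'release-cycle:' release-cycle-info | awk '{print $2}')                  # 1
release_cycle_counter=$(grep 'release-cycle-counter:' release-cycle-info | awk '{print $2}')  # 1
version="$current_date.$release_cycle.$release_cycle_counter"
echo "$version"                                            # e.g. "2024.07.1.1"
echo "${version}.autonomi.x86_64-unknown-linux-musl.zip"   # example zip filename
```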
+ node-man-integration-tests: #!/usr/bin/env bash set -e diff --git a/nat-detection/Cargo.toml b/nat-detection/Cargo.toml index 3eafb22135..35c4cfab43 100644 --- a/nat-detection/Cargo.toml +++ b/nat-detection/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "nat-detection" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.2.0" +version = "0.2.1" [[bin]] name = "nat-detection" @@ -28,7 +28,7 @@ libp2p = { version = "0.53", features = [ "macros", "upnp", ] } -sn_networking = { path = "../sn_networking", version = "0.17.0" } +sn_networking = { path = "../sn_networking", version = "0.17.1" } tokio = { version = "1.32.0", features = ["full"] } tracing = { version = "~0.1.26" } tracing-log = "0.2.0" diff --git a/node-launchpad/Cargo.toml b/node-launchpad/Cargo.toml index eae344f53d..813f973d53 100644 --- a/node-launchpad/Cargo.toml +++ b/node-launchpad/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers <dev@maidsafe.net>"] description = "Node Launchpad" name = "node-launchpad" -version = "0.3.10" +version = "0.3.11" edition = "2021" license = "GPL-3.0" homepage = "https://maidsafe.net" @@ -48,10 +48,10 @@ reqwest = { version = "0.12.2", default-features = false, features = [ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" signal-hook = "0.3.17" -sn-node-manager = { version = "0.10.0", path = "../sn_node_manager" } -sn_peers_acquisition = { version = "0.4.0", path = "../sn_peers_acquisition" } +sn-node-manager = { version = "0.10.1", path = "../sn_node_manager" } +sn_peers_acquisition = { version = "0.4.1", path = "../sn_peers_acquisition" } sn-releases = "~0.2.6" -sn_service_management = { version = "0.3.8", path = "../sn_service_management" } +sn_service_management = { version = "0.3.9", path = "../sn_service_management" } strip-ansi-escapes = "0.2.0" strum = { version = "0.26.1", features = ["derive"] } sysinfo = "0.30.12" diff --git a/release-cycle-info b/release-cycle-info new file mode 100644 index 0000000000..14b23f7ad5 --- /dev/null +++ b/release-cycle-info @@ -0,0 +1,10 @@ +# The release-cycle is the cycle within the current month. It will be 1 or 2. It is set at the +# beginning of the cycle. +# +# The release-cycle-counter is initially set to 1 at the beginning of each cycle, and during the +# cycle, it will be incremented for each RC build. +# +# Both of these numbers are used in the packaged version number, which is a collective version +# number for all the released binaries.
+release-cycle: 1 +release-cycle-counter: 1 diff --git a/release-plz.toml b/release-plz.toml index 41d3c9b000..e896f4f03c 100644 --- a/release-plz.toml +++ b/release-plz.toml @@ -1,143 +1,8 @@ [workspace] -publish = false -git_release_draft = true -git_tag_enable = false -allow_dirty = false -changelog_update = true -dependencies_update = false -git_release_enable = true -publish_allow_dirty = false -semver_check = false -git_release_type = "auto" -release = false - -[[package]] -name = "sn_auditor" -changelog_update = true -git_release_enable = true -release = true - -[[package]] -name = "sn_build_info" -changelog_update = true -git_release_enable = false -release = true - -[[package]] -name = "sn_cli" -release = true -changelog_update = true -changelog_include = [ - "sn_client", - "sn_networking", - "sn_transfers", - "sn_registers", - "sn_peers_acquisition", - "sn_protocol", -] - -[[package]] -name = "sn_client" -release = true -changelog_update = true -git_release_enable = false -changelog_include = [ - "sn_networking", - "sn_transfers", - "sn_registers", - "sn_peers_acquisition", - "sn_protocol", -] - -[[package]] -name = "sn_faucet" -release = true -changelog_update = true -git_release_enable = false - -[[package]] -name = "sn_logging" -release = true -changelog_update = true -git_release_enable = false - -[[package]] -name = "sn_metrics" -release = true -changelog_update = true -git_release_enable = false - - -[[package]] -name = "sn_networking" -release = true -changelog_update = true -git_release_enable = false - -[[package]] -name = "sn_node" -release = true -changelog_update = true -changelog_include = [ - "sn_networking", - "sn_transfers", - "sn_registers", - "sn_peers_acquisition", - "sn_protocol", -] - -[[package]] -name = "sn-node-manager" -release = true -changelog_update = true -changelog_include = [ - "sn_node", - "sn_peers_acquisition", - "sn_protocol", - "sn_service_management", - "sn_transfers", -] - - -[[package]] -name = "node-launchpad" -release = true -changelog_update = true -git_release_enable = true -changelog_include = ["sn_node", "sn_protocol", "sn-node-manager"] - -[[package]] -name = "sn_node_rpc_client" -release = true -changelog_update = true -git_release_enable = false - -[[package]] -name = "sn_peers_acquisition" -release = true -changelog_update = true -git_release_enable = false - -[[package]] -name = "sn_protocol" -release = true -changelog_update = true -git_release_enable = false - -[[package]] -name = "sn_registers" -release = true -changelog_update = true -git_release_enable = false - -[[package]] -name = "sn_service_management" -release = true -changelog_update = true +changelog_update = false git_release_enable = false +semver_check = false [[package]] -name = "sn_transfers" -release = true -changelog_update = true -git_release_enable = false +name = "test_utils" +release = false diff --git a/resources/rc_template.md b/resources/rc_template.md new file mode 100644 index 0000000000..9ad5f0f5b9 --- /dev/null +++ b/resources/rc_template.md @@ -0,0 +1,108 @@ +# Release Candidate YYYY.MM.X.Y + +*PLEASE DO NOT EDIT THIS POST.* + +It should only be edited by the RC owner, i.e., the original poster. 
+ +## Binary Versions + +* `faucet` __REPLACE__ +* `nat-detection` __REPLACE__ +* `node-launchpad` __REPLACE__ +* `safe` __REPLACE__ +* `safenode` __REPLACE__ +* `safenode-manager` __REPLACE__ +* `safenodemand` __REPLACE__ +* `sn_auditor` __REPLACE__ +* `safenode_rpc_client` __REPLACE__ + +## Closed Pull Requests + +Replace this with the list of closed PRs since the last version. This is intended to help developers +with their contributions to the changelog. + +## Changelog/Testing Contributions + +Please reply with a description of any contributions you made that will be included in this release. +The list of closed PRs is provided for reference. You can also provide direction or suggestions as +to how we could test your contributions with community participation. + +Use this checklist to track the changelog contributions that are needed. + +*Remove people who didn't close any PRs during this cycle.* + +Your name will be ticked off when your reply is provided: + +- [ ] Anselme +- [ ] Benno +- [ ] Chris +- [ ] Josh +- [ ] Mazzi +- [ ] Mick +- [ ] Qi +- [ ] Roland + +## Contribution Template + +To provide your contributions for the changelog, the template below can be used: +``` +### Network + +#### Added + +- Provide any added entries or remove the section if it doesn't apply + +#### Changed + +- Provide any changed entries or remove the section if it doesn't apply + +#### Fixed + +- Provide any fixed entries or remove the section if it doesn't apply + +### Client + +#### Added + +- Provide any added entries or remove the section if it doesn't apply + +#### Changed + +- Provide any changed entries or remove the section if it doesn't apply + +#### Fixed + +- Provide any fixed entries or remove the section if it doesn't apply +### Node Manager + +#### Added + +- Provide any added entries or remove the section if it doesn't apply + +#### Changed + +- Provide any changed entries or remove the section if it doesn't apply + +#### Fixed + +- Provide any fixed entries or remove the section if it doesn't apply + +### Launchpad + +Remove whole section if it does not apply. + +#### Added + +- Provide any added entries or remove the section if it doesn't apply + +#### Changed + +- Provide any changed entries or remove the section if it doesn't apply + +#### Fixed + +- Provide any fixed entries or remove the section if it doesn't apply +``` + +If you have any suggestions for testing your contributions with the community, please add them to +your reply, or provide them as a separate reply in the thread. diff --git a/resources/scripts/bump_version.sh b/resources/scripts/bump_version.sh deleted file mode 100755 index d10b2db7ba..0000000000 --- a/resources/scripts/bump_version.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash - -set -e - -# Suffix to append to the version. Passed as an argument to this script. -SUFFIX="$1" - -# Ensure cargo set-version is installed -if ! cargo set-version --help > /dev/null 2>&1; then - echo "cargo set-version command not found." - echo "Please install cargo-edit with the command: cargo install cargo-edit --features vendored-openssl" - exit 1 -fi - -release-plz update 2>&1 | tee bump_version_output - -crates_bumped=() -while IFS= read -r line; do - name=$(echo "$line" | awk -F"\`" '{print $2}') - version=$(echo "$line" | awk -F"-> " '{print $2}') - crates_bumped+=("${name}-v${version}") -done < <(cat bump_version_output | grep "^\*") - - -if [[ -z "$SUFFIX" ]]; then - echo "Removing any existing suffixes and bumping versions to stable."
- for crate in $(cargo metadata --no-deps --format-version 1 | jq -r '.packages[] | .name'); do - version=$(cargo metadata --no-deps --format-version 1 | jq -r --arg crate_name "$crate" '.packages[] | select(.name==$crate_name) | .version') - new_version=$(echo "$version" | sed -E 's/(-alpha\.[0-9]+|-beta\.[0-9]+)$//') - if [[ "$version" != "$new_version" ]]; then - echo "Removing suffix from $crate, setting version to $new_version" - cargo set-version -p $crate $new_version - crates_bumped+=("${crate}-v${new_version}") - fi - done -fi - - -if [[ -n "$SUFFIX" ]]; then - echo "We are releasing to the $SUFFIX channel" - echo "Versions with $SUFFIX are not supported by release-plz" - echo "Reverting changes by release-plz" - git checkout -- . -fi - -commit_message="chore(release): " -for crate in "${crates_bumped[@]}"; do - # Extract the crate name and version in a cross-platform way - crate_name=$(echo "$crate" | sed -E 's/-v.*$//') - version=$(echo "$crate" | sed -E 's/^.*-v(.*)$/\1/') - new_version=$version - - echo "----------------------------------------------------------" - echo "Processing $crate_name" - echo "----------------------------------------------------------" - if [[ -n "$SUFFIX" ]]; then - # if we're already in a release channel, reapplying the suffix will reset things. - if [[ "$version" == *"-alpha."* || "$version" == *"-beta."* ]]; then - base_version=$(echo "$version" | sed -E 's/(-alpha\.[0-9]+|-beta\.[0-9]+)$//') - pre_release_identifier=$(echo "$version" | sed -E 's/.*-(alpha|beta)\.([0-9]+)$/\2/') - new_version="${base_version}-${SUFFIX}.$pre_release_identifier" - else - new_version="${version}-${SUFFIX}.0" - fi - else - # For main release, strip any alpha or beta suffix from the version - new_version=$(echo "$version" | sed -E 's/(-alpha\.[0-9]+|-beta\.[0-9]+)$//') - fi - - echo "Using set-version to apply $new_version to $crate_name" - cargo set-version -p $crate_name $new_version - commit_message="${commit_message}${crate_name}-v$new_version/" # append crate to commit message -done -commit_message=${commit_message%/} # strip off trailing '/' character - -git add --all -git commit -m "$commit_message" -echo "Generated release commit: $commit_message" diff --git a/resources/scripts/bump_version_for_rc.sh b/resources/scripts/bump_version_for_rc.sh new file mode 100755 index 0000000000..655345e199 --- /dev/null +++ b/resources/scripts/bump_version_for_rc.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash + +set -e + +# This script must run from the root of the repository. + +# This allows for, e.g., "alpha" to be passed when calling the script. +pre_release_identifier=${1:-"rc"} + +all_crates=($(awk '/members = \[/{flag=1; next} /\]/{flag=0} flag {gsub(/[",]/, ""); print $0}' \ + Cargo.toml)) + +if ! cargo set-version --help > /dev/null 2>&1; then + echo "cargo set-version not found" + echo "Please install cargo-edit: cargo install cargo-edit --features vendored-openssl" + exit 1 +fi + +declare -A crates_bumped +crates_bumped_with_version=() + +release-plz update 2>&1 | tee bump_version_output + +while IFS= read -r line; do + # Sometimes this list can include crates that were not bumped. The presence of "->" indicates + # whether a bump occurred.
+ if [[ "$line" == *"->"* ]]; then + name=$(echo "$line" | awk -F"\`" '{print $2}') + version=$(echo "$line" | awk -F"-> " '{print $2}') + crates_bumped["$name"]=1 + crates_bumped_with_version+=("${name}-v${version}") + fi +done < <(cat bump_version_output | grep "^\*") + +# The bumps performed by release-plz need to be reverted, because going to an `rc` pre-release +# specifier is considered a downgrade, so `set-version` won't do it. We will take the bumps that +# release-plz provided and use `set-version` to put the `rc` specifier on them. +git checkout -- . + +for crate in "${crates_bumped_with_version[@]}"; do + name=$(echo "$crate" | sed -E 's/-v.*$//') + version=$(echo "$crate" | sed -E 's/^.*-v(.*)$/\1/') + new_version="${version}-${pre_release_identifer}.1" + echo "Setting $crate to $new_version" + cargo set-version --package $name $new_version +done + +echo "Now performing safety bumps for any crates not bumped by release-plz..." +for crate in "${all_crates[@]}"; do + # The node manager is an annoying special case where the directory and crate name don't match. + if [[ $crate == "sn_node_manager" ]]; then + crate="sn-node-manager" + fi + + if [[ -z "${crates_bumped[$crate]}" ]]; then + echo "===============================" + echo " Safety bump for $crate" + echo "===============================" + echo "release-plz did not bump $crate" + version=$(grep "^version" < $crate/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g') + echo "Current version is $version" + + IFS='.' read -r major minor patch <<< "$version" + patch=$((patch + 1)) + new_version="${major}.${minor}.${patch}-${pre_release_identifer}.1" + + echo "Safety bump to $new_version" + cargo set-version --package $crate $new_version + fi +done + +echo "======================" +echo " New Crate Versions " +echo "======================" +for crate in "${all_crates[@]}"; do + version=$(grep "^version" < $crate/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g') + echo "$crate: $version" +done + +echo "=======================" +echo " New Binary Versions " +echo "=======================" +echo "faucet: $(grep "^version" < sn_faucet/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "nat-detection: $(grep "^version" < nat-detection/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "node-launchpad: $(grep "^version" < node-launchpad/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safe: $(grep "^version" < sn_cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safenode: $(grep "^version" < sn_node/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safenode-manager: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safenode_rpc_client: $(grep "^version" < sn_node_rpc_client/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safenodemand: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "sn_auditor: $(grep "^version" < sn_auditor/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" diff --git a/resources/scripts/list-safe-network-closed-prs.py b/resources/scripts/list-safe-network-closed-prs.py new file mode 100755 index 0000000000..6355703c43 --- /dev/null +++ b/resources/scripts/list-safe-network-closed-prs.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python + +import os +import sys +from collections import defaultdict +from github import Github + +def has_breaking_change(commits): + for 
commit in commits: + commit_message = commit.commit.message + if '!' in commit_message.split('\n')[0] or 'BREAKING CHANGE' in commit_message: + return True + return False + + +def main(last_released_pr_number): + token = os.getenv("GITHUB_PAT_SAFE_NETWORK_PR_LIST") + if not token: + raise Exception("The GITHUB_PAT_SAFE_NETWORK_PR_LIST environment variable must be set") + + g = Github(token) + repo = g.get_repo("maidsafe/safe_network") + + last_released_pr = repo.get_pull(last_released_pr_number) + if not last_released_pr: + raise Exception(f"Could not retrieve PR #{last_released_pr_number}") + last_release_date = last_released_pr.closed_at + if not last_release_date: + raise Exception(f"PR #{last_released_pr_number} has not been merged") + + print("Base comparison PR:") + print(f"#{last_released_pr.number}: {last_released_pr.title} closed at {last_released_pr.closed_at}") + print() + + pulls = repo.get_pulls(state="closed", sort="updated", direction="desc") + filtered_pulls = [] + for pr in pulls: + if not pr.closed_at: + print(f"PR {pr.number} is not closed yet") + continue + print(f"Processing PR {pr.number}...") + if pr.closed_at <= last_release_date: + break + if pr.merged_at: + commits = pr.get_commits() + breaking = has_breaking_change(commits) + filtered_pulls.append({ + "number": pr.number, + "title": pr.title, + "author": pr.user.login, + "closed_at": pr.closed_at, + "breaking": breaking, + "commits": commits + }) + filtered_pulls.sort(key=lambda pr: pr["closed_at"]) + + print("Flat list:") + for pr in filtered_pulls: + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f"{closed_date} #{pr['number']} -- {pr['title']} [@{pr['author']}] {breaking_text}") + print("Flat list markdown:") + for pr in filtered_pulls: + pr_number = pr["number"] + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f"{closed_date} [#{pr_number}](https://github.com/maidsafe/safe_network/pull/{pr_number}) -- {pr['title']} [@{pr['author']}] {breaking_text}") + + print() + grouped_pulls = defaultdict(list) + for pr in filtered_pulls: + grouped_pulls[pr["author"]].append(pr) + + print("Grouped by author:") + for author, prs in grouped_pulls.items(): + print(f"@{author}") + for pr in prs: + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f" {closed_date} #{pr['number']} -- {pr['title']} {breaking_text}") + print() + + print("Grouped by author with commits:") + for author, prs in grouped_pulls.items(): + print(f"@{author}") + for pr in prs: + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f" {closed_date} #{pr['number']} -- {pr['title']} {breaking_text}") + for commit in pr["commits"]: + print(f" {commit.commit.message.splitlines()[0]}") + print() + + print("Grouped by author markdown:") + for author, prs in grouped_pulls.items(): + print(f"@{author}") + for pr in prs: + pr_number = pr["number"] + closed_date = pr["closed_at"].date() + breaking_text = "[BREAKING]" if pr["breaking"] else "" + print(f" {closed_date} [#{pr_number}](https://github.com/maidsafe/safe_network/pull/{pr_number}) -- {pr['title']} {breaking_text}") + print() + + +if __name__ == "__main__": + if len(sys.argv) != 2: + print("Usage: python script.py <last_released_pr_number>") + sys.exit(1) + + last_release_pr_number = int(sys.argv[1]) + main(last_release_pr_number) diff --git a/resources/scripts/print-versions.sh b/resources/scripts/print-versions.sh new file
mode 100755 index 0000000000..b2a75fdb49 --- /dev/null +++ b/resources/scripts/print-versions.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -e + +all_crates=($(awk '/members = \[/{flag=1; next} /\]/{flag=0} flag {gsub(/[",]/, ""); print $0}' \ + Cargo.toml)) + +echo "==================" +echo " Crate Versions " +echo "==================" +for crate in "${all_crates[@]}"; do + version=$(grep "^version" < $crate/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g') + echo "$crate: $version" +done + +echo "===================" +echo " Binary Versions " +echo "===================" +echo "faucet: $(grep "^version" < sn_faucet/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "nat-detection: $(grep "^version" < nat-detection/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "node-launchpad: $(grep "^version" < node-launchpad/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safe: $(grep "^version" < sn_cli/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safenode: $(grep "^version" < sn_node/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safenode-manager: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safenode_rpc_client: $(grep "^version" < sn_node_rpc_client/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "safenodemand: $(grep "^version" < sn_node_manager/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" +echo "sn_auditor: $(grep "^version" < sn_auditor/Cargo.toml | head -n 1 | awk '{ print $3 }' | sed 's/\"//g')" diff --git a/resources/scripts/set-release-channel.sh b/resources/scripts/set-release-channel.sh deleted file mode 100755 index d61928b3c0..0000000000 --- a/resources/scripts/set-release-channel.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -# Define the workspace Cargo.toml location (ensure you're in the workspace root) -WORKSPACE_CARGO_TOML="./Cargo.toml" - -# Suffix to append to the version. Passed as an argument to this script. -SUFFIX="$1" - -# Ensure the suffix starts with a dash if it's provided and not empty -if [ -n "$SUFFIX" ] && [[ "$SUFFIX" != -* ]]; then - SUFFIX="-$SUFFIX" -fi - -# Check if jq is installed -if ! command -v jq > /dev/null 2>&1; then - echo "jq is not installed. Please install jq to continue." - exit 1 -fi - - -# Check if the 'cargo set-version' command is available -if ! cargo set-version --help > /dev/null 2>&1; then - echo "cargo set-version command not found." - echo "Please install cargo-edit with the command: cargo install cargo-edit --features vendored-openssl" - exit 1 -fi - -# Function to update version for a single crate with suffix -update_version_with_suffix() { - local crate=$1 - local suffix=$2 - local current_version=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.name == \"$crate\") | .version") - # Perform the dry run to get the upgrade message - local dry_run_output=$(cargo set-version -p $crate --bump patch --dry-run 2>&1) - # Use grep and awk to extract the new version - local new_version=$(echo "$dry_run_output" | grep "Upgrading $crate from" | awk '{print $6}') - - echo "Updating $crate from $current_version to $new_version with suffix $suffix..." - cargo set-version -p $crate "$new_version$suffix" -} - -# Function to bump patch version for the whole workspace -bump_patch_version_for_workspace() { - echo "Bumping patch version for the whole workspace..." 
- cargo set-version --bump patch -} - -# Use cargo metadata and jq to parse workspace members -MEMBERS=$(cargo metadata --format-version 1 | jq -r '.workspace_members[] | split(" ") | .[0] | split("(") | .[0] | rtrimstr(")")') - -if [ -n "$SUFFIX" ]; then - # Update each crate with the new version and suffix - for member in $MEMBERS; do - member_name=$(echo $member | cut -d' ' -f1) - update_version_with_suffix "$member_name" "$SUFFIX" - done -else - # If no suffix is provided, bump the patch version for the whole workspace - bump_patch_version_for_workspace -fi - -echo "Version update process completed." diff --git a/sn_auditor/Cargo.toml b/sn_auditor/Cargo.toml index 7c8497118a..6919e4ae63 100644 --- a/sn_auditor/Cargo.toml +++ b/sn_auditor/Cargo.toml @@ -2,7 +2,7 @@ authors = ["MaidSafe Developers "] description = "Safe Network Auditor" name = "sn_auditor" -version = "0.2.2" +version = "0.2.3" edition = "2021" homepage = "https://maidsafe.net" repository = "https://github.com/maidsafe/safe_network" @@ -31,9 +31,9 @@ graphviz-rust = { version = "0.9.0", optional = true } lazy_static = "1.4.0" serde = { version = "1.0.133", features = ["derive", "rc"] } serde_json = "1.0.108" -sn_client = { path = "../sn_client", version = "0.108.0" } -sn_logging = { path = "../sn_logging", version = "0.2.30" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } +sn_client = { path = "../sn_client", version = "0.109.0" } +sn_logging = { path = "../sn_logging", version = "0.2.31" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } tiny_http = { version = "0.12", features = ["ssl-rustls"] } tracing = { version = "~0.1.26" } tokio = { version = "1.32.0", features = [ diff --git a/sn_build_info/Cargo.toml b/sn_build_info/Cargo.toml index 43517827bb..8f61fdff57 100644 --- a/sn_build_info/Cargo.toml +++ b/sn_build_info/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_build_info" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.9" +version = "0.1.10" [build-dependencies] vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } diff --git a/sn_cli/Cargo.toml b/sn_cli/Cargo.toml index 8290d6f1bf..ab809fc121 100644 --- a/sn_cli/Cargo.toml +++ b/sn_cli/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_cli" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.93.9" +version = "0.94.0" [[bin]] path = "src/bin/main.rs" @@ -58,11 +58,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [ rmp-serde = "1.1.1" rpassword = "7.3.1" serde = { version = "1.0.133", features = ["derive"] } -sn_build_info = { path = "../sn_build_info", version = "0.1.9" } -sn_client = { path = "../sn_client", version = "0.108.0" } -sn_logging = { path = "../sn_logging", version = "0.2.30" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } -sn_protocol = { path = "../sn_protocol", version = "0.17.5" } +sn_build_info = { path = "../sn_build_info", version = "0.1.10" } +sn_client = { path = "../sn_client", version = "0.109.0" } +sn_logging = { path = "../sn_logging", version = "0.2.31" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.6" } tempfile = "3.6.0" tiny-keccak = "~2.0.2" tokio = { version = "1.32.0", features = [ @@ -84,7 +84,7 @@ eyre = "0.6.8" criterion = "0.5.1" tempfile = "3.6.0" rand = { version = "~0.8.5", features = 
["small_rng"] } -sn_client = { path = "../sn_client", version = "0.108.0", features = [ +sn_client = { path = "../sn_client", version = "0.109.0", features = [ "test-utils", ] } diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index dd91d65e2f..e637c17321 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_client" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.108.0" +version = "0.109.0" [features] default = [] @@ -49,16 +49,16 @@ rayon = "1.8.0" rmp-serde = "1.1.1" self_encryption = "~0.29.0" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_networking = { path = "../sn_networking", version = "0.17.0" } -sn_protocol = { path = "../sn_protocol", version = "0.17.5" } -sn_registers = { path = "../sn_registers", version = "0.3.15" } -sn_transfers = { path = "../sn_transfers", version = "0.18.8" } +sn_networking = { path = "../sn_networking", version = "0.17.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.6" } +sn_registers = { path = "../sn_registers", version = "0.3.16" } +sn_transfers = { path = "../sn_transfers", version = "0.18.9" } tempfile = "3.6.0" thiserror = "1.0.23" tiny-keccak = "~2.0.2" tracing = { version = "~0.1.26" } xor_name = "5.0.0" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0", optional = true } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1", optional = true } eyre = { version = "0.6.8", optional = true } [dev-dependencies] @@ -67,8 +67,8 @@ dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } sn_client = { path = "../sn_client", features = ["test-utils"] } -sn_logging = { path = "../sn_logging", version = "0.2.30" } -sn_registers = { path = "../sn_registers", version = "0.3.15", features = [ +sn_logging = { path = "../sn_logging", version = "0.2.31" } +sn_registers = { path = "../sn_registers", version = "0.3.16", features = [ "test-utils", ] } @@ -83,7 +83,7 @@ crate-type = ["cdylib", "rlib"] getrandom = { version = "0.2.12", features = ["js"] } wasm-bindgen = "0.2.90" wasm-bindgen-futures = "0.4.40" -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } console_error_panic_hook = "0.1.6" tracing-wasm = "0.2.1" wasmtimer = "0.2.0" diff --git a/sn_client/src/api.rs b/sn_client/src/api.rs index e9531862ce..93418ec44f 100644 --- a/sn_client/src/api.rs +++ b/sn_client/src/api.rs @@ -57,13 +57,6 @@ pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(30); /// The timeout duration for the client to receive any response from the network. const INACTIVITY_TIMEOUT: Duration = Duration::from_secs(30); -// Init during compilation, instead of runtime error that should never happen -// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) -const QUORUM_2: NonZeroUsize = match NonZeroUsize::new(2) { - Some(v) => v, - None => panic!("2 is not be zero"), -}; - impl Client { /// A quick client with a random secret key and some peers. 
pub async fn quick_start(peers: Option>) -> Result { @@ -121,9 +114,7 @@ impl Client { #[cfg(feature = "open-metrics")] network_builder.metrics_registry(Some(Registry::default())); - let (network, mut network_event_receiver, swarm_driver) = network_builder - .build_client() - .map_err(NetworkError::MdnsBuildBehaviourError)?; + let (network, mut network_event_receiver, swarm_driver) = network_builder.build_client()?; info!("Client constructed network and swarm_driver"); // If the events broadcaster is not provided by the caller, then we create a new one. @@ -459,7 +450,7 @@ impl Client { ) -> Result { let key = NetworkAddress::from_register_address(address).to_record_key(); let get_quorum = if is_verifying { - Quorum::N(QUORUM_2) + Quorum::Majority } else { Quorum::One }; @@ -660,7 +651,9 @@ impl Client { let verification = if verify_store { let verification_cfg = GetRecordCfg { - get_quorum: Quorum::N(QUORUM_2), + get_quorum: Quorum::N( + NonZeroUsize::new(2).ok_or(Error::NonZeroUsizeWasInitialisedAsZero)?, + ), retry_strategy, target_record: None, // Not used since we use ChunkProof expected_holders: Default::default(), @@ -775,7 +768,7 @@ impl Client { address.clone(), random_nonce, expected_proof, - Quorum::N(QUORUM_2), + Quorum::N(NonZeroUsize::new(2).ok_or(Error::NonZeroUsizeWasInitialisedAsZero)?), None, ) .await diff --git a/sn_client/src/error.rs b/sn_client/src/error.rs index 50ce6525e1..adbe0ef884 100644 --- a/sn_client/src/error.rs +++ b/sn_client/src/error.rs @@ -90,6 +90,9 @@ pub enum Error { #[error("Total price exceed possible token amount")] TotalPriceTooHigh, + #[error("Logic error: NonZeroUsize was initialised as zero")] + NonZeroUsizeWasInitialisedAsZero, + #[error("Could not connect to the network in {0:?}")] ConnectionTimeout(Duration), diff --git a/sn_client/src/lib.rs b/sn_client/src/lib.rs index 5505008e43..daf06d6f4c 100644 --- a/sn_client/src/lib.rs +++ b/sn_client/src/lib.rs @@ -16,19 +16,19 @@ //! Here are the key functionalities provided by this crate: //! //! 1. **Network Communication**: It handles communication with the Safe Network, enabling clients to -//! send and receive messages from the decentralized nodes that make up the network. +//! send and receive messages from the decentralized nodes that make up the network. //! //! 2. **Data Storage and Retrieval**: to store and retrieve data on the Safe Network. -//! This includes both private and public data, ensuring privacy and security. +//! This includes both private and public data, ensuring privacy and security. //! //! 3. **Authentication and Access Control**: It provides mechanisms for authenticating users and -//! managing access to data, ensuring that only authorized users can access sensitive information. +//! managing access to data, ensuring that only authorized users can access sensitive information. //! //! 4. **File Management**: The crate supports operations related to file management, such as uploading, -//! downloading, and managing files and directories on the Safe Network. +//! downloading, and managing files and directories on the Safe Network. //! //! 5. **Token Management**: It includes functionality for managing Safe Network tokens, which can be -//! used for various purposes within the network, including paying for storage and services. +//! used for various purposes within the network, including paying for storage and services. //! //! ## Quick links //! 
- [Crates.io](https://crates.io/crates/sn_client) diff --git a/sn_client/src/register.rs b/sn_client/src/register.rs index 430d12092b..16efd3db1d 100644 --- a/sn_client/src/register.rs +++ b/sn_client/src/register.rs @@ -843,7 +843,7 @@ impl ClientRegister { let verification_cfg = GetRecordCfg { get_quorum: Quorum::One, - retry_strategy: Some(RetryStrategy::Balanced), + retry_strategy: Some(RetryStrategy::Quick), target_record: record_to_verify, expected_holders, }; @@ -865,7 +865,7 @@ impl ClientRegister { ) -> Result { debug!("Retrieving Register from: {address}"); let reg = client - .get_signed_register_from_network(address, false) + .get_signed_register_from_network(address, true) .await?; reg.verify_with_address(address)?; Ok(reg.register()?) diff --git a/sn_client/tests/folders_api.rs b/sn_client/tests/folders_api.rs index 4b7c74fc9f..8340c3ad32 100644 --- a/sn_client/tests/folders_api.rs +++ b/sn_client/tests/folders_api.rs @@ -190,6 +190,9 @@ async fn test_folder_remove_replace_entries() -> Result<()> { #[tokio::test] async fn test_folder_retrieve() -> Result<()> { + let _log_guards = + sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_retrieve", false); + let owner_sk = SecretKey::random(); let client = get_new_client(owner_sk).await?; let tmp_dir = tempfile::tempdir()?; @@ -267,6 +270,9 @@ async fn test_folder_retrieve() -> Result<()> { #[tokio::test] async fn test_folder_merge_changes() -> Result<()> { + let _log_guards = + sn_logging::LogBuilder::init_single_threaded_tokio_test("test_folder_merge_changes", false); + let owner_sk = SecretKey::random(); let client = get_new_client(owner_sk.clone()).await?; let tmp_dir = tempfile::tempdir()?; diff --git a/sn_faucet/Cargo.toml b/sn_faucet/Cargo.toml index 18e85260da..4fd88e3198 100644 --- a/sn_faucet/Cargo.toml +++ b/sn_faucet/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_faucet" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.30" +version = "0.4.31" [features] default = ["gifting"] @@ -37,13 +37,13 @@ indicatif = { version = "0.17.5", features = ["tokio"] } minreq = { version = "2.11.0", features = ["https-rustls"], optional = true } serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" -sn_build_info = { path = "../sn_build_info", version = "0.1.9" } -sn_cli = { path = "../sn_cli", version = "0.93.9" } -sn_client = { path = "../sn_client", version = "0.108.0" } -sn_logging = { path = "../sn_logging", version = "0.2.30" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } -sn_protocol = { path = "../sn_protocol", version = "0.17.5" } -sn_transfers = { path = "../sn_transfers", version = "0.18.8" } +sn_build_info = { path = "../sn_build_info", version = "0.1.10" } +sn_cli = { path = "../sn_cli", version = "0.94.0" } +sn_client = { path = "../sn_client", version = "0.109.0" } +sn_logging = { path = "../sn_logging", version = "0.2.31" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.6" } +sn_transfers = { path = "../sn_transfers", version = "0.18.9" } tokio = { version = "1.32.0", features = ["parking_lot", "rt"] } tracing = { version = "~0.1.26" } url = "2.5.0" diff --git a/sn_logging/Cargo.toml b/sn_logging/Cargo.toml index 01f93bd42d..66cc6870ce 100644 --- a/sn_logging/Cargo.toml +++ b/sn_logging/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_logging" readme = "README.md" repository = 
"https://github.com/maidsafe/safe_network" -version = "0.2.30" +version = "0.2.31" [dependencies] chrono = "~0.4.19" diff --git a/sn_metrics/Cargo.toml b/sn_metrics/Cargo.toml index c34a35f57d..00aef41f04 100644 --- a/sn_metrics/Cargo.toml +++ b/sn_metrics/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_metrics" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.1.10" +version = "0.1.11" [[bin]] path = "src/main.rs" diff --git a/sn_networking/Cargo.toml b/sn_networking/Cargo.toml index ee5a975d00..d443f763e0 100644 --- a/sn_networking/Cargo.toml +++ b/sn_networking/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_networking" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.0" +version = "0.17.1" [features] default = ["libp2p/quic"] @@ -53,10 +53,10 @@ rand = { version = "~0.8.5", features = ["small_rng"] } rayon = "1.8.0" rmp-serde = "1.1.1" serde = { version = "1.0.133", features = ["derive", "rc"] } -sn_build_info = { path="../sn_build_info", version = "0.1.9" } -sn_protocol = { path = "../sn_protocol", version = "0.17.5" } -sn_transfers = { path = "../sn_transfers", version = "0.18.8" } -sn_registers = { path = "../sn_registers", version = "0.3.15" } +sn_build_info = { path="../sn_build_info", version = "0.1.10" } +sn_protocol = { path = "../sn_protocol", version = "0.17.6" } +sn_transfers = { path = "../sn_transfers", version = "0.18.9" } +sn_registers = { path = "../sn_registers", version = "0.3.16" } sysinfo = { version = "0.30.8", default-features = false, optional = true } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = ["sha3"] } diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 0752f236f4..9b3417d1ed 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -10,7 +10,7 @@ use crate::{ driver::{PendingGetClosestType, SwarmDriver}, error::{NetworkError, Result}, event::TerminateNodeReason, - multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, + multiaddr_pop_p2p, GetRecordCfg, GetRecordError, MsgResponder, NetworkEvent, CLOSE_GROUP_SIZE, REPLICATION_PEERS_COUNT, }; use libp2p::{ @@ -56,12 +56,7 @@ pub enum NodeIssue { } /// Commands to send to the Swarm -#[allow(clippy::large_enum_variant)] -pub enum SwarmCmd { - Dial { - addr: Multiaddr, - sender: oneshot::Sender>, - }, +pub enum LocalSwarmCmd { /// Get a map where each key is the ilog2 distance of that Kbucket and each value is a vector of peers in that /// bucket. GetKBuckets { @@ -72,29 +67,12 @@ pub enum SwarmCmd { GetClosestKLocalPeers { sender: oneshot::Sender>, }, - // Get closest peers from the network - GetClosestPeersToAddressFromNetwork { + // Get closest peers from the local RoutingTable + GetCloseGroupLocalPeers { key: NetworkAddress, sender: oneshot::Sender>, }, GetSwarmLocalState(oneshot::Sender), - // Send Request to the PeerId. - SendRequest { - req: Request, - peer: PeerId, - - // If a `sender` is provided, the requesting node will await for a `Response` from the - // Peer. The result is then returned at the call site. - // - // If a `sender` is not provided, the requesting node will not wait for the Peer's - // response. 
Instead we trigger a `NetworkEvent::ResponseReceived` which calls the common - `response_handler` - sender: Option>>, - }, - SendResponse { - resp: Response, - channel: MsgResponder, - }, /// Check if the local RecordStore contains the provided key RecordStoreHasKey { key: RecordKey, @@ -104,11 +82,10 @@ pub enum SwarmCmd { GetAllLocalRecordAddresses { sender: oneshot::Sender>, }, - /// Get Record from the Kad network - GetNetworkRecord { + /// Get data from the local RecordStore + GetLocalRecord { key: RecordKey, - sender: oneshot::Sender>, - cfg: GetRecordCfg, + sender: oneshot::Sender>, }, /// GetLocalStoreCost for this node GetLocalStoreCost { @@ -117,24 +94,6 @@ pub enum SwarmCmd { }, /// Notify the node received a payment. PaymentReceived, - /// Get data from the local RecordStore - GetLocalRecord { - key: RecordKey, - sender: oneshot::Sender>, - }, - /// Put record to network - PutRecord { - record: Record, - sender: oneshot::Sender>, - quorum: Quorum, - }, - /// Put record to specific node - PutRecordTo { - peers: Vec, - record: Record, - sender: oneshot::Sender>, - quorum: Quorum, - }, /// Put record to the local RecordStore PutLocalRecord { record: Record, @@ -150,8 +109,6 @@ pub enum SwarmCmd { key: RecordKey, record_type: RecordType, }, - /// Triggers interval repliation - TriggerIntervalReplication, /// Notify whether peer is in trouble RecordNodeIssue { peer_id: PeerId, @@ -167,120 +124,199 @@ pub enum SwarmCmd { quotes: Vec<(PeerId, PaymentQuote)>, }, // Notify a fetch completion - FetchCompleted(RecordKey), + FetchCompleted((RecordKey, RecordType)), + /// Triggers interval replication + /// NOTE: This does result in outgoing messages, but is produced locally + TriggerIntervalReplication, +} + +/// Commands to send to the Swarm +pub enum NetworkSwarmCmd { + Dial { + addr: Multiaddr, + sender: oneshot::Sender>, + }, + // Get closest peers from the network + GetClosestPeersToAddressFromNetwork { + key: NetworkAddress, + sender: oneshot::Sender>, + }, + + // Send Request to the PeerId. + SendRequest { + req: Request, + peer: PeerId, + + // If a `sender` is provided, the requesting node will await a `Response` from the + // Peer. The result is then returned at the call site. + // + // If a `sender` is not provided, the requesting node will not wait for the Peer's + // response. Instead we trigger a `NetworkEvent::ResponseReceived` which calls the common + // `response_handler` + sender: Option>>, + }, + SendResponse { + resp: Response, + channel: MsgResponder, + }, + + /// Get Record from the Kad network + GetNetworkRecord { + key: RecordKey, + sender: oneshot::Sender>, + cfg: GetRecordCfg, + }, + + /// Put record to network + PutRecord { + record: Record, + sender: oneshot::Sender>, + quorum: Quorum, + }, + /// Put record to specific node + PutRecordTo { + peers: Vec, + record: Record, + sender: oneshot::Sender>, + quorum: Quorum, + }, } -/// Debug impl for SwarmCmd to avoid printing full Record, instead only RecodKey +/// Debug impl for LocalSwarmCmd to avoid printing full Record, instead only RecordKey /// and RecordKind are printed. -impl Debug for SwarmCmd { +impl Debug for LocalSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - SwarmCmd::Dial { addr, .. } => { - write!(f, "SwarmCmd::Dial {{ addr: {addr:?} }}") - } - SwarmCmd::GetNetworkRecord { key, cfg, .. 
} => { + LocalSwarmCmd::PutLocalRecord { record } => { write!( f, - "SwarmCmd::GetNetworkRecord {{ key: {:?}, cfg: {cfg:?}", - PrettyPrintRecordKey::from(key) + "LocalSwarmCmd::PutLocalRecord {{ key: {:?} }}", + PrettyPrintRecordKey::from(&record.key) ) } - SwarmCmd::PutRecord { record, .. } => { + LocalSwarmCmd::RemoveFailedLocalRecord { key } => { write!( f, - "SwarmCmd::PutRecord {{ key: {:?} }}", - PrettyPrintRecordKey::from(&record.key) + "LocalSwarmCmd::RemoveFailedLocalRecord {{ key: {:?} }}", + PrettyPrintRecordKey::from(key) ) } - SwarmCmd::PutRecordTo { peers, record, .. } => { + LocalSwarmCmd::AddLocalRecordAsStored { key, record_type } => { write!( f, - "SwarmCmd::PutRecordTo {{ peers: {peers:?}, key: {:?} }}", - PrettyPrintRecordKey::from(&record.key) + "LocalSwarmCmd::AddLocalRecordAsStored {{ key: {:?}, record_type: {record_type:?} }}", + PrettyPrintRecordKey::from(key) ) } - SwarmCmd::PutLocalRecord { record } => { + + LocalSwarmCmd::GetClosestKLocalPeers { .. } => { + write!(f, "LocalSwarmCmd::GetClosestKLocalPeers") + } + LocalSwarmCmd::GetCloseGroupLocalPeers { key, .. } => { write!( f, - "SwarmCmd::PutLocalRecord {{ key: {:?} }}", - PrettyPrintRecordKey::from(&record.key) + "LocalSwarmCmd::GetCloseGroupLocalPeers {{ key: {key:?} }}" ) } - SwarmCmd::RemoveFailedLocalRecord { key } => { + LocalSwarmCmd::GetLocalStoreCost { .. } => { + write!(f, "LocalSwarmCmd::GetLocalStoreCost") + } + LocalSwarmCmd::PaymentReceived => { + write!(f, "LocalSwarmCmd::PaymentReceived") + } + LocalSwarmCmd::GetLocalRecord { key, .. } => { write!( f, - "SwarmCmd::RemoveFailedLocalRecord {{ key: {:?} }}", + "LocalSwarmCmd::GetLocalRecord {{ key: {:?} }}", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::AddLocalRecordAsStored { key, record_type } => { + LocalSwarmCmd::GetAllLocalRecordAddresses { .. } => { + write!(f, "LocalSwarmCmd::GetAllLocalRecordAddresses") + } + LocalSwarmCmd::GetKBuckets { .. } => { + write!(f, "LocalSwarmCmd::GetKBuckets") + } + LocalSwarmCmd::GetSwarmLocalState { .. } => { + write!(f, "LocalSwarmCmd::GetSwarmLocalState") + } + LocalSwarmCmd::RecordStoreHasKey { key, .. } => { write!( f, - "SwarmCmd::AddLocalRecordAsStored {{ key: {:?}, record_type: {record_type:?} }}", + "LocalSwarmCmd::RecordStoreHasKey {:?}", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::TriggerIntervalReplication => { - write!(f, "SwarmCmd::TriggerIntervalReplication") - } - SwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. } => { - write!(f, "SwarmCmd::GetClosestPeers {{ key: {key:?} }}") - } - SwarmCmd::GetClosestKLocalPeers { .. } => { - write!(f, "SwarmCmd::GetClosestKLocalPeers") + + LocalSwarmCmd::RecordNodeIssue { peer_id, issue } => { + write!( + f, + "LocalSwarmCmd::RecordNodeIssue peer {peer_id:?}, issue: {issue:?}" + ) } - SwarmCmd::GetLocalStoreCost { .. } => { - write!(f, "SwarmCmd::GetLocalStoreCost") + LocalSwarmCmd::IsPeerShunned { target, .. } => { + write!(f, "LocalSwarmCmd::IsPeerShunned target: {target:?}") } - SwarmCmd::PaymentReceived => { - write!(f, "SwarmCmd::PaymentReceived") + LocalSwarmCmd::QuoteVerification { quotes } => { + write!( + f, + "LocalSwarmCmd::QuoteVerification of {} quotes", + quotes.len() + ) } - SwarmCmd::GetLocalRecord { key, .. } => { + LocalSwarmCmd::FetchCompleted((key, record_type)) => { write!( f, - "SwarmCmd::GetLocalRecord {{ key: {:?} }}", + "LocalSwarmCmd::FetchCompleted({record_type:?} : {:?})", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::GetAllLocalRecordAddresses { .. 
} => { - write!(f, "SwarmCmd::GetAllLocalRecordAddresses") - } - SwarmCmd::GetKBuckets { .. } => { - write!(f, "SwarmCmd::GetKBuckets") + LocalSwarmCmd::TriggerIntervalReplication => { + write!(f, "LocalSwarmCmd::TriggerIntervalReplication") } - SwarmCmd::GetSwarmLocalState { .. } => { - write!(f, "SwarmCmd::GetSwarmLocalState") + } + } +} + +/// Debug impl for NetworkSwarmCmd to avoid printing full Record, instead only RecordKey /// and RecordKind are printed. +impl Debug for NetworkSwarmCmd { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { + NetworkSwarmCmd::Dial { addr, .. } => { + write!(f, "NetworkSwarmCmd::Dial {{ addr: {addr:?} }}") } - SwarmCmd::RecordStoreHasKey { key, .. } => { + NetworkSwarmCmd::GetNetworkRecord { key, cfg, .. } => { write!( f, - "SwarmCmd::RecordStoreHasKey {:?}", + "NetworkSwarmCmd::GetNetworkRecord {{ key: {:?}, cfg: {cfg:?} }}", PrettyPrintRecordKey::from(key) ) } - SwarmCmd::SendResponse { resp, .. } => { - write!(f, "SwarmCmd::SendResponse resp: {resp:?}") - } - SwarmCmd::SendRequest { req, peer, .. } => { - write!(f, "SwarmCmd::SendRequest req: {req:?}, peer: {peer:?}") + NetworkSwarmCmd::PutRecord { record, .. } => { + write!( + f, + "NetworkSwarmCmd::PutRecord {{ key: {:?} }}", + PrettyPrintRecordKey::from(&record.key) + ) } - SwarmCmd::RecordNodeIssue { peer_id, issue } => { + NetworkSwarmCmd::PutRecordTo { peers, record, .. } => { write!( f, - "SwarmCmd::SendNodeStatus peer {peer_id:?}, issue: {issue:?}" + "NetworkSwarmCmd::PutRecordTo {{ peers: {peers:?}, key: {:?} }}", + PrettyPrintRecordKey::from(&record.key) ) } - SwarmCmd::IsPeerShunned { target, .. } => { - write!(f, "SwarmCmd::IsPeerInTrouble target: {target:?}") + NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, .. } => { + write!(f, "NetworkSwarmCmd::GetClosestPeers {{ key: {key:?} }}") } - SwarmCmd::QuoteVerification { quotes } => { - write!(f, "SwarmCmd::QuoteVerification of {} quotes", quotes.len()) + NetworkSwarmCmd::SendResponse { resp, .. } => { + write!(f, "NetworkSwarmCmd::SendResponse resp: {resp:?}") } - SwarmCmd::FetchCompleted(key) => { + NetworkSwarmCmd::SendRequest { req, peer, .. 
} => { write!( f, - "SwarmCmd::FetchCompleted({:?})", - PrettyPrintRecordKey::from(key) + "NetworkSwarmCmd::SendRequest req: {req:?}, peer: {peer:?}" ) } } @@ -296,15 +332,11 @@ pub struct SwarmLocalState { } impl SwarmDriver { - pub(crate) fn handle_cmd(&mut self, cmd: SwarmCmd) -> Result<(), NetworkError> { + pub(crate) fn handle_network_cmd(&mut self, cmd: NetworkSwarmCmd) -> Result<(), NetworkError> { let start = Instant::now(); - let mut cmd_string; + let cmd_string; match cmd { - SwarmCmd::TriggerIntervalReplication => { - cmd_string = "TriggerIntervalReplication"; - self.try_interval_replication()?; - } - SwarmCmd::GetNetworkRecord { key, sender, cfg } => { + NetworkSwarmCmd::GetNetworkRecord { key, sender, cfg } => { cmd_string = "GetNetworkRecord"; let query_id = self.swarm.behaviour_mut().kademlia.get_record(key.clone()); @@ -331,41 +363,7 @@ impl SwarmDriver { info!("We now have {} pending get record attempts and cached {total_records} fetched copies", self.pending_get_record.len()); } - SwarmCmd::GetLocalStoreCost { key, sender } => { - cmd_string = "GetLocalStoreCost"; - let cost = self - .swarm - .behaviour_mut() - .kademlia - .store_mut() - .store_cost(&key); - #[cfg(feature = "open-metrics")] - if let Some(metrics) = &self.network_metrics { - let _ = metrics.store_cost.set(cost.0.as_nano() as i64); - } - - let _res = sender.send(cost); - } - SwarmCmd::PaymentReceived => { - cmd_string = "PaymentReceived"; - self.swarm - .behaviour_mut() - .kademlia - .store_mut() - .payment_received(); - } - SwarmCmd::GetLocalRecord { key, sender } => { - cmd_string = "GetLocalRecord"; - let record = self - .swarm - .behaviour_mut() - .kademlia - .store_mut() - .get(&key) - .map(|rec| rec.into_owned()); - let _ = sender.send(record); - } - SwarmCmd::PutRecord { + NetworkSwarmCmd::PutRecord { record, sender, quorum, @@ -397,7 +395,7 @@ impl SwarmDriver { error!("Could not send response to PutRecord cmd: {:?}", err); } } - SwarmCmd::PutRecordTo { + NetworkSwarmCmd::PutRecordTo { peers, record, sender, @@ -421,7 +419,146 @@ impl SwarmDriver { error!("Could not send response to PutRecordTo cmd: {:?}", err); } } - SwarmCmd::PutLocalRecord { record } => { + + NetworkSwarmCmd::Dial { addr, sender } => { + cmd_string = "Dial"; + + if let Some(peer_id) = multiaddr_pop_p2p(&mut addr.clone()) { + // Only consider the dial peer is bootstrap node when proper PeerId is provided. + if let Some(kbucket) = self.swarm.behaviour_mut().kademlia.kbucket(peer_id) { + let ilog2 = kbucket.range().0.ilog2(); + let peers = self.bootstrap_peers.entry(ilog2).or_default(); + peers.insert(peer_id); + } + } + let _ = match self.dial(addr) { + Ok(_) => sender.send(Ok(())), + Err(e) => sender.send(Err(e.into())), + }; + } + NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork { key, sender } => { + cmd_string = "GetClosestPeersToAddressFromNetwork"; + let query_id = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_peers(key.as_bytes()); + let _ = self.pending_get_closest_peers.insert( + query_id, + ( + PendingGetClosestType::FunctionCall(sender), + Default::default(), + ), + ); + } + + NetworkSwarmCmd::SendRequest { req, peer, sender } => { + cmd_string = "SendRequest"; + // If `self` is the recipient, forward the request directly to our upper layer to + // be handled. + // `self` then handles the request and sends a response back again to itself. 
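// A reduced, self-contained model of the branch below, using toy peer ids and
// a plain String response instead of the crate's Request/Response types: a
// request addressed to our own peer never touches the wire; it is handed back
// to the upper layer and later answered through the very same oneshot sender.
use tokio::sync::oneshot;

enum Routed {
    // mirrors MsgResponder::FromSelf: the upper layer replies via this sender
    HandledLocally(oneshot::Sender<String>),
    // otherwise the request goes out through libp2p request_response
    SentToPeer(u64),
}

fn route_request(local_peer: u64, target_peer: u64, reply: oneshot::Sender<String>) -> Routed {
    if target_peer == local_peer {
        Routed::HandledLocally(reply)
    } else {
        Routed::SentToPeer(target_peer)
    }
}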
+ if peer == *self.swarm.local_peer_id() { + trace!("Sending query request to self"); + if let Request::Query(query) = req { + self.send_event(NetworkEvent::QueryRequestReceived { + query, + channel: MsgResponder::FromSelf(sender), + }); + } else { + // We should never receive a Replicate request from ourselves. + // we already hold this data if we do... so we can ignore + trace!("Replicate cmd to self received, ignoring"); + } + } else { + let request_id = self + .swarm + .behaviour_mut() + .request_response + .send_request(&peer, req); + trace!("Sending request {request_id:?} to peer {peer:?}"); + let _ = self.pending_requests.insert(request_id, sender); + + trace!("Pending Requests now: {:?}", self.pending_requests.len()); + } + } + NetworkSwarmCmd::SendResponse { resp, channel } => { + cmd_string = "SendResponse"; + match channel { + // If the response is for `self`, send it directly through the oneshot channel. + MsgResponder::FromSelf(channel) => { + trace!("Sending response to self"); + match channel { + Some(channel) => { + channel + .send(Ok(resp)) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + } + None => { + // responses that are not awaited at the call site must be handled + // separately + self.send_event(NetworkEvent::ResponseReceived { res: resp }); + } + } + } + MsgResponder::FromPeer(channel) => { + self.swarm + .behaviour_mut() + .request_response + .send_response(channel, resp) + .map_err(NetworkError::OutgoingResponseDropped)?; + } + } + } + } + + self.log_handling(cmd_string.to_string(), start.elapsed()); + + Ok(()) + } + pub(crate) fn handle_local_cmd(&mut self, cmd: LocalSwarmCmd) -> Result<(), NetworkError> { + let start = Instant::now(); + let mut cmd_string; + match cmd { + LocalSwarmCmd::TriggerIntervalReplication => { + cmd_string = "TriggerIntervalReplication"; + self.try_interval_replication()?; + } + LocalSwarmCmd::GetLocalStoreCost { key, sender } => { + cmd_string = "GetLocalStoreCost"; + let cost = self + .swarm + .behaviour_mut() + .kademlia + .store_mut() + .store_cost(&key); + #[cfg(feature = "open-metrics")] + if let Some(metrics) = &self.network_metrics { + let _ = metrics.store_cost.set(cost.0.as_nano() as i64); + } + + let _res = sender.send(cost); + } + LocalSwarmCmd::PaymentReceived => { + cmd_string = "PaymentReceived"; + self.swarm + .behaviour_mut() + .kademlia + .store_mut() + .payment_received(); + } + LocalSwarmCmd::GetLocalRecord { key, sender } => { + cmd_string = "GetLocalRecord"; + let record = self + .swarm + .behaviour_mut() + .kademlia + .store_mut() + .get(&key) + .map(|rec| rec.into_owned()); + let _ = sender.send(record); + } + + LocalSwarmCmd::PutLocalRecord { record } => { cmd_string = "PutLocalRecord"; let key = record.key.clone(); let record_key = PrettyPrintRecordKey::from(&key); @@ -510,7 +647,7 @@ impl SwarmDriver { return Err(err.into()); }; } - SwarmCmd::AddLocalRecordAsStored { key, record_type } => { + LocalSwarmCmd::AddLocalRecordAsStored { key, record_type } => { info!( "Adding Record locally, for {:?} and {record_type:?}", PrettyPrintRecordKey::from(&key) @@ -524,7 +661,7 @@ impl SwarmDriver { // Reset counter on any success HDD write. 
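// The reset below is one half of a consecutive-failure scheme; a small
// self-contained model of the whole idea (the threshold constant here is an
// assumed illustration value, not the crate's):
struct DiskHealth {
    consecutive_write_errors: usize,
}

impl DiskHealth {
    const MAX_WRITE_ERRORS: usize = 5; // assumed for illustration

    /// Returns true when the node should give up after repeated disk failures.
    fn record_write(&mut self, ok: bool) -> bool {
        if ok {
            self.consecutive_write_errors = 0; // any success resets the counter
            false
        } else {
            self.consecutive_write_errors += 1;
            self.consecutive_write_errors > Self::MAX_WRITE_ERRORS
        }
    }
}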
self.hard_disk_write_error = 0; } - SwarmCmd::RemoveFailedLocalRecord { key } => { + LocalSwarmCmd::RemoveFailedLocalRecord { key } => { info!("Removing Record locally, for {key:?}"); cmd_string = "RemoveFailedLocalRecord"; self.swarm.behaviour_mut().kademlia.store_mut().remove(&key); @@ -537,7 +674,7 @@ impl SwarmDriver { }); } } - SwarmCmd::RecordStoreHasKey { key, sender } => { + LocalSwarmCmd::RecordStoreHasKey { key, sender } => { cmd_string = "RecordStoreHasKey"; let has_key = self .swarm @@ -547,7 +684,7 @@ impl SwarmDriver { .contains(&key); let _ = sender.send(has_key); } - SwarmCmd::GetAllLocalRecordAddresses { sender } => { + LocalSwarmCmd::GetAllLocalRecordAddresses { sender } => { cmd_string = "GetAllLocalRecordAddresses"; #[allow(clippy::mutable_key_type)] // for the Bytes in NetworkAddress let addresses = self @@ -558,38 +695,7 @@ impl SwarmDriver { .record_addresses(); let _ = sender.send(addresses); } - SwarmCmd::Dial { addr, sender } => { - cmd_string = "Dial"; - - if let Some(peer_id) = multiaddr_pop_p2p(&mut addr.clone()) { - // Only consider the dial peer is bootstrap node when proper PeerId is provided. - if let Some(kbucket) = self.swarm.behaviour_mut().kademlia.kbucket(peer_id) { - let ilog2 = kbucket.range().0.ilog2(); - let peers = self.bootstrap_peers.entry(ilog2).or_default(); - peers.insert(peer_id); - } - } - let _ = match self.dial(addr) { - Ok(_) => sender.send(Ok(())), - Err(e) => sender.send(Err(e.into())), - }; - } - SwarmCmd::GetClosestPeersToAddressFromNetwork { key, sender } => { - cmd_string = "GetClosestPeersToAddressFromNetwork"; - let query_id = self - .swarm - .behaviour_mut() - .kademlia - .get_closest_peers(key.as_bytes()); - let _ = self.pending_get_closest_peers.insert( - query_id, - ( - PendingGetClosestType::FunctionCall(sender), - Default::default(), - ), - ); - } - SwarmCmd::GetKBuckets { sender } => { + LocalSwarmCmd::GetKBuckets { sender } => { cmd_string = "GetKBuckets"; let mut ilog2_kbuckets = BTreeMap::new(); for kbucket in self.swarm.behaviour_mut().kademlia.kbuckets() { @@ -607,68 +713,29 @@ impl SwarmDriver { } let _ = sender.send(ilog2_kbuckets); } - SwarmCmd::GetClosestKLocalPeers { sender } => { + LocalSwarmCmd::GetCloseGroupLocalPeers { key, sender } => { + cmd_string = "GetCloseGroupLocalPeers"; + let key = key.as_kbucket_key(); + // calls `kbuckets.closest_keys(key)` internally, which orders the peers by + // increasing distance + // Note it will return all peers, hence a chop down is required. + let closest_peers = self + .swarm + .behaviour_mut() + .kademlia + .get_closest_local_peers(&key) + .map(|peer| peer.into_preimage()) + .take(CLOSE_GROUP_SIZE) + .collect(); + + let _ = sender.send(closest_peers); + } + LocalSwarmCmd::GetClosestKLocalPeers { sender } => { cmd_string = "GetClosestKLocalPeers"; let _ = sender.send(self.get_closest_k_value_local_peers()); } - SwarmCmd::SendRequest { req, peer, sender } => { - cmd_string = "SendRequest"; - // If `self` is the recipient, forward the request directly to our upper layer to - // be handled. - // `self` then handles the request and sends a response back again to itself. - if peer == *self.swarm.local_peer_id() { - debug!("Sending query request to self"); - if let Request::Query(query) = req { - self.send_event(NetworkEvent::QueryRequestReceived { - query, - channel: MsgResponder::FromSelf(sender), - }); - } else { - // We should never receive a Replicate request from ourselves. - // we already hold this data if we do... 
so we can ignore - debug!("Replicate cmd to self received, ignoring"); - } - } else { - let request_id = self - .swarm - .behaviour_mut() - .request_response - .send_request(&peer, req); - debug!("Sending request {request_id:?} to peer {peer:?}"); - let _ = self.pending_requests.insert(request_id, sender); - debug!("Pending Requests now: {:?}", self.pending_requests.len()); - } - } - SwarmCmd::SendResponse { resp, channel } => { - cmd_string = "SendResponse"; - match channel { - // If the response is for `self`, send it directly through the oneshot channel. - MsgResponder::FromSelf(channel) => { - debug!("Sending response to self"); - match channel { - Some(channel) => { - channel - .send(Ok(resp)) - .map_err(|_| NetworkError::InternalMsgChannelDropped)?; - } - None => { - // responses that are not awaited at the call site must be handled - // separately - self.send_event(NetworkEvent::ResponseReceived { res: resp }); - } - } - } - MsgResponder::FromPeer(channel) => { - self.swarm - .behaviour_mut() - .request_response - .send_response(channel, resp) - .map_err(NetworkError::OutgoingResponseDropped)?; - } - } - } - SwarmCmd::GetSwarmLocalState(sender) => { + LocalSwarmCmd::GetSwarmLocalState(sender) => { cmd_string = "GetSwarmLocalState"; let current_state = SwarmLocalState { connected_peers: self.swarm.connected_peers().cloned().collect(), @@ -680,11 +747,11 @@ impl SwarmDriver { .map_err(|_| NetworkError::InternalMsgChannelDropped)?; } - SwarmCmd::RecordNodeIssue { peer_id, issue } => { + LocalSwarmCmd::RecordNodeIssue { peer_id, issue } => { cmd_string = "RecordNodeIssues"; self.record_node_issue(peer_id, issue); } - SwarmCmd::IsPeerShunned { target, sender } => { + LocalSwarmCmd::IsPeerShunned { target, sender } => { cmd_string = "IsPeerInTrouble"; let is_bad = if let Some(peer_id) = target.as_peer_id() { if let Some((_issues, is_bad)) = self.bad_nodes.get(&peer_id) { @@ -697,7 +764,7 @@ impl SwarmDriver { }; let _ = sender.send(is_bad); } - SwarmCmd::QuoteVerification { quotes } => { + LocalSwarmCmd::QuoteVerification { quotes } => { cmd_string = "QuoteVerification"; for (peer_id, quote) in quotes { // Do nothing if already being bad @@ -709,13 +776,15 @@ impl SwarmDriver { self.verify_peer_quote(peer_id, quote); } } - SwarmCmd::FetchCompleted(key) => { + LocalSwarmCmd::FetchCompleted((key, record_type)) => { info!( - "Fetch {:?} early completed, may fetched an old version record.", + "Fetch of {record_type:?} {:?} early completed, may have fetched an old version of the record.", PrettyPrintRecordKey::from(&key) ); cmd_string = "FetchCompleted"; - let new_keys_to_fetch = self.replication_fetcher.notify_fetch_early_completed(key); + let new_keys_to_fetch = self + .replication_fetcher + .notify_fetch_early_completed(key, record_type); if !new_keys_to_fetch.is_empty() { self.send_event(NetworkEvent::KeysToFetchForReplication(new_keys_to_fetch)); } diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index a5c290d9f2..51a133089e 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -13,7 +13,7 @@ use crate::metrics_service::run_metrics_server; use crate::{ bootstrap::{ContinuousBootstrap, BOOTSTRAP_INTERVAL}, circular_vec::CircularVec, - cmd::SwarmCmd, + cmd::{LocalSwarmCmd, NetworkSwarmCmd}, error::{NetworkError, Result}, event::{NetworkEvent, NodeEvent}, multiaddr_pop_p2p, @@ -117,13 +117,6 @@ const NETWORKING_CHANNEL_SIZE: usize = 10_000; /// Time before a Kad query times out if no response is received const KAD_QUERY_TIMEOUT_S: Duration = 
Duration::from_secs(10); -// Init during compilation, instead of runtime error that should never happen -// Option::expect will be stabilised as const in the future (https://github.com/rust-lang/rust/issues/67441) -const REPLICATION_FACTOR: NonZeroUsize = match NonZeroUsize::new(CLOSE_GROUP_SIZE) { - Some(v) => v, - None => panic!("CLOSE_GROUP_SIZE should not be zero"), -}; - /// The various settings to apply to when fetching a record from network #[derive(Clone)] pub struct GetRecordCfg { @@ -311,7 +304,10 @@ impl NetworkBuilder { // 1mb packet size .set_max_packet_size(MAX_PACKET_SIZE) // How many nodes _should_ store data. - .set_replication_factor(REPLICATION_FACTOR) + .set_replication_factor( + NonZeroUsize::new(CLOSE_GROUP_SIZE) + .ok_or_else(|| NetworkError::InvalidCloseGroupSize)?, + ) .set_query_timeout(KAD_QUERY_TIMEOUT_S) // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes. .disjoint_query_paths(true) @@ -380,12 +376,7 @@ impl NetworkBuilder { } /// Same as `build_node` API but creates the network components in client mode - pub fn build_client( - self, - ) -> std::result::Result< - (Network, mpsc::Receiver, SwarmDriver), - MdnsBuildBehaviourError, - > { + pub fn build_client(self) -> Result<(Network, mpsc::Receiver, SwarmDriver)> { // Create a Kademlia behaviour for client mode, i.e. set req/resp protocol // to outbound-only mode and don't listen on any address let mut kad_cfg = kad::Config::default(); // default query timeout is 60 secs @@ -397,7 +388,10 @@ impl NetworkBuilder { // Require iterative queries to use disjoint paths for increased resiliency in the presence of potentially adversarial nodes. .disjoint_query_paths(true) // How many nodes _should_ store data. 
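// A minimal sketch of the fallible construction this hunk switches to, reusing
// the InvalidCloseGroupSize variant that the error.rs hunk below introduces
// (the enum here is a cut-down stand-in for the real NetworkError):
use std::num::NonZeroUsize;

#[derive(Debug)]
enum NetworkError {
    InvalidCloseGroupSize,
}

// Build the kad replication factor at runtime instead of via a const that panics.
fn replication_factor(close_group_size: usize) -> Result<NonZeroUsize, NetworkError> {
    NonZeroUsize::new(close_group_size).ok_or(NetworkError::InvalidCloseGroupSize)
}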
- .set_replication_factor(REPLICATION_FACTOR); + .set_replication_factor( + NonZeroUsize::new(CLOSE_GROUP_SIZE) + .ok_or_else(|| NetworkError::InvalidCloseGroupSize)?, + ); let (network, net_event_recv, driver) = self.build( kad_cfg, @@ -421,10 +415,7 @@ impl NetworkBuilder { req_res_protocol: ProtocolSupport, identify_version: String, #[cfg(feature = "upnp")] upnp: bool, - ) -> std::result::Result< - (Network, mpsc::Receiver, SwarmDriver), - MdnsBuildBehaviourError, - > { + ) -> Result<(Network, mpsc::Receiver, SwarmDriver)> { let peer_id = PeerId::from(self.keypair.public()); // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues): #[cfg(not(target_arch = "wasm32"))] @@ -466,7 +457,10 @@ impl NetworkBuilder { }; let (network_event_sender, network_event_receiver) = mpsc::channel(NETWORKING_CHANNEL_SIZE); - let (swarm_cmd_sender, swarm_cmd_receiver) = mpsc::channel(NETWORKING_CHANNEL_SIZE); + let (network_swarm_cmd_sender, network_swarm_cmd_receiver) = + mpsc::channel(NETWORKING_CHANNEL_SIZE); + let (local_swarm_cmd_sender, local_swarm_cmd_receiver) = + mpsc::channel(NETWORKING_CHANNEL_SIZE); // Kademlia Behaviour let kademlia = { @@ -476,7 +470,7 @@ impl NetworkBuilder { peer_id, store_cfg, network_event_sender.clone(), - swarm_cmd_sender.clone(), + local_swarm_cmd_sender.clone(), ); #[cfg(feature = "open-metrics")] let mut node_record_store = node_record_store; @@ -613,7 +607,8 @@ impl NetworkBuilder { replication_fetcher, #[cfg(feature = "open-metrics")] network_metrics, - cmd_receiver: swarm_cmd_receiver, + network_cmd_receiver: network_swarm_cmd_receiver, + local_cmd_receiver: local_swarm_cmd_receiver, event_sender: network_event_sender, pending_get_closest_peers: Default::default(), pending_requests: Default::default(), @@ -632,7 +627,13 @@ impl NetworkBuilder { replication_targets: Default::default(), }; - let network = Network::new(swarm_cmd_sender, peer_id, self.root_dir, self.keypair); + let network = Network::new( + network_swarm_cmd_sender, + local_swarm_cmd_sender, + peer_id, + self.root_dir, + self.keypair, + ); Ok((network, network_event_receiver, swarm_driver)) } @@ -655,7 +656,8 @@ pub struct SwarmDriver { #[cfg(feature = "open-metrics")] pub(crate) network_metrics: Option, - cmd_receiver: mpsc::Receiver, + local_cmd_receiver: mpsc::Receiver, + network_cmd_receiver: mpsc::Receiver, event_sender: mpsc::Sender, // Use `self.send_event()` to send a NetworkEvent. /// Trackers for underlying behaviour related events @@ -696,24 +698,44 @@ impl SwarmDriver { loop { tokio::select! { - swarm_event = self.swarm.select_next_some() => { - // logging for handling events happens inside handle_swarm_events - // otherwise we're rewriting match statements etc around this anwyay - if let Err(err) = self.handle_swarm_events(swarm_event) { - warn!("Error while handling swarm event: {err}"); - } + // polls futures in order they appear here (as opposed to random) + biased; + + // Prioritise any local cmds pending. 
+ // https://github.com/libp2p/rust-libp2p/blob/master/docs/coding-guidelines.md#prioritize-local-work-over-new-work-from-a-remote + local_cmd = self.local_cmd_receiver.recv() => match local_cmd { + Some(cmd) => { + let start = Instant::now(); + let cmd_string = format!("{cmd:?}"); + if let Err(err) = self.handle_local_cmd(cmd) { + warn!("Error while handling local cmd: {err}"); + } + trace!("LocalCmd handled in {:?}: {cmd_string:?}", start.elapsed()); + }, + None => continue, }, - some_cmd = self.cmd_receiver.recv() => match some_cmd { + // next check if we have locally generated network cmds + some_cmd = self.network_cmd_receiver.recv() => match some_cmd { Some(cmd) => { let start = Instant::now(); let cmd_string = format!("{cmd:?}"); - if let Err(err) = self.handle_cmd(cmd) { + if let Err(err) = self.handle_network_cmd(cmd) { warn!("Error while handling cmd: {err}"); } trace!("SwarmCmd handled in {:?}: {cmd_string:?}", start.elapsed()); }, None => continue, }, + // next take and react to external swarm events + swarm_event = self.swarm.select_next_some() => { + // logging for handling events happens inside handle_swarm_events + // otherwise we're rewriting match statements etc around this anyway + if let Err(err) = self.handle_swarm_events(swarm_event) { + warn!("Error while handling swarm event: {err}"); + } + }, + // thereafter we can check our intervals + // runs every bootstrap_interval time _ = bootstrap_interval.tick() => { if let Some(new_interval) = self.run_bootstrap_continuously(bootstrap_interval.period()).await { @@ -875,7 +897,3 @@ impl SwarmDriver { Ok(()) } } - -#[derive(Debug, thiserror::Error)] -#[error("building the mDNS behaviour failed: {0}")] -pub struct MdnsBuildBehaviourError(#[from] std::io::Error); diff --git a/sn_networking/src/error.rs b/sn_networking/src/error.rs index 5b413547ce..de5cb56c3f 100644 --- a/sn_networking/src/error.rs +++ b/sn_networking/src/error.rs @@ -24,8 +24,6 @@ use thiserror::Error; use tokio::sync::oneshot; use xor_name::XorName; -use crate::driver::MdnsBuildBehaviourError; - pub(super) type Result = std::result::Result; /// GetRecord Query errors @@ -140,7 +138,7 @@ pub enum NetworkError { // ---------- Spend Errors #[error("Spend not found: {0:?}")] NoSpendFoundInsideRecord(SpendAddress), - #[error("Double spend(s) was detected. The signed spends are: {0:?}")] + #[error("Double spend attempt(s) detected. The signed spends are: {0:?}")] DoubleSpendAttempt(Vec), // ---------- Store Error @@ -157,6 +155,9 @@ pub enum NetworkError { #[error("Could not get enough peers ({required}) to satisfy the request, found {found}")] NotEnoughPeers { found: usize, required: usize }, + #[error("Close group size must be a non-zero usize")] + InvalidCloseGroupSize, + #[error("Node Listen Address was not provided during construction")] ListenAddressNotProvided, @@ -184,7 +185,7 @@ pub enum NetworkError { OutgoingResponseDropped(Response), #[error("Error setting up behaviour: {0}")] - MdnsBuildBehaviourError(#[from] MdnsBuildBehaviourError), + BahviourErr(String), } #[cfg(test)] diff --git a/sn_networking/src/event/swarm.rs b/sn_networking/src/event/swarm.rs index 00f8f2cba9..9db2195ece 100644 --- a/sn_networking/src/event/swarm.rs +++ b/sn_networking/src/event/swarm.rs @@ -7,7 +7,7 @@ // permissions and limitations relating to use of the SAFE Network Software. 
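// A compact, runnable model of the prioritised loop introduced in driver.rs
// above (channel payloads reduced to Strings): with `biased;`, tokio polls the
// branches top to bottom, so pending local commands always win over network
// commands, and both win over fresh swarm events or interval ticks.
use tokio::sync::mpsc;

async fn drive(mut local_rx: mpsc::Receiver<String>, mut network_rx: mpsc::Receiver<String>) {
    loop {
        tokio::select! {
            biased;
            local = local_rx.recv() => match local {
                Some(cmd) => println!("local cmd handled first: {cmd}"),
                None => break, // channel closed, stop driving
            },
            network = network_rx.recv() => match network {
                Some(cmd) => println!("network cmd handled next: {cmd}"),
                None => break,
            },
        }
    }
}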
use crate::{ - cmd::SwarmCmd, + cmd::LocalSwarmCmd, event::NodeEvent, multiaddr_is_global, multiaddr_strip_p2p, relay_manager::is_a_relayed_peer, @@ -503,7 +503,7 @@ impl SwarmDriver { { self.update_on_peer_removal(*dead_peer.node.key.preimage()); - self.handle_cmd(SwarmCmd::RecordNodeIssue { + self.handle_local_cmd(LocalSwarmCmd::RecordNodeIssue { peer_id: failed_peer_id, issue: crate::NodeIssue::ConnectionIssue, })?; diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 1d3c10f70c..1c92230546 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -31,6 +31,7 @@ mod transfers; mod transport; pub mod version; +use cmd::LocalSwarmCmd; // re-export arch dependent deps for use in the crate, or above pub use target_arch::{interval, sleep, spawn, Instant, Interval}; @@ -45,13 +46,14 @@ pub use self::{ transfers::{get_raw_signed_spends_from_record, get_signed_spend_from_record}, }; -use self::{cmd::SwarmCmd, error::Result}; +use self::{cmd::NetworkSwarmCmd, error::Result}; use backoff::{Error as BackoffError, ExponentialBackoff}; use futures::future::select_all; use libp2p::{ identity::Keypair, kad::{KBucketDistance, KBucketKey, Quorum, Record, RecordKey}, multiaddr::Protocol, + request_response::OutboundFailure, Multiaddr, PeerId, }; use rand::Rng; @@ -161,7 +163,8 @@ pub struct Network { /// The actual implementation of the Network. The other is just a wrapper around this, so that we don't expose /// the Arc from the interface. struct NetworkInner { - swarm_cmd_sender: mpsc::Sender, + network_swarm_cmd_sender: mpsc::Sender, + local_swarm_cmd_sender: mpsc::Sender, peer_id: PeerId, root_dir_path: PathBuf, keypair: Keypair, @@ -169,14 +172,16 @@ struct NetworkInner { impl Network { pub fn new( - swarm_cmd_sender: mpsc::Sender, + network_swarm_cmd_sender: mpsc::Sender, + local_swarm_cmd_sender: mpsc::Sender, peer_id: PeerId, root_dir_path: PathBuf, keypair: Keypair, ) -> Self { Self { inner: Arc::new(NetworkInner { - swarm_cmd_sender, + network_swarm_cmd_sender, + local_swarm_cmd_sender, peer_id, root_dir_path, keypair, @@ -199,9 +204,13 @@ impl Network { &self.inner.root_dir_path } - /// Get the sender to send a `SwarmCmd` to the underlying `Swarm`. - pub(crate) fn swarm_cmd_sender(&self) -> &mpsc::Sender { - &self.inner.swarm_cmd_sender + /// Get the sender to send a `NetworkSwarmCmd` to the underlying `Swarm`. + pub(crate) fn network_swarm_cmd_sender(&self) -> &mpsc::Sender { + &self.inner.network_swarm_cmd_sender + } + /// Get the sender to send a `LocalSwarmCmd` to the underlying `Swarm`. + pub(crate) fn local_swarm_cmd_sender(&self) -> &mpsc::Sender { + &self.inner.local_swarm_cmd_sender } /// Signs the given data with the node's keypair. @@ -223,7 +232,7 @@ impl Network { /// This function will only be called for the bootstrap nodes. pub async fn dial(&self, addr: Multiaddr) -> Result<()> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::Dial { addr, sender }); + self.send_network_swarm_cmd(NetworkSwarmCmd::Dial { addr, sender }); receiver.await? } @@ -245,7 +254,7 @@ impl Network { /// Does not include self pub async fn get_kbuckets(&self) -> Result>> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetKBuckets { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetKBuckets { sender }); receiver .await .map_err(|_e| NetworkError::InternalMsgChannelDropped) @@ -255,7 +264,7 @@ impl Network { /// Also contains our own PeerId. 
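// Every accessor in this file follows the ask pattern sketched here: park a
// oneshot sender inside the command, ship it to the driver task, await the
// reply (the command enum is cut down to one hypothetical variant):
use tokio::sync::{mpsc, oneshot};

enum Cmd {
    GetKBucketCount { sender: oneshot::Sender<usize> },
}

async fn get_kbucket_count(cmd_tx: &mpsc::Sender<Cmd>) -> Option<usize> {
    let (sender, receiver) = oneshot::channel();
    cmd_tx.send(Cmd::GetKBucketCount { sender }).await.ok()?;
    receiver.await.ok() // None if the driver dropped the sender
}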
pub async fn get_closest_k_value_local_peers(&self) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetClosestKLocalPeers { sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetClosestKLocalPeers { sender }); receiver .await @@ -435,7 +444,7 @@ impl Network { let pretty_key = PrettyPrintRecordKey::from(&key); info!("Getting record from network of {pretty_key:?}. with cfg {cfg:?}",); let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetNetworkRecord { + self.send_network_swarm_cmd(NetworkSwarmCmd::GetNetworkRecord { key: key.clone(), sender, cfg: cfg.clone(), @@ -471,7 +480,7 @@ impl Network { let pretty_key = PrettyPrintRecordKey::from(&key); info!("Getting record from network of {pretty_key:?}. with cfg {cfg:?}",); let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetNetworkRecord { + self.send_network_swarm_cmd(NetworkSwarmCmd::GetNetworkRecord { key: key.clone(), sender, cfg: cfg.clone(), @@ -530,7 +539,7 @@ impl Network { key: RecordKey, ) -> Result<(NanoTokens, QuotingMetrics)> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetLocalStoreCost { key, sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::GetLocalStoreCost { key, sender }); receiver .await @@ -539,13 +548,13 @@ impl Network { /// Notify the node received a payment. pub fn notify_payment_received(&self) { - self.send_swarm_cmd(SwarmCmd::PaymentReceived); + self.send_local_swarm_cmd(LocalSwarmCmd::PaymentReceived); } /// Get `Record` from the local RecordStore pub async fn get_local_record(&self, key: &RecordKey) -> Result> { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::GetLocalRecord { + self.send_local_swarm_cmd(LocalSwarmCmd::GetLocalRecord { key: key.clone(), sender, }); @@ -558,7 +567,7 @@ impl Network { /// Whether the target peer is considered blacklisted by self pub async fn is_peer_shunned(&self, target: NetworkAddress) -> Result { let (sender, receiver) = oneshot::channel(); - self.send_swarm_cmd(SwarmCmd::IsPeerShunned { target, sender }); + self.send_local_swarm_cmd(LocalSwarmCmd::IsPeerShunned { target, sender }); receiver .await @@ -571,6 +580,8 @@ impl Network { pub async fn put_record(&self, record: Record, cfg: &PutRecordCfg) -> Result<()> { let pretty_key = PrettyPrintRecordKey::from(&record.key); + // Here we only retry after a failed validation. + // So a long validation time will limit the number of PUT retries we attempt here. let retry_duration = cfg.retry_strategy.map(|strategy| strategy.get_duration()); backoff::future::retry( ExponentialBackoff { @@ -609,14 +620,14 @@ impl Network { // Waiting for a response to avoid flushing to the network too quickly and causing a choke let (sender, receiver) = oneshot::channel(); if let Some(put_record_to_peers) = &cfg.use_put_record_to { - self.send_swarm_cmd(SwarmCmd::PutRecordTo { + self.send_network_swarm_cmd(NetworkSwarmCmd::PutRecordTo { peers: put_record_to_peers.clone(), record: record.clone(), sender, quorum: cfg.put_quorum, }); } else { - self.send_swarm_cmd(SwarmCmd::PutRecord { + self.send_network_swarm_cmd(NetworkSwarmCmd::PutRecord { record: record.clone(), sender, quorum: cfg.put_quorum, @@ -676,8 +687,8 @@ impl Network { /// Notify ReplicationFetch a fetch attempt is completed. 
@@ -676,8 +687,8 @@ impl Network {
     /// Notify ReplicationFetcher that a fetch attempt is completed.
     /// (but it won't trigger any real writes to disk, say fetched an old version of register)
-    pub fn notify_fetch_completed(&self, key: RecordKey) {
-        self.send_swarm_cmd(SwarmCmd::FetchCompleted(key))
+    pub fn notify_fetch_completed(&self, key: RecordKey, record_type: RecordType) {
+        self.send_local_swarm_cmd(LocalSwarmCmd::FetchCompleted((key, record_type)))
     }

     /// Put `Record` to the local RecordStore
@@ -688,13 +699,13 @@ impl Network {
             PrettyPrintRecordKey::from(&record.key),
             record.value.len()
         );
-        self.send_swarm_cmd(SwarmCmd::PutLocalRecord { record })
+        self.send_local_swarm_cmd(LocalSwarmCmd::PutLocalRecord { record })
     }

     /// Returns true if a RecordKey is present locally in the RecordStore
     pub async fn is_record_key_present_locally(&self, key: &RecordKey) -> Result<bool> {
         let (sender, receiver) = oneshot::channel();
-        self.send_swarm_cmd(SwarmCmd::RecordStoreHasKey {
+        self.send_local_swarm_cmd(LocalSwarmCmd::RecordStoreHasKey {
             key: key.clone(),
             sender,
         });
@@ -709,7 +720,7 @@ impl Network {
         &self,
     ) -> Result<HashMap<NetworkAddress, RecordType>> {
         let (sender, receiver) = oneshot::channel();
-        self.send_swarm_cmd(SwarmCmd::GetAllLocalRecordAddresses { sender });
+        self.send_local_swarm_cmd(LocalSwarmCmd::GetAllLocalRecordAddresses { sender });

         receiver
             .await
@@ -720,55 +731,90 @@ impl Network {
     /// then the `Request` is forwarded to itself and handled, and a corresponding `Response` is created
     /// and returned to itself. Hence the flow remains the same and there is no branching at the upper
     /// layers.
+    ///
+    /// If an outbound issue is raised, we retry once more to send the request before returning an error.
     pub async fn send_request(&self, req: Request, peer: PeerId) -> Result<Response> {
         let (sender, receiver) = oneshot::channel();
-        self.send_swarm_cmd(SwarmCmd::SendRequest {
-            req,
+        self.send_network_swarm_cmd(NetworkSwarmCmd::SendRequest {
+            req: req.clone(),
             peer,
             sender: Some(sender),
         });
-        receiver.await?
+        let mut r = receiver.await?;
+
+        if let Err(error) = &r {
+            error!("Error in response: {:?}", error);
+
+            match error {
+                NetworkError::OutboundError(OutboundFailure::Io(_))
+                | NetworkError::OutboundError(OutboundFailure::ConnectionClosed) => {
+                    warn!(
+                        "Outbound failed for {req:?} .. {error:?}, redialing once and reattempting"
+                    );
+                    let (sender, receiver) = oneshot::channel();
+
+                    debug!("Reattempting to send_request {req:?} to {peer:?}");
+                    self.send_network_swarm_cmd(NetworkSwarmCmd::SendRequest {
+                        req,
+                        peer,
+                        sender: Some(sender),
+                    });
+
+                    r = receiver.await?;
+                }
+                _ => {
+                    // Any other failure is not retried here; log it and return it as-is.
+                    warn!("Error in response: {:?}", error);
+                }
+            }
+        }
+
+        r
     }

     /// Send `Request` to the given `PeerId` and do _not_ await a response here.
     /// Instead the Response will be handled by the common `response_handler`
     pub fn send_req_ignore_reply(&self, req: Request, peer: PeerId) {
-        let swarm_cmd = SwarmCmd::SendRequest {
+        let swarm_cmd = NetworkSwarmCmd::SendRequest {
             req,
             peer,
             sender: None,
         };
-        self.send_swarm_cmd(swarm_cmd)
+        self.send_network_swarm_cmd(swarm_cmd)
     }

     /// Send a `Response` through the channel opened by the requester.
     pub fn send_response(&self, resp: Response, channel: MsgResponder) {
-        self.send_swarm_cmd(SwarmCmd::SendResponse { resp, channel })
+        self.send_network_swarm_cmd(NetworkSwarmCmd::SendResponse { resp, channel })
     }
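The new send_request retries exactly once when the failure looks transient (an I/O error, or the connection closing mid-flight), and gives up otherwise. The general shape of that logic, sketched with hypothetical names:

use std::future::Future;

// Generic "retry once on transient failure": `attempt` performs one
// request/response round-trip, `transient` classifies which errors merit
// a single further try.
async fn with_one_retry<T, E, F, Fut>(mut attempt: F, transient: impl Fn(&E) -> bool) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<T, E>>,
{
    match attempt().await {
        Err(ref e) if transient(e) => attempt().await, // second and final attempt
        first => first,
    }
}

In send_request above, the transient set is OutboundFailure::Io and OutboundFailure::ConnectionClosed; this is why `req` is now cloned on the first send, so it can be reused for the second.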
     /// Return a `SwarmLocalState` with some information obtained from swarm's local state.
     pub async fn get_swarm_local_state(&self) -> Result<SwarmLocalState> {
         let (sender, receiver) = oneshot::channel();
-        self.send_swarm_cmd(SwarmCmd::GetSwarmLocalState(sender));
+        self.send_local_swarm_cmd(LocalSwarmCmd::GetSwarmLocalState(sender));
         let state = receiver.await?;
         Ok(state)
     }

     pub fn trigger_interval_replication(&self) {
-        self.send_swarm_cmd(SwarmCmd::TriggerIntervalReplication)
+        self.send_local_swarm_cmd(LocalSwarmCmd::TriggerIntervalReplication)
     }

     pub fn record_node_issues(&self, peer_id: PeerId, issue: NodeIssue) {
-        self.send_swarm_cmd(SwarmCmd::RecordNodeIssue { peer_id, issue });
+        self.send_local_swarm_cmd(LocalSwarmCmd::RecordNodeIssue { peer_id, issue });
     }

     pub fn historical_verify_quotes(&self, quotes: Vec<(PeerId, PaymentQuote)>) {
-        self.send_swarm_cmd(SwarmCmd::QuoteVerification { quotes });
+        self.send_local_swarm_cmd(LocalSwarmCmd::QuoteVerification { quotes });
     }

-    // Helper to send SwarmCmd
-    fn send_swarm_cmd(&self, cmd: SwarmCmd) {
-        send_swarm_cmd(self.swarm_cmd_sender().clone(), cmd);
+    /// Helper to send NetworkSwarmCmd
+    fn send_network_swarm_cmd(&self, cmd: NetworkSwarmCmd) {
+        send_network_swarm_cmd(self.network_swarm_cmd_sender().clone(), cmd);
+    }
+    /// Helper to send LocalSwarmCmd
+    fn send_local_swarm_cmd(&self, cmd: LocalSwarmCmd) {
+        send_local_swarm_cmd(self.local_swarm_cmd_sender().clone(), cmd);
     }

     /// Returns the closest peers to the given `XorName`, sorted by their distance to the xor_name.
@@ -780,7 +826,7 @@ impl Network {
     ) -> Result<Vec<PeerId>> {
         debug!("Getting the closest peers to {key:?}");
         let (sender, receiver) = oneshot::channel();
-        self.send_swarm_cmd(SwarmCmd::GetClosestPeersToAddressFromNetwork {
+        self.send_network_swarm_cmd(NetworkSwarmCmd::GetClosestPeersToAddressFromNetwork {
             key: key.clone(),
             sender,
         });
@@ -953,7 +999,28 @@ pub(crate) fn multiaddr_strip_p2p(multiaddr: &Multiaddr) -> Multiaddr {
     }
 }

-pub(crate) fn send_swarm_cmd(swarm_cmd_sender: Sender<SwarmCmd>, cmd: SwarmCmd) {
+pub(crate) fn send_local_swarm_cmd(swarm_cmd_sender: Sender<LocalSwarmCmd>, cmd: LocalSwarmCmd) {
+    let capacity = swarm_cmd_sender.capacity();
+
+    if capacity == 0 {
+        error!(
+            "SwarmCmd channel is full. Await capacity to send: {:?}",
+            cmd
+        );
+    }
+
+    // Spawn a task to send the SwarmCmd and keep this fn sync
+    let _handle = spawn(async move {
+        if let Err(error) = swarm_cmd_sender.send(cmd).await {
+            error!("Failed to send SwarmCmd: {}", error);
+        }
+    });
+}
+
+pub(crate) fn send_network_swarm_cmd(
+    swarm_cmd_sender: Sender<NetworkSwarmCmd>,
+    cmd: NetworkSwarmCmd,
+) {
     let capacity = swarm_cmd_sender.capacity();

     if capacity == 0 {
diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs
index beae192828..7939ce7e25 100644
--- a/sn_networking/src/record_store.rs
+++ b/sn_networking/src/record_store.rs
@@ -7,10 +7,11 @@
 // permissions and limitations relating to use of the SAFE Network Software.
 #![allow(clippy::mutable_key_type)] // for the Bytes in NetworkAddress

+use crate::cmd::LocalSwarmCmd;
 use crate::driver::MAX_PACKET_SIZE;
 use crate::target_arch::{spawn, Instant};
-use crate::CLOSE_GROUP_SIZE;
-use crate::{cmd::SwarmCmd, event::NetworkEvent, log_markers::Marker, send_swarm_cmd};
+use crate::{event::NetworkEvent, log_markers::Marker};
+use crate::{send_local_swarm_cmd, CLOSE_GROUP_SIZE};
 use aes_gcm_siv::{
     aead::{Aead, KeyInit, OsRng},
     Aes256GcmSiv, Nonce,
@@ -76,7 +77,7 @@ pub struct NodeRecordStore {
     /// Send network events to the node layer.
     network_event_sender: mpsc::Sender<NetworkEvent>,
     /// Send cmds to the network layer. Used to interact with self in an async fashion.
-    swarm_cmd_sender: mpsc::Sender<SwarmCmd>,
+    local_swarm_cmd_sender: mpsc::Sender<LocalSwarmCmd>,
     /// ilog2 distance range of responsible records
     /// AKA: how many buckets of data do we consider "close"
     /// None means accept all records.
@@ -248,7 +249,7 @@ impl NodeRecordStore {
         local_id: PeerId,
         config: NodeRecordStoreConfig,
         network_event_sender: mpsc::Sender<NetworkEvent>,
-        swarm_cmd_sender: mpsc::Sender<SwarmCmd>,
+        swarm_cmd_sender: mpsc::Sender<LocalSwarmCmd>,
     ) -> Self {
         let key = Aes256GcmSiv::generate_key(&mut OsRng);
         let cipher = Aes256GcmSiv::new(&key);
@@ -280,7 +281,7 @@ impl NodeRecordStore {
             records,
             records_cache: VecDeque::with_capacity(cache_size),
             network_event_sender,
-            swarm_cmd_sender,
+            local_swarm_cmd_sender: swarm_cmd_sender,
             responsible_distance_range: None,
             #[cfg(feature = "open-metrics")]
             record_count_metric: None,
@@ -541,7 +542,7 @@ impl NodeRecordStore {
         }

         let encryption_details = self.encryption_details.clone();
-        let cloned_cmd_sender = self.swarm_cmd_sender.clone();
+        let cloned_cmd_sender = self.local_swarm_cmd_sender.clone();
         spawn(async move {
             let key = r.key.clone();
             if let Some(bytes) = Self::prepare_record_bytes(r, encryption_details) {
@@ -550,17 +551,17 @@ impl NodeRecordStore {
                         // vdash metric (if modified please notify at https://github.com/happybeing/vdash/issues):
                         info!("Wrote record {record_key:?} to disk! filename: {filename}");

-                        SwarmCmd::AddLocalRecordAsStored { key, record_type }
+                        LocalSwarmCmd::AddLocalRecordAsStored { key, record_type }
                     }
                     Err(err) => {
                         error!(
                             "Error writing record {record_key:?} filename: {filename}, error: {err:?}"
                         );
-                        SwarmCmd::RemoveFailedLocalRecord { key }
+                        LocalSwarmCmd::RemoveFailedLocalRecord { key }
                     }
                 };

-                send_swarm_cmd(cloned_cmd_sender, cmd);
+                send_local_swarm_cmd(cloned_cmd_sender, cmd);
             }
         });
diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs
index 8aedbc525c..1b90ac9a53 100644
--- a/sn_networking/src/replication_fetcher.rs
+++ b/sn_networking/src/replication_fetcher.rs
@@ -223,10 +223,23 @@ impl ReplicationFetcher {
     pub(crate) fn notify_fetch_early_completed(
         &mut self,
         key_in: RecordKey,
+        record_type: RecordType,
     ) -> Vec<(PeerId, RecordKey)> {
-        self.to_be_fetched.retain(|(key, _t, _), _| key != &key_in);
+        self.to_be_fetched.retain(|(key, current_type, _), _| {
+            if current_type == &record_type {
+                key != &key_in
+            } else {
+                true
+            }
+        });

-        self.on_going_fetches.retain(|(key, _t), _| key != &key_in);
+        self.on_going_fetches.retain(|(key, current_type), _| {
+            if current_type == &record_type {
+                key != &key_in
+            } else {
+                true
+            }
+        });

         self.next_keys_to_fetch()
     }
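The two retain closures above keep an entry unless both its key and its record type match the completed fetch. Written as a single predicate the intent is perhaps clearer; an equivalent sketch (not the code in this diff):

// Drop an entry only when BOTH the key and the record type match the
// completed fetch; entries for the same key but a different type survive.
self.to_be_fetched
    .retain(|(key, current_type, _), _| !(key == &key_in && current_type == &record_type));
self.on_going_fetches
    .retain(|(key, current_type), _| !(key == &key_in && current_type == &record_type));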
diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml
index 0cc94cc3ac..0d7d526f36 100644
--- a/sn_node/Cargo.toml
+++ b/sn_node/Cargo.toml
@@ -2,7 +2,7 @@
 authors = ["MaidSafe Developers <dev@maidsafe.net>"]
 description = "Safe Node"
 name = "sn_node"
-version = "0.109.0"
+version = "0.110.0"
 edition = "2021"
 license = "GPL-3.0"
 homepage = "https://maidsafe.net"
@@ -50,14 +50,14 @@ rmp-serde = "1.1.1"
 rayon = "1.8.0"
 self_encryption = "~0.29.0"
 serde = { version = "1.0.133", features = ["derive", "rc"] }
-sn_build_info = { path = "../sn_build_info", version = "0.1.9" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" }
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
-sn_networking = { path = "../sn_networking", version = "0.17.0" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5" }
-sn_registers = { path = "../sn_registers", version = "0.3.15" }
-sn_transfers = { path = "../sn_transfers", version = "0.18.8" }
-sn_service_management = { path = "../sn_service_management", version = "0.3.8" }
+sn_build_info = { path = "../sn_build_info", version = "0.1.10" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" }
+sn_logging = { path = "../sn_logging", version = "0.2.31" }
+sn_networking = { path = "../sn_networking", version = "0.17.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6" }
+sn_registers = { path = "../sn_registers", version = "0.3.16" }
+sn_transfers = { path = "../sn_transfers", version = "0.18.9" }
+sn_service_management = { path = "../sn_service_management", version = "0.3.9" }
 thiserror = "1.0.23"
 tokio = { version = "1.32.0", features = [
     "io-util",
@@ -84,11 +84,11 @@ reqwest = { version = "0.12.2", default-features = false, features = [
     "rustls-tls-manual-roots",
 ] }
 serde_json = "1.0"
-sn_client = { path = "../sn_client", version = "0.108.0" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5", features = [
+sn_client = { path = "../sn_client", version = "0.109.0" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6", features = [
     "rpc",
 ] }
-sn_transfers = { path = "../sn_transfers", version = "0.18.8", features = [
+sn_transfers = { path = "../sn_transfers", version = "0.18.9", features = [
     "test-utils",
 ] }
 tempfile = "3.6.0"
diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs
index 64635bf18b..602312f443 100644
--- a/sn_node/src/put_validation.rs
+++ b/sn_node/src/put_validation.rs
@@ -30,11 +30,6 @@ impl Node {
     pub(crate) async fn validate_and_store_record(&self, record: Record) -> Result<()> {
         let record_header = RecordHeader::from_record(&record)?;

-        // Notify replication_fetcher to mark the attempt as completed.
-        // Send the notification earlier to avoid it got skipped due to:
-        // the record becomes stored during the fetch because of other interleaved process.
-        self.network().notify_fetch_completed(record.key.clone());
-
         match record_header.kind {
             RecordKind::ChunkWithPayment => {
                 let record_key = record.key.clone();
@@ -56,6 +51,13 @@ impl Node {
                     // we eagerly retry replication here, as it seems like other nodes
                     // are having trouble and did not manage to get this chunk as yet
                     self.replicate_valid_fresh_record(record_key, RecordType::Chunk);
+
+                    // Notify replication_fetcher to mark the attempt as completed.
+                    // Send the notification early, to avoid it being skipped because the
+                    // record became stored during the fetch by some other interleaved process.
+                    self.network()
+                        .notify_fetch_completed(record.key.clone(), RecordType::Chunk);
+
                     debug!(
                         "Chunk with addr {:?} already exists: {already_exists}, payment extracted.",
                         chunk.network_address()
@@ -75,6 +77,12 @@ impl Node {
                     Marker::ValidPaidChunkPutFromClient(&PrettyPrintRecordKey::from(&record.key))
                         .log();
                     self.replicate_valid_fresh_record(record_key, RecordType::Chunk);
+
+                    // Notify replication_fetcher to mark the attempt as completed.
+                    // Send the notification early, to avoid it being skipped because the
+                    // record became stored during the fetch by some other interleaved process.
+                    self.network()
+                        .notify_fetch_completed(record.key.clone(), RecordType::Chunk);
                 }

                 store_chunk_result
@@ -99,6 +107,14 @@ impl Node {
                     record_key,
                     RecordType::NonChunk(content_hash),
                 );
+
+                // Notify replication_fetcher to mark the attempt as completed.
+                // Send the notification early, to avoid it being skipped because the
+                // record became stored during the fetch by some other interleaved process.
+                self.network().notify_fetch_completed(
+                    record.key.clone(),
+                    RecordType::NonChunk(content_hash),
+                );
             }
             result
         }
@@ -122,9 +138,22 @@ impl Node {
                 let result = self.validate_and_store_register(register, true).await;

                 if result.is_ok() {
+                    debug!("Successfully stored register update at {pretty_key:?}");
                     Marker::ValidPaidRegisterPutFromClient(&pretty_key).log();
                     // we don't try to force replication here, as there's state to be
                     // kept in sync, which we leave up to the client to enforce
+
+                    let content_hash = XorName::from_content(&record.value);
+
+                    // Notify replication_fetcher to mark the attempt as completed.
+                    // Send the notification early, to avoid it being skipped because the
+                    // record became stored during the fetch by some other interleaved process.
+                    self.network().notify_fetch_completed(
+                        record.key.clone(),
+                        RecordType::NonChunk(content_hash),
+                    );
+                } else {
+                    warn!("Failed to store register update at {pretty_key:?}");
                 }
                 result
             }
@@ -161,7 +190,19 @@ impl Node {
                 }
             }

-            self.validate_and_store_register(register, true).await
+            let res = self.validate_and_store_register(register, true).await;
+            if res.is_ok() {
+                let content_hash = XorName::from_content(&record.value);
+
+                // Notify replication_fetcher to mark the attempt as completed.
+                // Send the notification early, to avoid it being skipped because the
+                // record became stored during the fetch by some other interleaved process.
+                self.network().notify_fetch_completed(
+                    record.key.clone(),
+                    RecordType::NonChunk(content_hash),
+                );
+            }
+            res
         }
     }
 }
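For non-chunk (mutable) records such as registers, the completion notice is keyed by the hash of the stored content, so completing a fetch of one version cannot clear a pending fetch for another version of the same record. A self-contained illustration with a pared-down RecordType (the real enum lives in the sn_networking stack):

use xor_name::XorName;

#[derive(Debug, PartialEq)]
enum RecordType {
    Chunk,
    NonChunk(XorName),
}

fn main() {
    let v1 = RecordType::NonChunk(XorName::from_content(b"register v1"));
    let v2 = RecordType::NonChunk(XorName::from_content(b"register v2"));
    // Same register key, different content hash: storing v1 must not clear
    // a pending replication fetch for v2.
    assert_ne!(v1, v2);
    // Chunks are immutable, so a bare variant suffices for them.
    assert_eq!(RecordType::Chunk, RecordType::Chunk);
}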
@@ -298,10 +339,12 @@ impl Node {
         // check register and merge if needed
         let updated_register = match self.register_validation(&register, present_locally).await? {
-            Some(reg) => reg,
+            Some(reg) => {
+                debug!("Register needed to be updated");
+                reg
+            }
             None => {
-                // Notify replication_fetcher to mark the attempt as completed.
-                self.network().notify_fetch_completed(key.clone());
+                debug!("No update needed for register");
                 return Ok(());
             }
         };
diff --git a/sn_node/tests/double_spend.rs b/sn_node/tests/double_spend.rs
index 3f6296c490..ce1e9515e5 100644
--- a/sn_node/tests/double_spend.rs
+++ b/sn_node/tests/double_spend.rs
@@ -11,7 +11,7 @@ mod common;
 use assert_fs::TempDir;
 use assert_matches::assert_matches;
 use common::client::{get_client_and_funded_wallet, get_wallet};
-use eyre::Result;
+use eyre::{bail, Result};
 use itertools::Itertools;
 use sn_logging::LogBuilder;
 use sn_networking::NetworkError;
@@ -77,15 +77,18 @@ async fn cash_note_transfer_double_spend_fail() -> Result<()> {
     let cash_notes_for_2: Vec<_> = transfer_to_2.cash_notes_for_recipient.clone();
     let cash_notes_for_3: Vec<_> = transfer_to_3.cash_notes_for_recipient.clone();

+    // we wait 5s to ensure that the double spend attempt is detected and accumulated
+    tokio::time::sleep(Duration::from_secs(5)).await;
+
     let should_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await;
     let should_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await;
     info!("Both should fail during GET record accumulation : {should_err1:?} {should_err2:?}");
     assert!(should_err1.is_err() && should_err2.is_err());
     assert_matches!(should_err1, Err(WalletError::CouldNotVerifyTransfer(str)) => {
-        assert!(str.starts_with("Network Error Double spend(s) was detected"));
+        assert!(str.starts_with("Network Error Double spend(s) attempt was detected"));
     });
     assert_matches!(should_err2, Err(WalletError::CouldNotVerifyTransfer(str)) => {
-        assert!(str.starts_with("Network Error Double spend(s) was detected"));
+        assert!(str.starts_with("Network Error Double spend(s) attempt was detected"));
     });

     Ok(())
@@ -239,11 +242,17 @@ async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> {
     client
         .send_spends(transfer_to_3.all_spend_requests.iter(), false)
         .await?;
-    info!("Verifying the transfers from 1 -> 3 wallet... It should error out.");
+
     let cash_notes_for_3: Vec<_> = transfer_to_3.cash_notes_for_recipient.clone();
-    assert!(client.verify_cashnote(&cash_notes_for_3[0]).await.is_err()); // the old spend has been poisoned
-    info!("Verifying the original transfers from 1 -> 2 wallet... It should error out.");
-    assert!(client.verify_cashnote(&cash_notes_for_2[0]).await.is_err()); // the old spend has been poisoned
+
+    info!("Verifying the transfers from 1 -> 3 wallet and 1 -> 2... One should error out.");
+    let for3_failed = client.verify_cashnote(&cash_notes_for_3[0]).await.is_err();
+    let for2_failed = client.verify_cashnote(&cash_notes_for_2[0]).await.is_err();
+    // Both cannot pass
+    assert!(
+        for2_failed || for3_failed,
+        "one transaction must be invalid"
+    ); // the old spend has been poisoned

     // The old spend has been poisoned, but spends from 22 -> 222 should still work
     let wallet_dir_222 = TempDir::new()?;
@@ -266,10 +275,36 @@ async fn poisoning_old_spend_should_not_affect_descendant() -> Result<()> {
     client
         .send_spends(transfer_to_222.all_spend_requests.iter(), false)
         .await?;
+
     info!("Verifying the transfers from 22 -> 222 wallet...");
     let cash_notes_for_222: Vec<_> = transfer_to_222.cash_notes_for_recipient.clone();
     client.verify_cashnote(&cash_notes_for_222[0]).await?;

+    // finally assert that we have a double spend attempt error here
+    // we wait 5s to ensure that the double spend attempt is detected and accumulated
+    tokio::time::sleep(Duration::from_secs(5)).await;
+
+    match client.verify_cashnote(&cash_notes_for_2[0]).await {
+        Ok(_) => bail!("Cashnote verification should have failed"),
+        Err(e) => {
+            assert!(
+                e.to_string()
+                    .contains("Network Error Double spend(s) attempt was detected"),
+                "error should reflect double spend attempt",
+            );
+        }
+    }
+
+    match client.verify_cashnote(&cash_notes_for_3[0]).await {
+        Ok(_) => bail!("Cashnote verification should have failed"),
+        Err(e) => {
+            assert!(
+                e.to_string()
+                    .contains("Network Error Double spend(s) attempt was detected"),
+                "error should reflect double spend attempt",
+            );
+        }
+    }
     Ok(())
 }
@@ -365,10 +400,13 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid()
         .await?;
     info!("Verifying the transfers from A -> X wallet... It should error out.");
     let cash_notes_for_x: Vec<_> = transfer_to_x.cash_notes_for_recipient.clone();
+
     let result = client.verify_cashnote(&cash_notes_for_x[0]).await;
     info!("Got result while verifying double spend from A -> X: {result:?}");
+
     assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
-        assert!(str.starts_with("Network Error Double spend(s) was detected"));
+        let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) attempt was detected");
+        assert!(spend_did_not_happen);
     }); // poisoned

     // Try to double spend from B -> Y
@@ -401,25 +439,43 @@ async fn parent_and_child_double_spends_should_lead_to_cashnote_being_invalid()
     info!("Verifying the transfers from B -> Y wallet... It should error out.");
     let cash_notes_for_y: Vec<_> = transfer_to_y.cash_notes_for_recipient.clone();
+
     let result = client.verify_cashnote(&cash_notes_for_y[0]).await;
     info!("Got result while verifying double spend from B -> Y: {result:?}");
     assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
-        assert!(str.starts_with("Network Error Double spend(s) was detected"));
+        let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) attempt was detected");
+        assert!(spend_did_not_happen);
     });

     info!("Verifying the original cashnote of A -> B");
+
     let result = client.verify_cashnote(&cash_notes_for_b[0]).await;
     info!("Got result while verifying the original spend from A -> B: {result:?}");
     assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
-        assert!(str.starts_with("Network Error Double spend(s) was detected"));
+        let spend_did_not_happen = str.starts_with("The spends in network were not the same as the ones in the CashNote") || str.starts_with("Network Error Double spend(s) attempt was detected");
+        assert!(spend_did_not_happen);
     });

     println!("Verifying the original cashnote of B -> C");
+
+    // arbitrary sleep to allow for network accumulation of the double spend
+    tokio::time::sleep(Duration::from_secs(15)).await;
+
     let result = client.verify_cashnote(&cash_notes_for_c[0]).await;
     info!("Got result while verifying the original spend from B -> C: {result:?}");
     assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
-        assert!(str.starts_with("Network Error Double spend(s) was detected"));
-    });
+        assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for c should show double spend attempt");
+    }, "result should be verify error, it was {result:?}");
+
+    let result = client.verify_cashnote(&cash_notes_for_y[0]).await;
+    assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
+        assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for y should show double spend attempt");
+    }, "result should be verify error, it was {result:?}");
+    let result = client.verify_cashnote(&cash_notes_for_b[0]).await;
+    assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
+        assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "cashnote for b should show double spend attempt");
+    }, "result should be verify error, it was {result:?}");
     Ok(())
 }
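The fixed sleeps in these tests (5s, 15s) give the network time to accumulate the double spend before asserting. A bounded polling loop is a possible alternative that fails fast when the error surfaces early; a sketch, with check standing in for a verify_cashnote-style call:

use std::time::{Duration, Instant};

// Poll until the double spend error surfaces or the deadline passes.
async fn wait_for_double_spend_error<F, Fut>(mut check: F, deadline: Duration) -> bool
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<(), String>>,
{
    let started = Instant::now();
    while started.elapsed() < deadline {
        if let Err(e) = check().await {
            if e.contains("Double spend(s) attempt was detected") {
                return true; // error accumulated, stop waiting
            }
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    false // deadline reached without seeing the expected error
}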
@@ -522,10 +578,14 @@ async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> {
         .await?;
     info!("Verifying the transfers from A -> X wallet... It should error out.");
     let cash_notes_for_x: Vec<_> = transfer_to_x.cash_notes_for_recipient.clone();
+
+    // sleep for a bit to allow the network to process and accumulate the double spend
+    tokio::time::sleep(Duration::from_secs(10)).await;
+
     let result = client.verify_cashnote(&cash_notes_for_x[0]).await;
     info!("Got result while verifying double spend from A -> X: {result:?}");
     assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
-        assert!(str.starts_with("Network Error Double spend(s) was detected"));
+        assert!(str.starts_with("Network Error Double spend(s) attempt was detected"), "non double spend error found: {str:?}");
     });

     // the original A should still be present as one of the double spends
@@ -565,10 +625,14 @@ async fn spamming_double_spends_should_not_shadow_live_branch() -> Result<()> {
         .await?;
     info!("Verifying the transfers from A -> Y wallet... It should error out.");
     let cash_notes_for_y: Vec<_> = transfer_to_y.cash_notes_for_recipient.clone();
+
+    // sleep for a bit to allow the network to process and accumulate the double spend
+    tokio::time::sleep(Duration::from_millis(500)).await;
+
     let result = client.verify_cashnote(&cash_notes_for_y[0]).await;
     info!("Got result while verifying double spend from A -> Y: {result:?}");
     assert_matches!(result, Err(WalletError::CouldNotVerifyTransfer(str)) => {
-        assert!(str.starts_with("Network Error Double spend(s) was detected"));
+        assert!(str.starts_with("Network Error Double spend(s) attempt was detected"));
     });

     // the original A should still be present as one of the double spends
diff --git a/sn_node_manager/Cargo.toml b/sn_node_manager/Cargo.toml
index b5635edc6b..becade3053 100644
--- a/sn_node_manager/Cargo.toml
+++ b/sn_node_manager/Cargo.toml
@@ -7,7 +7,7 @@ license = "GPL-3.0"
 name = "sn-node-manager"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.10.0"
+version = "0.10.1"

 [[bin]]
 name = "safenode-manager"
@@ -44,12 +44,12 @@ semver = "1.0.20"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 service-manager = "0.7.0"
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
-sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5" }
-sn_service_management = { path = "../sn_service_management", version = "0.3.8" }
+sn_logging = { path = "../sn_logging", version = "0.2.31" }
+sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6" }
+sn_service_management = { path = "../sn_service_management", version = "0.3.9" }
 sn-releases = "0.2.6"
-sn_transfers = { path = "../sn_transfers", version = "0.18.8" }
+sn_transfers = { path = "../sn_transfers", version = "0.18.9" }
 sysinfo = "0.30.12"
 thiserror = "1.0.23"
 tokio = { version = "1.26", features = ["full"] }
diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml
index 5f59246281..229941c7ec 100644
--- a/sn_node_rpc_client/Cargo.toml
+++ b/sn_node_rpc_client/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
 name = "sn_node_rpc_client"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.6.25"
+version = "0.6.26"

 [[bin]]
 name = "safenode_rpc_client"
@@ -23,13 +23,13 @@ color-eyre = "0.6.2"
 hex = "~0.4.3"
 libp2p = { version="0.53", features = ["kad"]}
 libp2p-identity = { version="0.2.7", features = ["rand"] }
-sn_client = { path = "../sn_client", version = "0.108.0" }
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
{ path = "../sn_logging", version = "0.2.30" } -sn_node = { path = "../sn_node", version = "0.109.0" } -sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.0" } -sn_protocol = { path = "../sn_protocol", version = "0.17.5", features=["rpc"] } -sn_service_management = { path = "../sn_service_management", version = "0.3.8" } -sn_transfers = { path = "../sn_transfers", version = "0.18.8" } +sn_client = { path = "../sn_client", version = "0.109.0" } +sn_logging = { path = "../sn_logging", version = "0.2.31" } +sn_node = { path = "../sn_node", version = "0.110.0" } +sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.4.1" } +sn_protocol = { path = "../sn_protocol", version = "0.17.6", features=["rpc"] } +sn_service_management = { path = "../sn_service_management", version = "0.3.9" } +sn_transfers = { path = "../sn_transfers", version = "0.18.9" } thiserror = "1.0.23" # # watch out updating this, protoc compiler needs to be installed on all build systems # # arm builds + musl are very problematic diff --git a/sn_peers_acquisition/Cargo.toml b/sn_peers_acquisition/Cargo.toml index 8f4d7180dd..5df30921a8 100644 --- a/sn_peers_acquisition/Cargo.toml +++ b/sn_peers_acquisition/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_peers_acquisition" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.4.0" +version = "0.4.1" [features] local-discovery = [] @@ -21,7 +21,7 @@ lazy_static = "~1.4.0" libp2p = { version="0.53", features = [] } rand = "0.8.5" reqwest = { version="0.12.2", default-features=false, features = ["rustls-tls"] } -sn_networking = { path = "../sn_networking", version = "0.17.0", optional = true} +sn_networking = { path = "../sn_networking", version = "0.17.1", optional = true} thiserror = "1.0.23" tokio = { version = "1.32.0", default-features = false} tracing = { version = "~0.1.26" } diff --git a/sn_protocol/Cargo.toml b/sn_protocol/Cargo.toml index 8a7ebe6ea5..c30e4081b5 100644 --- a/sn_protocol/Cargo.toml +++ b/sn_protocol/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_protocol" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.17.5" +version = "0.17.6" [features] default = [] @@ -27,8 +27,8 @@ rmp-serde = "1.1.1" serde = { version = "1.0.133", features = [ "derive", "rc" ]} serde_json = "1.0" sha2 = "0.10.7" -sn_transfers = { path = "../sn_transfers", version = "0.18.8" } -sn_registers = { path = "../sn_registers", version = "0.3.15" } +sn_transfers = { path = "../sn_transfers", version = "0.18.9" } +sn_registers = { path = "../sn_registers", version = "0.3.16" } thiserror = "1.0.23" tiny-keccak = { version = "~2.0.2", features = [ "sha3" ] } tracing = { version = "~0.1.26" } diff --git a/sn_registers/Cargo.toml b/sn_registers/Cargo.toml index d5d37195db..ea41bcd7c7 100644 --- a/sn_registers/Cargo.toml +++ b/sn_registers/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0" name = "sn_registers" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.15" +version = "0.3.16" [features] test-utils = [] diff --git a/sn_service_management/Cargo.toml b/sn_service_management/Cargo.toml index 63b07ff84b..cb30be547f 100644 --- a/sn_service_management/Cargo.toml +++ b/sn_service_management/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0" name = "sn_service_management" readme = "README.md" repository = "https://github.com/maidsafe/safe_network" -version = "0.3.8" +version = "0.3.9" [dependencies] async-trait = "0.1" @@ 
 serde_json = "1.0"
 semver = "1.0.20"
 service-manager = "0.7.0"
-sn_logging = { path = "../sn_logging", version = "0.2.30" }
-sn_protocol = { path = "../sn_protocol", version = "0.17.5", features = [
+sn_logging = { path = "../sn_logging", version = "0.2.31" }
+sn_protocol = { path = "../sn_protocol", version = "0.17.6", features = [
     "rpc",
 ] }
-sn_transfers = { path = "../sn_transfers", version = "0.18.8" }
+sn_transfers = { path = "../sn_transfers", version = "0.18.9" }
 sysinfo = "0.30.12"
 thiserror = "1.0.23"
 tokio = { version = "1.32.0", features = ["time"] }
diff --git a/sn_transfers/Cargo.toml b/sn_transfers/Cargo.toml
index 75156b5920..c87b586660 100644
--- a/sn_transfers/Cargo.toml
+++ b/sn_transfers/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
 name = "sn_transfers"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.18.8"
+version = "0.18.9"

 [features]
 reward-forward = []
diff --git a/sn_transfers/src/wallet/wallet_file.rs b/sn_transfers/src/wallet/wallet_file.rs
index a39c911507..58b4827663 100644
--- a/sn_transfers/src/wallet/wallet_file.rs
+++ b/sn_transfers/src/wallet/wallet_file.rs
@@ -69,7 +69,7 @@ pub(super) fn remove_unconfirmed_spend_requests(
         let spend_hex_name = spend.address().to_hex();
         let spend_file_path = spends_dir.join(&spend_hex_name);
         debug!("Writing spend to: {spend_file_path:?}");
-        fs::write(spend_file_path, &spend.to_bytes())?;
+        fs::write(spend_file_path, spend.to_bytes())?;
     }

     let unconfirmed_spend_requests_path = wallet_dir.join(UNCONFIRMED_TX_NAME);
diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml
index ef5d45695f..17e006d306 100644
--- a/test_utils/Cargo.toml
+++ b/test_utils/Cargo.toml
@@ -7,7 +7,7 @@ license = "GPL-3.0"
 name = "test_utils"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.4.1"
+version = "0.4.2"

 [dependencies]
 color-eyre = "~0.6.2"
diff --git a/token_supplies/Cargo.toml b/token_supplies/Cargo.toml
index 652d037787..7f182cd035 100644
--- a/token_supplies/Cargo.toml
+++ b/token_supplies/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0"
 name = "token_supplies"
 readme = "README.md"
 repository = "https://github.com/maidsafe/safe_network"
-version = "0.1.48"
+version = "0.1.49"

 [dependencies]