From 537f22489a44ca3626be959b80ea36711bda27f8 Mon Sep 17 00:00:00 2001
From: Chris O'Neil <chriso83@protonmail.com>
Date: Tue, 21 May 2024 21:17:45 +0100
Subject: [PATCH] test: enable node man integration tests

These tests are enabled again, along with some changes to the setup. Here are
the important points:

* The tests now run against an isolated local network, and the binary built
  during CI is supplied.
* The workflow runs the e2e tests for both system-wide and user-mode services.
* The test definitions are moved to a new workflow file, but it has the same
  conditions as the merge workflow. This is simply because the merge workflow
  file is already large and difficult to navigate.
* The upgrade integration tests are removed because, since the service
  management refactor, the unit tests cover the upgrade scenarios well. What
  we are more concerned with in the upgrade process is the logic for how the
  different upgrade scenarios are handled.
* The daemon integration tests are removed. The test that was set up was
  really trying to cover the scenario where peer retention was specified for
  restart commands, but we have agreed that the semantics of this command are
  wrong and that it needs to be broken down. In general, the daemon commands
  will correspond to the operations of the node manager, and the node manager
  operations should already be quite well tested.
* Some documentation was added.
---
 .github/workflows/merge.yml          | 201 ---------------------
 .github/workflows/node_man_tests.yml | 156 ++++++++++++++++
 Justfile                             |  16 +-
 sn_node_manager/README.md            |  14 ++
 sn_node_manager/Vagrantfile          |  21 ++-
 sn_node_manager/tests/daemon.rs      | 161 -----------------
 sn_node_manager/tests/e2e.rs         |  34 +++-
 sn_node_manager/tests/upgrades.rs    | 257 ---------------------------
 8 files changed, 229 insertions(+), 631 deletions(-)
 create mode 100644 .github/workflows/node_man_tests.yml
 delete mode 100644 sn_node_manager/tests/daemon.rs
 delete mode 100644 sn_node_manager/tests/upgrades.rs

diff --git a/.github/workflows/merge.yml b/.github/workflows/merge.yml
index 4f13ba91e1..fdc2345f13 100644
--- a/.github/workflows/merge.yml
+++ b/.github/workflows/merge.yml
@@ -141,207 +141,6 @@ jobs:
         # we do many more runs on the nightly run
         PROPTEST_CASES: 50
 
-  node-manager-unit-tests:
-    name: node manager unit tests
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Install Rust
-        uses: dtolnay/rust-toolchain@stable
-      - uses: Swatinem/rust-cache@v2
-
-      - name: cargo cache registry, index and build
-        uses: actions/cache@v4.0.2
-        with:
-          path: |
-            ~/.cargo/registry
-            ~/.cargo/git
-            target
-          key: ${{ runner.os }}-cargo-cache-${{ hashFiles('**/Cargo.lock') }}
-      - shell: bash
-        run: cargo test --lib --package sn-node-manager
-
-  #
-  # Temporarily disable node manager integration tests until they can be made more isolated.
- # - # node-manager-e2e-tests: - # name: node manager e2e tests - # runs-on: ${{ matrix.os }} - # strategy: - # fail-fast: false - # matrix: - # include: - # - { os: ubuntu-latest, elevated: sudo env PATH="$PATH" } - # - { os: macos-latest, elevated: sudo } - # - { os: windows-latest } - # steps: - # - uses: actions/checkout@v4 - # - # - name: Install Rust - # uses: dtolnay/rust-toolchain@stable - # - uses: Swatinem/rust-cache@v2 - # - # - shell: bash - # if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' - # run: | - # ${{ matrix.elevated }} rustup default stable - # ${{ matrix.elevated }} cargo test --package sn-node-manager --release --test e2e -- --nocapture - # - # # Powershell step runs as admin by default. - # - name: run integration test in powershell - # if: matrix.os == 'windows-latest' - # shell: pwsh - # run: | - # curl -L -o WinSW.exe $env:WINSW_URL - # - # New-Item -ItemType Directory -Force -Path "$env:GITHUB_WORKSPACE\bin" - # Move-Item -Path WinSW.exe -Destination "$env:GITHUB_WORKSPACE\bin" - # $env:PATH += ";$env:GITHUB_WORKSPACE\bin" - # - # cargo test --release --package sn-node-manager --test e2e -- --nocapture - - # Each upgrade test needs its own VM, otherwise they will interfere with each other. - # node-manager-upgrade-tests: - # name: node manager upgrade tests - # runs-on: ${{ matrix.os }} - # strategy: - # fail-fast: false - # matrix: - # include: - # - { - # os: ubuntu-latest, - # elevated: sudo env PATH="$PATH", - # test: upgrade_to_latest_version, - # } - # - { - # os: ubuntu-latest, - # elevated: sudo env PATH="$PATH", - # test: force_upgrade_when_two_binaries_have_the_same_version, - # } - # - { - # os: ubuntu-latest, - # elevated: sudo env PATH="$PATH", - # test: force_downgrade_to_a_previous_version, - # } - # - { - # os: ubuntu-latest, - # elevated: sudo env PATH="$PATH", - # test: upgrade_from_older_version_to_specific_version, - # } - # - { - # os: macos-latest, - # elevated: sudo, - # test: upgrade_to_latest_version, - # } - # - { - # os: macos-latest, - # elevated: sudo, - # test: force_upgrade_when_two_binaries_have_the_same_version, - # } - # - { - # os: macos-latest, - # elevated: sudo, - # test: force_downgrade_to_a_previous_version, - # } - # - { - # os: macos-latest, - # elevated: sudo, - # test: upgrade_from_older_version_to_specific_version, - # } - # - { os: windows-latest, test: upgrade_to_latest_version } - # - { - # os: windows-latest, - # test: force_upgrade_when_two_binaries_have_the_same_version, - # } - # - { os: windows-latest, test: force_downgrade_to_a_previous_version } - # - { - # os: windows-latest, - # test: upgrade_from_older_version_to_specific_version, - # } - # steps: - # - uses: actions/checkout@v4 - # - # - name: Install Rust - # uses: dtolnay/rust-toolchain@stable - # - uses: Swatinem/rust-cache@v2 - # - # - shell: bash - # if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' - # run: | - # ${{ matrix.elevated }} rustup default stable - # ${{ matrix.elevated }} cargo test --package sn-node-manager --release \ - # --test upgrades ${{ matrix.test }} -- --nocapture - # - # # Powershell step runs as admin by default. 
- # - name: run integration test in powershell - # if: matrix.os == 'windows-latest' - # shell: pwsh - # run: | - # curl -L -o WinSW.exe $env:WINSW_URL - # - # New-Item -ItemType Directory -Force -Path "$env:GITHUB_WORKSPACE\bin" - # Move-Item -Path WinSW.exe -Destination "$env:GITHUB_WORKSPACE\bin" - # $env:PATH += ";$env:GITHUB_WORKSPACE\bin" - # - # cargo test --package sn-node-manager --release ` - # --test upgrades ${{ matrix.test }} -- --nocapture - # - # # Each daemon test needs its own VM, otherwise they will interfere with each other. - # node-manager-daemon-tests: - # name: node manager daemon tests - # runs-on: ${{ matrix.os }} - # strategy: - # fail-fast: false - # matrix: - # include: - # - { - # os: ubuntu-latest, - # elevated: sudo env PATH="$PATH", - # test: restart_node, - # } - # # todo: enable once url/version has been implemented for Daemon subcmd. - # # - { - # # os: macos-latest, - # # elevated: sudo, - # # test: restart_node, - # # } - # # - { - # # os: windows-latest, - # # test: restart_node, - # # } - # steps: - # - uses: actions/checkout@v4 - # - # - name: Install Rust - # uses: dtolnay/rust-toolchain@stable - # - uses: Swatinem/rust-cache@v2 - # - # - name: run integration test - # shell: bash - # if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' - # run: | - # ${{ matrix.elevated }} rustup default stable - # ${{ matrix.elevated }} cargo test --package sn-node-manager --release \ - # --test daemon ${{ matrix.test }} -- --nocapture - # - # # Powershell step runs as admin by default. - # - name: run integration test in powershell - # if: matrix.os == 'windows-latest' - # shell: pwsh - # run: | - # curl -L -o WinSW.exe $env:WINSW_URL - # - # New-Item -ItemType Directory -Force -Path "$env:GITHUB_WORKSPACE\bin" - # Move-Item -Path WinSW.exe -Destination "$env:GITHUB_WORKSPACE\bin" - # $env:PATH += ";$env:GITHUB_WORKSPACE\bin" - # - # cargo test --package sn-node-manager --release ` - # --test daemon ${{ matrix.test }} -- --nocapture - e2e: if: "!startsWith(github.event.head_commit.message, 'chore(release):')" name: E2E tests diff --git a/.github/workflows/node_man_tests.yml b/.github/workflows/node_man_tests.yml new file mode 100644 index 0000000000..ea49a67372 --- /dev/null +++ b/.github/workflows/node_man_tests.yml @@ -0,0 +1,156 @@ +name: Node Manager Tests + +on: + merge_group: + branches: [main, alpha*, beta*, rc*] + pull_request: + branches: ["*"] + +env: + CARGO_INCREMENTAL: 0 # bookkeeping for incremental builds has overhead, not useful in CI. 
+ WINSW_URL: https://github.com/winsw/winsw/releases/download/v3.0.0-alpha.11/WinSW-x64.exe + +jobs: + node-manager-unit-tests: + name: node manager unit tests + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + steps: + - uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + + - name: cargo cache registry, index and build + uses: actions/cache@v4.0.2 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-cache-${{ hashFiles('**/Cargo.lock') }} + - shell: bash + run: cargo test --lib --package sn-node-manager + + node-manager-user-mode-e2e-tests: + name: user-mode e2e + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - { os: ubuntu-latest } + - { os: macos-latest } + steps: + - uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + + - name: Build binaries + run: cargo build --release --bin safenode --bin faucet + timeout-minutes: 30 + + - name: Start a local network + uses: maidsafe/sn-local-testnet-action@main + with: + action: start + interval: 2000 + node-path: target/release/safenode + faucet-path: target/release/faucet + platform: ${{ matrix.os }} + build: true + + - name: Check SAFE_PEERS was set + shell: bash + run: | + if [[ -z "$SAFE_PEERS" ]]; then + echo "The SAFE_PEERS variable has not been set" + exit 1 + else + echo "SAFE_PEERS has been set to $SAFE_PEERS" + fi + + - shell: bash + run: | + cargo test --package sn-node-manager --release --test e2e -- --nocapture + + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: node_man_tests_user_mode + platform: ${{ matrix.os }} + + node-manager-e2e-tests: + name: system-wide e2e + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - { os: ubuntu-latest, elevated: sudo -E env PATH="$PATH" } + - { os: macos-latest, elevated: sudo -E } + - { os: windows-latest } + steps: + - uses: actions/checkout@v4 + + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + + - name: Build binaries + run: cargo build --release --bin safenode --bin faucet + timeout-minutes: 30 + + - name: Start a local network + uses: maidsafe/sn-local-testnet-action@main + with: + action: start + interval: 2000 + node-path: target/release/safenode + faucet-path: target/release/faucet + platform: ${{ matrix.os }} + build: true + + - name: Check SAFE_PEERS was set + shell: bash + run: | + if [[ -z "$SAFE_PEERS" ]]; then + echo "The SAFE_PEERS variable has not been set" + exit 1 + else + echo "SAFE_PEERS has been set to $SAFE_PEERS" + fi + + - shell: bash + if: matrix.os == 'ubuntu-latest' || matrix.os == 'macos-latest' + run: | + ${{ matrix.elevated }} rustup default stable + ${{ matrix.elevated }} cargo test --package sn-node-manager --release --test e2e -- --nocapture + + # Powershell step runs as admin by default. 
+ - name: run integration test in powershell + if: matrix.os == 'windows-latest' + shell: pwsh + run: | + curl -L -o WinSW.exe $env:WINSW_URL + + New-Item -ItemType Directory -Force -Path "$env:GITHUB_WORKSPACE\bin" + Move-Item -Path WinSW.exe -Destination "$env:GITHUB_WORKSPACE\bin" + $env:PATH += ";$env:GITHUB_WORKSPACE\bin" + + cargo test --release --package sn-node-manager --test e2e -- --nocapture + + - name: Stop the local network and upload logs + if: always() + uses: maidsafe/sn-local-testnet-action@main + with: + action: stop + log_file_prefix: node_man_tests_system_wide + platform: ${{ matrix.os }} diff --git a/Justfile b/Justfile index ae70d54708..d03faf7784 100644 --- a/Justfile +++ b/Justfile @@ -360,5 +360,17 @@ upload-release-assets-to-s3 bin_name: cd deploy/{{bin_name}} for file in *.zip *.tar.gz; do - aws s3 cp "$file" "s3://$bucket/$file" --acl public-read - done + aws s3 cp "$file" "s3://$bucket/$file" --acl public-read done + +node-man-integration-tests: + #!/usr/bin/env bash + set -e + + cargo build --release --bin safenode --bin faucet --bin safenode-manager + cargo run --release --bin safenode-manager -- local run \ + --node-path target/release/safenode \ + --faucet-path target/release/faucet + peer=$(cargo run --release --bin safenode-manager -- local status \ + --json | jq -r .nodes[-1].listen_addr[0]) + export SAFE_PEERS=$peer + cargo test --release --package sn-node-manager --test e2e -- --nocapture diff --git a/sn_node_manager/README.md b/sn_node_manager/README.md index d71d72db0b..eea17b05c4 100644 --- a/sn_node_manager/README.md +++ b/sn_node_manager/README.md @@ -358,3 +358,17 @@ So by default, 25 node processes have been launched, along with a faucet. The fa The most common scenario for using a local network is for development, but you can also use it to exercise a lot of features locally. For more details, please see the 'Using a Local Network' section of the [main README](https://github.com/maidsafe/safe_network/tree/node-man-readme?tab=readme-ov-file#using-a-local-network). Once you've finished, run `safenode-manager local kill` to dispose the local network. + +## Running Integration Tests + +Sometimes it will be necessary to run the integration tests in a local setup. The problem is, the system-wide tests need root access to run, and they will also create real services, which you don't necessarily want on your development machine. + +The tests can be run from a VM, which is provided by a `Vagrantfile` in the `sn_node_manager` crate directory. The machine is defined to use libvirt rather than Virtualbox, so an installation of that is required, but that is beyond the scope of this document. + +Assuming that you did have an installation of libvirt, you can get the VM by running `vagrant up`. Once the machine is available, run `vagrant ssh` to get a shell session inside it. For running the tests, switch to the root user using `sudo su -`. As part of the provisioning process, the current `safe_network` code is copied to the root user's home directory. To run the tests: +``` +cd safe_network +just node-man-integration-tests +``` + +The target in the `Justfile` will create a local network and the tests will then run against that. 
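+
+Putting those steps together, the whole flow from the host might look something like this. This is just a sketch, assuming Vagrant and libvirt are already installed and that the commands are run from the `sn_node_manager` directory, where the `Vagrantfile` lives:
+```
+vagrant up                        # provision the VM; the safe_network code is copied into it
+vagrant ssh                       # open a shell session inside the VM
+sudo su -                         # the system-wide tests must run as root
+cd safe_network
+just node-man-integration-tests   # builds the binaries, starts a local network, runs the e2e tests
+```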
diff --git a/sn_node_manager/Vagrantfile b/sn_node_manager/Vagrantfile index deea4d0ff4..f64a3511ee 100644 --- a/sn_node_manager/Vagrantfile +++ b/sn_node_manager/Vagrantfile @@ -3,7 +3,7 @@ Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.memory = 4096 end - config.vm.synced_folder ".", + config.vm.synced_folder "..", "/vagrant", type: "9p", accessmode: "mapped", @@ -23,12 +23,26 @@ Vagrant.configure("2") do |config| echo "source ~/.cargo/env" >> ~/.bashrc SHELL config.vm.provision "shell", inline: <<-SHELL + curl -L -O https://github.com/casey/just/releases/download/1.25.2/just-1.25.2-x86_64-unknown-linux-musl.tar.gz + mkdir just + tar xvf just-1.25.2-x86_64-unknown-linux-musl.tar.gz -C just + rm just-1.25.2-x86_64-unknown-linux-musl.tar.gz + sudo mv just/just /usr/local/bin + rm -rf just + curl -L -O https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init chmod +x rustup-init ./rustup-init --default-toolchain stable --no-modify-path -y echo "source ~/.cargo/env" >> ~/.bashrc # Copy the binaries to a system-wide location for running tests as the root user sudo cp ~/.cargo/bin/** /usr/local/bin + sudo rsync -av \ + --exclude 'artifacts' \ + --exclude 'deploy' \ + --exclude 'target' \ + --exclude '.git' \ + --exclude '.vagrant' \ + /vagrant/ /root/safe_network SHELL config.vm.provision "shell", privileged: false, inline: <<-SHELL mkdir -p ~/.vim/tmp/ ~/.vim/backup @@ -65,5 +79,10 @@ set viminfo+=! nnoremap j gj nnoremap k gk EOF + cp ~/.vimrc /tmp/.vimrc + SHELL + config.vm.provision "shell", inline: <<-SHELL + mkdir -p /root/.vim/tmp/ /root/.vim/backup + cp /tmp/.vimrc /root/.vimrc SHELL end diff --git a/sn_node_manager/tests/daemon.rs b/sn_node_manager/tests/daemon.rs deleted file mode 100644 index be9ba27068..0000000000 --- a/sn_node_manager/tests/daemon.rs +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod utils; - -use assert_cmd::Command; -use color_eyre::eyre::{bail, eyre, OptionExt, Result}; -use sn_node_manager::DAEMON_DEFAULT_PORT; -use sn_service_management::safenode_manager_proto::{ - safe_node_manager_client::SafeNodeManagerClient, NodeServiceRestartRequest, -}; -use std::{ - env, - io::Read, - net::{Ipv4Addr, SocketAddr}, - process::Stdio, - time::Duration, -}; -use tonic::Request; -use utils::get_service_status; - -/// These tests need to execute as the root user. -/// -/// They are intended to run on a CI-based environment with a fresh build agent because they will -/// create real services and user accounts, and will not attempt to clean themselves up. -/// -/// Each test also needs to run in isolation, otherwise they will interfere with each other. -/// -/// If you run them on your own dev machine, do so at your own risk! 
- -#[tokio::test] -async fn restart_node() -> Result<()> { - println!("Building safenodemand:"); - let mut cmd = std::process::Command::new("cargo") - .arg("build") - .arg("--release") - .arg("--bin") - .arg("safenodemand") - .stdout(Stdio::piped()) - .spawn()?; - let mut output = String::new(); - cmd.stdout - .as_mut() - .ok_or_else(|| eyre!("Failed to capture stdout"))? - .read_to_string(&mut output)?; - println!("{}", output); - - // It doesn't make any sense, but copying the `safenodemand` binary to another location seemed - // to be necessary before running `daemon add`, because it was just complaining about the file - // not existing. - let mut cwd = env::current_dir()?; - cwd.pop(); - let safenodemand_path = cwd.join("target").join("release").join("safenodemand"); - std::fs::copy(safenodemand_path, "/tmp/safenodemand")?; - - // 1. Preserve the PeerId - println!("Adding 3 safenode services..."); - let node_index_to_restart = 0; - let mut cmd = Command::cargo_bin("safenode-manager")?; - cmd.arg("add") - .arg("--user") - .arg("runner") - .arg("--count") - .arg("3") - .arg("--peer") - .arg("/ip4/127.0.0.1/udp/46091/p2p/12D3KooWAWnbQLxqspWeB3M8HB3ab3CSj6FYzsJxEG9XdVnGNCod") - .assert() - .success(); - - println!("Attempting to start 3 safenode services..."); - let mut cmd = Command::cargo_bin("safenode-manager")?; - cmd.arg("start").assert().success(); - - let status = get_service_status().await?; - let old_pid = status.nodes[node_index_to_restart] - .pid - .ok_or_eyre("PID should be present")?; - assert_eq!(status.nodes.len(), 3); - - println!("Attempting to add the safenodemand service..."); - let mut cmd = Command::cargo_bin("safenode-manager")?; - cmd.arg("daemon") - .arg("add") - .arg("--path") - .arg("/tmp/safenodemand") - .assert() - .success(); - - println!("Attempting to start the safenodemand service..."); - let mut cmd = Command::cargo_bin("safenode-manager")?; - cmd.arg("daemon").arg("start").assert().success(); - - // restart a node - let mut rpc_client = get_safenode_manager_rpc_client(SocketAddr::new( - std::net::IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - DAEMON_DEFAULT_PORT, - )) - .await?; - let node_to_restart = status.nodes[node_index_to_restart] - .peer_id - .ok_or_eyre("We should have PeerId")?; - - let _response = rpc_client - .restart_node_service(Request::new(NodeServiceRestartRequest { - peer_id: node_to_restart.to_bytes(), - delay_millis: 0, - retain_peer_id: true, - })) - .await?; - - // make sure that we still have just 3 services running and pid's are different - let status = get_service_status().await?; - assert_eq!(status.nodes.len(), 3); - let new_pid = status.nodes[node_index_to_restart] - .pid - .ok_or_eyre("PID should be present")?; - assert_ne!(old_pid, new_pid); - - // 2. Start as a fresh node - let _response = rpc_client - .restart_node_service(Request::new(NodeServiceRestartRequest { - peer_id: node_to_restart.to_bytes(), - delay_millis: 0, - retain_peer_id: false, - })) - .await?; - - // make sure that we still have an extra service, and the new one has the same rpc addr as the old one. 
- let status = get_service_status().await?; - assert_eq!(status.nodes.len(), 4); - let old_rpc_socket_addr = status.nodes[node_index_to_restart].rpc_socket_addr; - let new_rpc_socket_addr = status.nodes[3].rpc_socket_addr; - assert_eq!(old_rpc_socket_addr, new_rpc_socket_addr); - - Ok(()) -} - -// Connect to a RPC socket addr with retry -pub async fn get_safenode_manager_rpc_client( - socket_addr: SocketAddr, -) -> Result<SafeNodeManagerClient<tonic::transport::Channel>> { - // get the new PeerId for the current NodeIndex - let endpoint = format!("https://{socket_addr}"); - let mut attempts = 0; - loop { - if let Ok(rpc_client) = SafeNodeManagerClient::connect(endpoint.clone()).await { - break Ok(rpc_client); - } - attempts += 1; - println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); - tokio::time::sleep(Duration::from_secs(1)).await; - if attempts >= 10 { - bail!("Failed to connect to {endpoint:?} even after 10 retries"); - } - } -} diff --git a/sn_node_manager/tests/e2e.rs b/sn_node_manager/tests/e2e.rs index 3394aef5fa..fd2973b8aa 100644 --- a/sn_node_manager/tests/e2e.rs +++ b/sn_node_manager/tests/e2e.rs @@ -9,13 +9,21 @@ use assert_cmd::Command; use libp2p_identity::PeerId; use sn_service_management::{ServiceStatus, StatusSummary}; +use std::path::PathBuf; /// These tests need to execute as the root user. /// /// They are intended to run on a CI-based environment with a fresh build agent because they will /// create real services and user accounts, and will not attempt to clean themselves up. /// -/// If you run them on your own dev machine, do so at your own risk! +/// They are assuming the existence of a `safenode` binary produced by the release process, and a +/// running local network, with SAFE_PEERS set to a local node. + +const CI_USER: &str = "runner"; +#[cfg(unix)] +const SAFENODE_BIN_NAME: &str = "safenode"; +#[cfg(windows)] +const SAFENODE_BIN_NAME: &str = "safenode.exe"; /// The default behaviour is for the service to run as the `safe` user, which gets created during /// the process. However, there seems to be some sort of issue with adding user accounts on the GHA @@ -23,18 +31,19 @@ use sn_service_management::{ServiceStatus, StatusSummary}; /// build agent. #[test] fn cross_platform_service_install_and_control() { - // An explicit version of `safenode` will be used to avoid any rate limiting from Github when - // retrieving the latest version number. + let safenode_path = PathBuf::from("..") + .join("target") + .join("release") + .join(SAFENODE_BIN_NAME); let mut cmd = Command::cargo_bin("safenode-manager").unwrap(); cmd.arg("add") + .arg("--local") .arg("--user") - .arg("runner") + .arg(CI_USER) .arg("--count") .arg("3") - .arg("--peer") - .arg("/ip4/127.0.0.1/tcp/46091/p2p/12D3KooWAWnbQLxqspWeB3M8HB3ab3CSj6FYzsJxEG9XdVnGNCod") - .arg("--version") - .arg("0.98.27") + .arg("--path") + .arg(safenode_path.to_string_lossy().to_string()) .assert() .success(); @@ -171,7 +180,14 @@ fn cross_platform_service_install_and_control() { .assert() .success(); let registry = get_status(); - assert_eq!(registry.nodes.len(), 1); + assert_eq!( + 1, + registry + .nodes + .iter() + .filter(|n| n.status != ServiceStatus::Removed) + .count() + ); } fn get_status() -> StatusSummary { diff --git a/sn_node_manager/tests/upgrades.rs b/sn_node_manager/tests/upgrades.rs deleted file mode 100644 index 91267e6e8c..0000000000 --- a/sn_node_manager/tests/upgrades.rs +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright (C) 2024 MaidSafe.net limited. 
-// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod utils; - -use assert_cmd::Command; -use color_eyre::Result; -use sn_releases::{ReleaseType, SafeReleaseRepoActions}; -use utils::get_service_status; - -/// These tests need to execute as the root user. -/// -/// They are intended to run on a CI-based environment with a fresh build agent because they will -/// create real services and user accounts, and will not attempt to clean themselves up. -/// -/// Each test also needs to run in isolation, otherwise they will interfere with each other. -/// -/// If you run them on your own dev machine, do so at your own risk! - -const CI_USER: &str = "runner"; - -#[tokio::test] -async fn upgrade_to_latest_version() -> Result<()> { - let mut cmd = Command::cargo_bin("safenode-manager")?; - cmd.arg("add") - .arg("--user") - .arg(CI_USER) - .arg("--count") - .arg("3") - .arg("--peer") - .arg("/ip4/127.0.0.1/udp/46091/p2p/12D3KooWAWnbQLxqspWeB3M8HB3ab3CSj6FYzsJxEG9XdVnGNCod") - .arg("--version") - .arg("0.98.27") - .assert() - .success(); - - let status = get_service_status().await?; - assert!( - status.nodes.iter().all(|node| node.version == "0.98.27"), - "Services were not correctly initialised" - ); - - let release_repo = <dyn SafeReleaseRepoActions>::default_config(); - let latest_version = release_repo - .get_latest_version(&ReleaseType::Safenode) - .await?; - let mut cmd = Command::cargo_bin("safenode-manager")?; - let output = cmd - .arg("upgrade") - .arg("--do-not-start") - .assert() - .success() - .get_output() - .stdout - .clone(); - - let output = std::str::from_utf8(&output)?; - println!("upgrade command output:"); - println!("{output}"); - - let status = get_service_status().await?; - assert!( - status - .nodes - .iter() - .all(|n| n.version == latest_version.to_string()), - "Not all services were updated to the latest version" - ); - - Ok(()) -} - -/// This scenario may seem pointless, but forcing a change for a binary with the same version will -/// be required for the backwards compatibility test; the binary will be different, it will just -/// have the same version. 
-#[tokio::test] -async fn force_upgrade_when_two_binaries_have_the_same_version() -> Result<()> { - let version = "0.98.27"; - - let mut cmd = Command::cargo_bin("safenode-manager")?; - cmd.arg("add") - .arg("--user") - .arg(CI_USER) - .arg("--count") - .arg("3") - .arg("--peer") - .arg("/ip4/127.0.0.1/udp/46091/p2p/12D3KooWAWnbQLxqspWeB3M8HB3ab3CSj6FYzsJxEG9XdVnGNCod") - .arg("--version") - .arg(version) - .assert() - .success(); - - let status = get_service_status().await?; - assert!( - status.nodes.iter().all(|n| n.version == version), - "Services were not correctly initialised" - ); - - let mut cmd = Command::cargo_bin("safenode-manager")?; - let output = cmd - .arg("upgrade") - .arg("--do-not-start") - .arg("--force") - .arg("--version") - .arg(version) - .assert() - .success() - .get_output() - .stdout - .clone(); - - let output = std::str::from_utf8(&output)?; - println!("upgrade command output:"); - println!("{output}"); - - assert!(output.contains(&format!( - "Forced safenode1 version change from {version} to {version}" - ))); - assert!(output.contains(&format!( - "Forced safenode2 version change from {version} to {version}" - ))); - assert!(output.contains(&format!( - "Forced safenode3 version change from {version} to {version}" - ))); - - let status = get_service_status().await?; - assert!( - status.nodes.iter().all(|n| n.version == version), - "Not all services were updated to the latest version" - ); - - Ok(()) -} - -#[tokio::test] -async fn force_downgrade_to_a_previous_version() -> Result<()> { - let initial_version = "0.104.15"; - let downgrade_version = "0.104.10"; - - let mut cmd = Command::cargo_bin("safenode-manager")?; - cmd.arg("add") - .arg("--user") - .arg(CI_USER) - .arg("--count") - .arg("3") - .arg("--peer") - .arg("/ip4/127.0.0.1/udp/46091/p2p/12D3KooWAWnbQLxqspWeB3M8HB3ab3CSj6FYzsJxEG9XdVnGNCod") - .arg("--version") - .arg(initial_version) - .assert() - .success(); - - let status = get_service_status().await?; - assert!( - status.nodes.iter().all(|n| n.version == initial_version), - "Services were not correctly initialised" - ); - - let mut cmd = Command::cargo_bin("safenode-manager")?; - let output = cmd - .arg("upgrade") - .arg("--do-not-start") - .arg("--force") - .arg("--version") - .arg(downgrade_version) - .assert() - .success() - .get_output() - .stdout - .clone(); - - let output = std::str::from_utf8(&output)?; - println!("upgrade command output:"); - println!("{output}"); - - assert!(output.contains(&format!( - "Forced safenode1 version change from {initial_version} to {downgrade_version}" - ))); - assert!(output.contains(&format!( - "Forced safenode2 version change from {initial_version} to {downgrade_version}" - ))); - assert!(output.contains(&format!( - "Forced safenode3 version change from {initial_version} to {downgrade_version}" - ))); - - let status = get_service_status().await?; - assert!( - status.nodes.iter().all(|n| n.version == downgrade_version), - "Not all services were updated to the latest version" - ); - - Ok(()) -} - -#[tokio::test] -async fn upgrade_from_older_version_to_specific_version() -> Result<()> { - let initial_version = "0.104.10"; - let upgrade_version = "0.104.14"; - - let mut cmd = Command::cargo_bin("safenode-manager")?; - cmd.arg("add") - .arg("--user") - .arg(CI_USER) - .arg("--count") - .arg("3") - .arg("--peer") - .arg("/ip4/127.0.0.1/udp/46091/p2p/12D3KooWAWnbQLxqspWeB3M8HB3ab3CSj6FYzsJxEG9XdVnGNCod") - .arg("--version") - .arg(initial_version) - .assert() - .success(); - - let status = get_service_status().await?; 
- assert!( - status.nodes.iter().all(|n| n.version == initial_version), - "Services were not correctly initialised" - ); - - let mut cmd = Command::cargo_bin("safenode-manager")?; - let output = cmd - .arg("upgrade") - .arg("--do-not-start") - .arg("--version") - .arg(upgrade_version) - .assert() - .success() - .get_output() - .stdout - .clone(); - - let output = std::str::from_utf8(&output)?; - println!("upgrade command output:"); - println!("{output}"); - - assert!(output.contains(&format!( - "safenode1 upgraded from {initial_version} to {upgrade_version}" - ))); - assert!(output.contains(&format!( - "safenode2 upgraded from {initial_version} to {upgrade_version}" - ))); - assert!(output.contains(&format!( - "safenode3 upgraded from {initial_version} to {upgrade_version}" - ))); - - let status = get_service_status().await?; - assert!( - status.nodes.iter().all(|n| n.version == upgrade_version), - "Not all services were updated to the latest version" - ); - - Ok(()) -}