From f7339b486cff341bbabaa2b15ac88d0586be5c7d Mon Sep 17 00:00:00 2001 From: Roland Sherwin Date: Tue, 16 Jan 2024 13:55:06 +0530 Subject: [PATCH] chore(test): use logs instead of println for small running tests --- sn_node/tests/common/client.rs | 13 ++++++ sn_node/tests/common/mod.rs | 5 ++- sn_node/tests/data_with_churn.rs | 40 ++++++++++++----- sn_node/tests/msgs_over_gossipsub.rs | 18 ++++++-- sn_node/tests/nodes_rewards.rs | 63 +++++++++++++-------------- sn_node/tests/sequential_transfers.rs | 6 --- sn_node/tests/storage_payments.rs | 15 ++++--- sn_node/tests/verify_data_location.rs | 49 ++++++++++++++------- sn_node/tests/verify_routing_table.rs | 11 +++-- 9 files changed, 140 insertions(+), 80 deletions(-) diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs index e0e7943e64..52b59d3498 100644 --- a/sn_node/tests/common/client.rs +++ b/sn_node/tests/common/client.rs @@ -171,6 +171,7 @@ impl NonDroplet { }; println!("Client bootstrap with peer {bootstrap_peers:?}"); + info!("Client bootstrap with peer {bootstrap_peers:?}"); Client::new(secret_key, bootstrap_peers, true, None) .await .expect("Client shall be successfully created.") @@ -186,13 +187,16 @@ impl NonDroplet { let mut local_wallet = get_wallet(root_dir); println!("Getting {wallet_balance} tokens from the faucet..."); + info!("Getting {wallet_balance} tokens from the faucet..."); let tokens = send(from, wallet_balance, local_wallet.address(), client, true).await?; println!("Verifying the transfer from faucet..."); + info!("Verifying the transfer from faucet..."); client.verify_cashnote(&tokens).await?; local_wallet.deposit_and_store_to_disk(&vec![tokens])?; assert_eq!(local_wallet.balance(), wallet_balance); println!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); + info!("CashNotes deposited to the wallet that'll pay for storage: {wallet_balance}."); Ok(local_wallet) } @@ -237,6 +241,7 @@ impl Droplet { } println!("Client bootstrap with peer {bootstrap_peers:?}"); + info!("Client bootstrap with peer {bootstrap_peers:?}"); Client::new(secret_key, Some(bootstrap_peers), true, None) .await .expect("Client shall be successfully created.") @@ -262,6 +267,10 @@ impl Droplet { "Getting {} tokens from the faucet... num_requests:{num_requests}", NanoTokens::from(num_requests * 100 * 1_000_000_000) ); + info!( + "Getting {} tokens from the faucet... 
num_requests:{num_requests}", + NanoTokens::from(num_requests * 100 * 1_000_000_000) + ); for _ in 0..num_requests { let faucet_url = format!("http://{faucet_socket}/{address_hex}"); @@ -272,6 +281,8 @@ impl Droplet { Err(err) => { println!("Failed to parse transfer: {err:?}"); println!("Transfer: \"{transfer}\""); + error!("Failed to parse transfer: {err:?}"); + error!("Transfer: \"{transfer}\""); return Err(err.into()); } }; @@ -279,10 +290,12 @@ impl Droplet { Ok(cashnotes) => cashnotes, Err(err) => { println!("Failed to verify and redeem transfer: {err:?}"); + error!("Failed to verify and redeem transfer: {err:?}"); return Err(err.into()); } }; println!("Successfully verified transfer."); + info!("Successfully verified transfer."); local_wallet.deposit_and_store_to_disk(&cashnotes)?; } diff --git a/sn_node/tests/common/mod.rs b/sn_node/tests/common/mod.rs index 989a091de1..033e195323 100644 --- a/sn_node/tests/common/mod.rs +++ b/sn_node/tests/common/mod.rs @@ -30,7 +30,7 @@ use std::{ time::Duration, }; use tonic::Request; -use tracing::error; +use tracing::{debug, error, info}; use xor_name::XorName; type ResultRandomContent = Result<(FilesApi, Bytes, ChunkAddress, Vec<(XorName, PathBuf)>)>; @@ -99,7 +99,7 @@ pub async fn get_all_peer_ids(node_rpc_addresses: &Vec) -> Result Result<()> { .await?; println!("Node restart requested to RPC service at {addr}"); + info!("Node restart requested to RPC service at {addr}"); Ok(()) } diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index 31aec27d74..672207033b 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -38,7 +38,7 @@ use std::{ }; use tempfile::tempdir; use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; -use tracing::{debug, trace}; +use tracing::{debug, error, info, trace, warn}; use xor_name::XorName; const EXTRA_CHURN_COUNT: u32 = 5; @@ -92,6 +92,7 @@ async fn data_availability_during_churn() -> Result<()> { let churn_period = if let Ok(str) = std::env::var("TEST_TOTAL_CHURN_CYCLES") { println!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); + info!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); let cycles = str.parse::()?; test_duration / cycles } else { @@ -103,6 +104,7 @@ async fn data_availability_during_churn() -> Result<()> { ) }; println!("Nodes will churn every {churn_period:?}"); + info!("Nodes will churn every {churn_period:?}"); // Create a cross thread usize for tracking churned nodes let churn_count = Arc::new(RwLock::new(0_usize)); @@ -115,12 +117,16 @@ async fn data_availability_during_churn() -> Result<()> { "Running this test for {test_duration:?}{}...", if chunks_only { " (Chunks only)" } else { "" } ); + info!( + "Running this test for {test_duration:?}{}...", + if chunks_only { " (Chunks only)" } else { "" } + ); // The testnet will create a `faucet` at last. To avoid mess up with that, // wait for a while to ensure the spends of that got settled. sleep(std::time::Duration::from_secs(10)).await; - println!("Creating a client and paying wallet..."); + info!("Creating a client and paying wallet..."); let paying_wallet_dir = TempDir::new()?; let (client, paying_wallet) = get_gossip_client_and_wallet(paying_wallet_dir.path(), PAYING_WALLET_INITIAL_BALANCE) @@ -129,7 +135,7 @@ async fn data_availability_during_churn() -> Result<()> { // Waiting for the paying_wallet funded. 
sleep(std::time::Duration::from_secs(10)).await; - println!( + info!( "Client and paying_wallet created with signing key: {:?}", client.signer_pk() ); @@ -143,7 +149,7 @@ async fn data_availability_during_churn() -> Result<()> { // Spawn a task to create Registers and CashNotes at random locations, // at a higher frequency than the churning events if !chunks_only { - println!("Creating transfer wallet taking balance from the payment wallet"); + info!("Creating transfer wallet taking balance from the payment wallet"); let transfers_wallet_dir = TempDir::new()?; let transfers_wallet = get_funded_wallet( &client, @@ -152,7 +158,7 @@ async fn data_availability_during_churn() -> Result<()> { TRANSFERS_WALLET_INITIAL_BALANCE, ) .await?; - println!("Transfer wallet created"); + info!("Transfer wallet created"); // Waiting for the transfers_wallet funded. sleep(std::time::Duration::from_secs(10)).await; @@ -174,6 +180,7 @@ async fn data_availability_during_churn() -> Result<()> { } println!("Uploading some chunks before carry out node churning"); + info!("Uploading some chunks before carry out node churning"); // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events store_chunks_task( @@ -215,12 +222,13 @@ async fn data_availability_during_churn() -> Result<()> { paying_wallet_dir.path().to_path_buf(), ); + info!("All tasks have been spawned. The test is now running..."); println!("All tasks have been spawned. The test is now running..."); let start_time = Instant::now(); while start_time.elapsed() < test_duration { let failed = failures.read().await; - println!( + info!( "Current failures after {:?} ({}): {:?}", start_time.elapsed(), failed.len(), @@ -243,7 +251,7 @@ async fn data_availability_during_churn() -> Result<()> { // i.e. the test may pass even without any replication // Hence, we carry out a final round of query all data to confirm storage. 
println!("Final querying confirmation of content"); - debug!("Final querying confirmation of content"); + info!("Final querying confirmation of content"); // take one read lock to avoid holding the lock for the whole loop // prevent any late content uploads being added to the list @@ -320,6 +328,7 @@ fn create_cash_note_task( let cash_note_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); let net_addr = NetworkAddress::SpendAddress(cash_note_addr); println!("Created CashNote at {cash_note_addr:?} after {delay:?}"); + debug!("Created CashNote at {cash_note_addr:?} after {delay:?}"); content.write().await.push_back(net_addr); let _ = cash_notes.write().await.insert(cash_note_addr, cash_note); } @@ -347,6 +356,7 @@ fn create_registers_task( let addr = RegisterAddress::new(meta, owner); println!("Creating Register at {addr:?} in {delay:?}"); + debug!("Creating Register at {addr:?} in {delay:?}"); sleep(delay).await; match client @@ -406,7 +416,7 @@ fn store_chunks_task( let (addr, _data_map, _file_size, chunks) = FilesApi::chunk_file(&file_path, &output_dir, true).expect("Failed to chunk bytes"); - println!( + info!( "Paying storage for ({}) new Chunk/s of file ({} bytes) at {addr:?} in {delay:?}", chunks.len(), chunk_size @@ -429,6 +439,9 @@ fn store_chunks_task( println!( "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" ); + info!( + "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" + ); sleep(delay).await; for chunk_name in chunks_name { @@ -457,6 +470,7 @@ fn query_content_task( let len = content.read().await.len(); if len == 0 { println!("No content created/stored just yet, let's try in {delay:?} ..."); + info!("No content created/stored just yet, let's try in {delay:?} ..."); sleep(delay).await; continue; } @@ -475,6 +489,7 @@ fn query_content_task( println!( "Failed to query content (index: {index}) at {net_addr}: {last_err:?}" ); + error!("Failed to query content (index: {index}) at {net_addr}: {last_err:?}"); // mark it to try 'MAX_NUM_OF_QUERY_ATTEMPTS' times. let _ = content_erred .write() @@ -508,14 +523,16 @@ fn churn_nodes_task( // break out if we've run the duration of churn if start.elapsed() > test_duration { - println!("Test duration reached, stopping churn nodes task"); + debug!("Test duration reached, stopping churn nodes task"); break 'main; } println!("Restarting node through its RPC service at {rpc_address}"); + info!("Restarting node through its RPC service at {rpc_address}"); if let Err(err) = node_restart(rpc_address).await { println!("Failed to restart node with RPC endpoint {rpc_address}: {err}"); + info!("Failed to restart node with RPC endpoint {rpc_address}: {err}"); continue; } @@ -547,10 +564,12 @@ fn retry_query_content_task( let attempts = content_error.attempts + 1; println!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); + info!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); if let Err(last_err) = query_content(&client, &wallet_dir, &net_addr, cash_notes.clone()).await { println!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); + warn!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); // We only keep it to retry 'MAX_NUM_OF_QUERY_ATTEMPTS' times, // otherwise report it effectivelly as failure. 
content_error.attempts = attempts; @@ -589,11 +608,12 @@ async fn final_retry_query_content( { if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { println!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); + error!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); bail!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); } else { attempts += 1; let delay = 2 * churn_period; - println!("Delaying last check for {delay:?} ..."); + debug!("Delaying last check for {delay:?} ..."); sleep(delay).await; continue; } diff --git a/sn_node/tests/msgs_over_gossipsub.rs b/sn_node/tests/msgs_over_gossipsub.rs index 445c251f1b..080f699ce3 100644 --- a/sn_node/tests/msgs_over_gossipsub.rs +++ b/sn_node/tests/msgs_over_gossipsub.rs @@ -8,7 +8,10 @@ mod common; -use common::client::get_node_count; +use crate::common::{ + client::{get_all_rpc_addresses, get_node_count}, + get_safenode_rpc_client, +}; use eyre::Result; use rand::seq::SliceRandom; use sn_logging::LogBuilder; @@ -21,8 +24,7 @@ use std::{net::SocketAddr, time::Duration}; use tokio::time::timeout; use tokio_stream::StreamExt; use tonic::Request; - -use crate::common::{client::get_all_rpc_addresses, get_safenode_rpc_client}; +use tracing::{error, info}; const TEST_CYCLES: u8 = 20; @@ -42,6 +44,8 @@ async fn msgs_over_gossipsub() -> Result<()> { let topic = format!("TestTopic-{}", rand::random::()); println!("Testing cicle {}/{TEST_CYCLES} - topic: {topic}", c + 1); println!("============================================================"); + info!("Testing cicle {}/{TEST_CYCLES} - topic: {topic}", c + 1); + info!("============================================================"); // get a random subset of `nodes_subscribed`` out of `node_count` nodes to subscribe to the topic let mut rng = rand::thread_rng(); @@ -54,6 +58,7 @@ async fn msgs_over_gossipsub() -> Result<()> { for (node_index, rpc_addr) in random_subs_nodes.clone() { // request current node to subscribe to the topic println!("Node #{node_index} ({rpc_addr}) subscribing to {topic} ..."); + info!("Node #{node_index} ({rpc_addr}) subscribing to {topic} ..."); node_subscribe_to_topic(rpc_addr, topic.clone()).await?; let handle = tokio::spawn(async move { @@ -73,10 +78,15 @@ async fn msgs_over_gossipsub() -> Result<()> { "Msg received on node #{node_index} '{topic}': {}", String::from_utf8(msg.to_vec()).unwrap() ); + info!( + "Msg received on node #{node_index} '{topic}': {}", + String::from_utf8(msg.to_vec()).unwrap() + ); count += 1; } Ok(_) => { /* ignored */ } Err(_) => { + error!("Error while parsing received NodeEvent"); println!("Error while parsing received NodeEvent"); } } @@ -101,6 +111,7 @@ async fn msgs_over_gossipsub() -> Result<()> { for (node_index, addr, handle) in subs_handles.into_iter() { let count = handle.await??; println!("Messages received by node {node_index}: {count}"); + info!("Messages received by node {node_index}: {count}"); assert_eq!( count, node_count - nodes_subscribed, @@ -144,6 +155,7 @@ async fn other_nodes_to_publish_on_topic( let mut rpc_client = get_safenode_rpc_client(addr).await?; println!("Node {node_index} to publish on {topic} message: {msg}"); + info!("Node {node_index} to publish on {topic} message: {msg}"); let _response = rpc_client .publish_on_topic(Request::new(GossipsubPublishRequest { diff --git a/sn_node/tests/nodes_rewards.rs b/sn_node/tests/nodes_rewards.rs index 83f9cb3261..54ad5ff07e 100644 
--- a/sn_node/tests/nodes_rewards.rs +++ b/sn_node/tests/nodes_rewards.rs @@ -32,6 +32,7 @@ use tokio::{ }; use tokio_stream::StreamExt; use tonic::Request; +use tracing::{debug, error, info}; use xor_name::XorName; #[tokio::test] @@ -50,13 +51,13 @@ async fn nodes_rewards_for_storing_chunks() -> Result<()> { let chunks_len = chunks.len(); let prev_rewards_balance = current_rewards_balance().await?; - println!("With {prev_rewards_balance:?} current balance, paying for {} random addresses... {chunks:?}", chunks.len()); + info!("With {prev_rewards_balance:?} current balance, paying for {} random addresses... {chunks:?}", chunks.len()); let mut files_upload = FilesUpload::new(files_api.clone()).set_show_holders(true); files_upload.upload_chunks(chunks).await?; let storage_cost = files_upload.get_upload_storage_cost(); - println!("Paid {storage_cost:?} total rewards for the chunks"); + info!("Paid {storage_cost:?} total rewards for the chunks"); verify_rewards(prev_rewards_balance, storage_cost, chunks_len).await?; @@ -79,7 +80,7 @@ async fn nodes_rewards_for_storing_registers() -> Result<()> { let mut rng = rand::thread_rng(); let register_addr = XorName::random(&mut rng); - println!("Paying for random Register address {register_addr:?} with current balance {rb:?}"); + info!("Paying for random Register address {register_addr:?} with current balance {rb:?}"); let prev_rewards_balance = current_rewards_balance().await?; @@ -91,7 +92,7 @@ async fn nodes_rewards_for_storing_registers() -> Result<()> { Permissions::new_owner_only(), ) .await?; - println!("Cost is {storage_cost:?}: {prev_rewards_balance:?}"); + info!("Cost is {storage_cost:?}: {prev_rewards_balance:?}"); verify_rewards(prev_rewards_balance, storage_cost, 1).await?; @@ -117,19 +118,18 @@ async fn nodes_rewards_for_chunks_notifs_over_gossipsub() -> Result<()> { let num_of_chunks = chunks.len(); - tracing::info!("Paying for {num_of_chunks} random addresses..."); - println!("Paying for {num_of_chunks} random addresses..."); + info!("Paying for {num_of_chunks} random addresses..."); let mut files_upload = FilesUpload::new(files_api.clone()).set_show_holders(true); files_upload.upload_chunks(chunks).await?; let storage_cost = files_upload.get_upload_storage_cost(); let royalties_fees = files_upload.get_upload_royalty_fees(); - println!("Random chunks stored, paid {storage_cost}/{royalties_fees}"); + info!("Random chunks stored, paid {storage_cost}/{royalties_fees}"); let (count, amount) = handle.await??; - println!("Number of notifications received: {count}"); - println!("Amount notified for royalties fees: {amount}"); + info!("Number of notifications received: {count}"); + info!("Amount notified for royalties fees: {amount}"); assert_eq!( amount, royalties_fees, "Unexpected amount of royalties fees received" @@ -158,7 +158,7 @@ async fn nodes_rewards_for_register_notifs_over_gossipsub() -> Result<()> { let handle = spawn_royalties_payment_client_listener(client.clone(), 1).await?; - println!("Paying for random Register address {register_addr:?} ..."); + info!("Paying for random Register address {register_addr:?} ..."); let (_, storage_cost, royalties_fees) = client .create_and_pay_for_register( register_addr, @@ -167,11 +167,11 @@ async fn nodes_rewards_for_register_notifs_over_gossipsub() -> Result<()> { Permissions::new_owner_only(), ) .await?; - println!("Random Register created, paid {storage_cost}/{royalties_fees}"); + info!("Random Register created, paid {storage_cost}/{royalties_fees}"); let (count, amount) = handle.await??; - 
println!("Number of notifications received: {count}"); - println!("Amount notified for royalties fees: {amount}"); + info!("Number of notifications received: {count}"); + info!("Amount notified for royalties fees: {amount}"); assert_eq!( amount, royalties_fees, "Unexpected amount of royalties fees received" @@ -218,20 +218,20 @@ async fn nodes_rewards_transfer_notifs_filter() -> Result<()> { spawn_royalties_payment_listener(node_rpc_addresses[2], royalties_pk, false, 0, true).await; let num_of_chunks = chunks.len(); - println!("Paying for {num_of_chunks} chunks"); + info!("Paying for {num_of_chunks} chunks"); let mut files_upload = FilesUpload::new(files_api.clone()).set_show_holders(true); files_upload.upload_chunks(chunks).await?; let storage_cost = files_upload.get_upload_storage_cost(); let royalties_fees = files_upload.get_upload_royalty_fees(); - println!("Random chunks stored, paid {storage_cost}/{royalties_fees}"); + info!("Random chunks stored, paid {storage_cost}/{royalties_fees}"); let count_1 = handle_1.await??; - println!("Number of notifications received by node #1: {count_1}"); + info!("Number of notifications received by node #1: {count_1}"); let count_2 = handle_2.await??; - println!("Number of notifications received by node #2: {count_2}"); + info!("Number of notifications received by node #2: {count_2}"); let count_3 = handle_3.await??; - println!("Number of notifications received by node #3: {count_3}"); + info!("Number of notifications received by node #3: {count_3}"); assert!( count_1 >= num_of_chunks, @@ -260,7 +260,7 @@ async fn verify_rewards( while iteration < put_record_count { iteration += 1; - println!("Current iteration {iteration}"); + info!("Current iteration {iteration}"); let new_rewards_balance = current_rewards_balance().await?; if expected_rewards_balance == new_rewards_balance { return Ok(()); @@ -287,7 +287,7 @@ async fn current_rewards_balance() -> Result { .ok_or_else(|| eyre!("Failed to sum up rewards balance"))?; } - println!("Current total balance is {total_rewards:?}"); + info!("Current total balance is {total_rewards:?}"); Ok(total_rewards) } @@ -328,20 +328,20 @@ async fn spawn_royalties_payment_listener( let secs = std::cmp::max(40, expected_royalties as u64 * 15); let duration = Duration::from_secs(secs); - println!("Awaiting transfers notifs for {duration:?}..."); + info!("Awaiting transfers notifs for {duration:?}..."); if timeout(duration, async { while let Some(Ok(e)) = stream.next().await { match NodeEvent::from_bytes(&e.event) { Ok(NodeEvent::TransferNotif { key, .. 
}) => { - println!("Transfer notif received for key {key:?}"); + info!("Transfer notif received for key {key:?}"); if key == royalties_pk { count += 1; - println!("Received {count} royalty notifs so far"); + info!("Received {count} royalty notifs so far"); } } Ok(_) => { /* ignored */ } Err(_) => { - println!("Error while parsing received NodeEvent"); + error!("Error while parsing received NodeEvent"); } } } @@ -349,7 +349,7 @@ async fn spawn_royalties_payment_listener( .await .is_err() { - println!("Timeout after {duration:?}."); + debug!("Timeout after {duration:?}."); } Ok(count) @@ -382,8 +382,7 @@ async fn spawn_royalties_payment_client_listener( // otherwise we'll wait for 10s per expected royalty let secs = std::cmp::max(40, expected_royalties as u64 * 15); let duration = Duration::from_secs(secs); - tracing::info!("Awaiting transfers notifs for {duration:?}..."); - println!("Awaiting transfers notifs for {duration:?}..."); + info!("Awaiting transfers notifs for {duration:?}..."); if timeout(duration, async { while let Ok(event) = events_receiver.recv().await { let cashnote_redemptions = match event { @@ -391,7 +390,7 @@ async fn spawn_royalties_payment_client_listener( // we assume it's a notification of a transfer as that's the only topic we've subscribed to match try_decode_transfer_notif(&msg) { Ok((key, cashnote_redemptions)) => { - println!("Transfer notif received for key {key:?}"); + info!("Transfer notif received for key {key:?}"); if key != royalties_pk { continue; } @@ -399,7 +398,7 @@ async fn spawn_royalties_payment_client_listener( cashnote_redemptions } Err(err) => { - println!("GossipsubMsg received on topic '{topic}' couldn't be decoded as transfer notif: {err:?}"); + error!("GossipsubMsg received on topic '{topic}' couldn't be decoded as transfer notif: {err:?}"); continue; }, } @@ -412,16 +411,16 @@ async fn spawn_royalties_payment_client_listener( .await { Ok(cash_notes) => if let Err(err) = wallet.deposit(&cash_notes) { - println!("Failed to deposit cash notes: {err}"); + error!("Failed to deposit cash notes: {err}"); } - Err(err) => println!("At least one of the CashNoteRedemptions received is invalid, dropping them: {err:?}") + Err(err) => error!("At least one of the CashNoteRedemptions received is invalid, dropping them: {err:?}") } } }) .await .is_err() { - println!("Timeout after {duration:?}."); + debug!("Timeout after {duration:?}."); } Ok((count, wallet.balance())) diff --git a/sn_node/tests/sequential_transfers.rs b/sn_node/tests/sequential_transfers.rs index c599606181..42ac09b374 100644 --- a/sn_node/tests/sequential_transfers.rs +++ b/sn_node/tests/sequential_transfers.rs @@ -27,7 +27,6 @@ async fn cash_note_transfer_multiple_sequential_succeed() -> Result<()> { get_gossip_client_and_wallet(first_wallet_dir.path(), first_wallet_balance).await?; let second_wallet_balance = NanoTokens::from(first_wallet_balance / 2); - println!("Transferring from first wallet to second wallet: {second_wallet_balance}."); info!("Transferring from first wallet to second wallet: {second_wallet_balance}."); let second_wallet_dir = TempDir::new()?; let mut second_wallet = get_wallet(second_wallet_dir.path()); @@ -42,13 +41,11 @@ async fn cash_note_transfer_multiple_sequential_succeed() -> Result<()> { true, ) .await?; - println!("Verifying the transfer from first wallet..."); info!("Verifying the transfer from first wallet..."); client.verify_cashnote(&tokens).await?; second_wallet.deposit_and_store_to_disk(&vec![tokens])?; assert_eq!(second_wallet.balance(), 
second_wallet_balance); - println!("CashNotes deposited to second wallet: {second_wallet_balance}."); info!("CashNotes deposited to second wallet: {second_wallet_balance}."); let first_wallet = get_wallet(&first_wallet_dir); @@ -98,7 +95,6 @@ async fn cash_note_transfer_double_spend_fail() -> Result<()> { // send both transfers to the network // upload won't error out, only error out during verification. - println!("Sending both transfers to the network..."); info!("Sending both transfers to the network..."); let res = client .send_spends(transfer_to_2.all_spend_requests.iter(), false) @@ -110,7 +106,6 @@ async fn cash_note_transfer_double_spend_fail() -> Result<()> { assert!(res.is_ok()); // check the CashNotes, it should fail - println!("Verifying the transfers from first wallet..."); info!("Verifying the transfers from first wallet..."); let cash_notes_for_2: Vec<_> = transfer_to_2.created_cash_notes.clone(); @@ -118,7 +113,6 @@ async fn cash_note_transfer_double_spend_fail() -> Result<()> { let could_err1 = client.verify_cashnote(&cash_notes_for_2[0]).await; let could_err2 = client.verify_cashnote(&cash_notes_for_3[0]).await; - println!("Verifying at least one fails : {could_err1:?} {could_err2:?}"); info!("Verifying at least one fails : {could_err1:?} {could_err2:?}"); assert!(could_err1.is_err() || could_err2.is_err()); diff --git a/sn_node/tests/storage_payments.rs b/sn_node/tests/storage_payments.rs index c1dce36e49..adacee5fe5 100644 --- a/sn_node/tests/storage_payments.rs +++ b/sn_node/tests/storage_payments.rs @@ -24,6 +24,7 @@ use sn_registers::Permissions; use sn_transfers::{MainPubkey, NanoTokens, PaymentQuote}; use std::collections::BTreeMap; use tokio::time::{sleep, Duration}; +use tracing::info; use xor_name::XorName; #[tokio::test] @@ -46,7 +47,7 @@ async fn storage_payment_succeeds() -> Result<()> { sn_protocol::NetworkAddress::ChunkAddress(ChunkAddress::new(XorName::random(&mut rng))) }) .collect::>(); - println!( + info!( "Paying for {} random addresses...", random_content_addrs.len() ); @@ -55,7 +56,7 @@ async fn storage_payment_succeeds() -> Result<()> { .pay_for_storage(random_content_addrs.clone().into_iter()) .await?; - println!("Verifying balance has been paid from the wallet..."); + info!("Verifying balance has been paid from the wallet..."); let paying_wallet = wallet_client.into_wallet(); assert!( @@ -127,7 +128,7 @@ async fn storage_payment_proofs_cached_in_wallet() -> Result<()> { // let's first pay only for a subset of the addresses let subset_len = random_content_addrs.len() / 3; - println!("Paying for {subset_len} random addresses...",); + info!("Paying for {subset_len} random addresses...",); let ((storage_cost, royalties_fees), _) = wallet_client .pay_for_storage(random_content_addrs.clone().into_iter().take(subset_len)) .await?; @@ -138,7 +139,7 @@ async fn storage_payment_proofs_cached_in_wallet() -> Result<()> { // check we've paid only for the subset of addresses, 1 nano per addr let new_balance = NanoTokens::from(wallet_original_balance - total_cost.as_nano()); - println!("Verifying new balance on paying wallet is {new_balance} ..."); + info!("Verifying new balance on paying wallet is {new_balance} ..."); let paying_wallet = wallet_client.into_wallet(); assert_eq!(paying_wallet.balance(), new_balance); @@ -190,7 +191,7 @@ async fn storage_payment_chunk_upload_succeeds() -> Result<()> { let (files_api, _content_bytes, file_addr, chunks) = random_content(&client, paying_wallet_dir.to_path_buf(), chunks_dir.path())?; - println!("Paying for {} random 
addresses...", chunks.len()); + info!("Paying for {} random addresses...", chunks.len()); let _cost = wallet_client .pay_for_storage( @@ -245,7 +246,7 @@ async fn storage_payment_chunk_upload_fails_if_no_tokens_sent() -> Result<()> { .upload_test_bytes(content_bytes.clone(), false) .await?; - println!("Reading {content_addr:?} expected to fail"); + info!("Reading {content_addr:?} expected to fail"); let mut files_download = FilesDownload::new(files_api); assert!( matches!( @@ -275,7 +276,7 @@ async fn storage_payment_register_creation_succeeds() -> Result<()> { let xor_name = XorName::random(&mut rng); let address = RegisterAddress::new(xor_name, client.signer_pk()); let net_addr = NetworkAddress::from_register_address(address); - println!("Paying for random Register address {net_addr:?} ..."); + info!("Paying for random Register address {net_addr:?} ..."); let _cost = wallet_client .pay_for_storage(std::iter::once(net_addr)) diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index eb25ed621a..485d3307fa 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -36,7 +36,7 @@ use std::{ time::{Duration, Instant}, }; use tonic::Request; -use tracing::error; +use tracing::{debug, error, info}; const CHUNK_SIZE: usize = 1024; @@ -84,11 +84,17 @@ async fn verify_data_location() -> Result<()> { "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}\nIt will take approx {:?}", VERIFICATION_DELAY*churn_count as u32 ); + info!( + "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}\nIt will take approx {:?}", + VERIFICATION_DELAY*churn_count as u32 + ); let node_rpc_address = get_all_rpc_addresses(true)?; let mut all_peers = get_all_peer_ids(&node_rpc_address).await?; // Store chunks println!("Creating a client and paying wallet..."); + debug!("Creating a client and paying wallet..."); + let paying_wallet_dir = TempDir::new()?; let (client, _paying_wallet) = @@ -115,6 +121,7 @@ async fn verify_data_location() -> Result<()> { // wait for the dead peer to be removed from the RT and the replication flow to finish println!("\nNode {node_index} has been restarted, waiting for {VERIFICATION_DELAY:?} before verification"); + info!("\nNode {node_index} has been restarted, waiting for {VERIFICATION_DELAY:?} before verification"); tokio::time::sleep(VERIFICATION_DELAY).await; // get the new PeerId for the current NodeIndex @@ -135,7 +142,7 @@ async fn verify_data_location() -> Result<()> { fn print_node_close_groups(all_peers: &[PeerId]) { let all_peers = all_peers.to_vec(); - println!("\nNode close groups:"); + info!("\nNode close groups:"); for (node_index, peer) in all_peers.iter().enumerate() { let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); @@ -150,7 +157,7 @@ fn print_node_close_groups(all_peers: &[PeerId]) { .expect("peer to be in iterator") }) .collect::>(); - println!("Close for {node_index}: {peer:?} are {closest_peers_idx:?}"); + info!("Close for {node_index}: {peer:?} are {closest_peers_idx:?}"); } } @@ -170,7 +177,7 @@ async fn get_records_and_holders(node_rpc_addresses: &[SocketAddr]) -> Result, node_rpc_addresses: &[SocketAd println!("*********************************************"); println!("Verifying data across all peers {all_peers:?}"); - tracing::info!("*********************************************"); - tracing::info!("Verifying data across all peers {all_peers:?}"); + 
info!("*********************************************"); + info!("Verifying data across all peers {all_peers:?}"); let mut verification_attempts = 0; while verification_attempts < VERIFICATION_ATTEMPTS { @@ -190,6 +197,7 @@ async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAd let record_holders = get_records_and_holders(node_rpc_addresses).await?; for (key, actual_holders_idx) in record_holders.iter() { println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); + info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); let record_key = KBucketKey::from(key.to_vec()); let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? .into_iter() @@ -201,11 +209,11 @@ async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAd .map(|i| all_peers[*i]) .collect::>(); - println!( + info!( "Expected to be held by {:?} nodes: {expected_holders:?}", expected_holders.len() ); - println!( + info!( "Actually held by {:?} nodes : {actual_holders:?}", actual_holders.len() ); @@ -246,21 +254,23 @@ async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAd println!("Verification failed for {:?} entries", failed.len()); failed.iter().for_each(|(key, failed_peers)| { - failed_peers - .iter() - .for_each(|peer| println!("Record {key:?} is not stored inside {peer:?}",)); + failed_peers.iter().for_each(|peer| { + println!("Record {key:?} is not stored inside {peer:?}"); + error!("Record {key:?} is not stored inside {peer:?}"); + }); }); - println!("State of each node:"); + info!("State of each node:"); record_holders.iter().for_each(|(key, node_index)| { - println!("Record {key:?} is currently held by node indices {node_index:?}"); + info!("Record {key:?} is currently held by node indices {node_index:?}"); }); - println!("Node index map:"); + info!("Node index map:"); all_peers .iter() .enumerate() - .for_each(|(idx, peer)| println!("{idx} : {peer:?}")); + .for_each(|(idx, peer)| info!("{idx} : {peer:?}")); verification_attempts += 1; println!("Sleeping before retrying verification. {verification_attempts}/{VERIFICATION_ATTEMPTS}"); + info!("Sleeping before retrying verification. 
{verification_attempts}/{VERIFICATION_ATTEMPTS}"); if verification_attempts < VERIFICATION_ATTEMPTS { tokio::time::sleep(REVERIFICATION_DELAY).await; } @@ -272,9 +282,11 @@ async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAd if !failed.is_empty() { println!("Verification failed after {VERIFICATION_ATTEMPTS} times"); + error!("Verification failed after {VERIFICATION_ATTEMPTS} times"); Err(eyre!("Verification failed for: {failed:?}")) } else { println!("All the Records have been verified!"); + info!("All the Records have been verified!"); Ok(()) } } @@ -308,7 +320,7 @@ async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) - let (head_chunk_addr, _data_map, _file_size, chunks) = FilesApi::chunk_file(&file_path, chunks_dir.path(), true)?; - println!( + debug!( "Paying storage for ({}) new Chunk/s of file ({} bytes) at {head_chunk_addr:?}", chunks.len(), random_bytes.len() @@ -320,12 +332,17 @@ async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) - uploaded_chunks_count += 1; println!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); + info!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); } println!( "{chunk_count:?} Chunks were stored in {:?}", start.elapsed() ); + info!( + "{chunk_count:?} Chunks were stored in {:?}", + start.elapsed() + ); // to make sure the last chunk was stored tokio::time::sleep(Duration::from_secs(10)).await; diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs index 14ba1dcbf8..6dad0fc505 100644 --- a/sn_node/tests/verify_routing_table.rs +++ b/sn_node/tests/verify_routing_table.rs @@ -22,6 +22,7 @@ use std::{ time::Duration, }; use tonic::Request; +use tracing::{error, info}; /// Sleep for sometime for the nodes for discover each other before verification /// Also can be set through the env variable of the same name. @@ -39,7 +40,7 @@ async fn verify_routing_table() -> Result<()> { }) .map(Duration::from_secs) .unwrap_or(SLEEP_BEFORE_VERIFICATION); - println!("Sleeping for {sleep_duration:?} before verification"); + info!("Sleeping for {sleep_duration:?} before verification"); tokio::time::sleep(sleep_duration).await; let node_rpc_address = get_all_rpc_addresses(false)?; @@ -83,14 +84,16 @@ async fn verify_routing_table() -> Result<()> { continue; } else if bucket.len() == K_VALUE.get() { println!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full"); + info!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. 
But skipped as the bucket is full");
                     continue;
                 } else {
                     println!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
+                    error!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT");
                     failed_list.push(*peer);
                 }
             }
             None => {
-                println!("Current peer {current_peer:?} should be {ilog2_distance} ilog2 distance away from {peer:?}, but that kbucket is not present for current_peer.");
+                info!("Current peer {current_peer:?} should be {ilog2_distance} ilog2 distance away from {peer:?}, but that kbucket is not present for current_peer.");
                 failed_list.push(*peer);
             }
         }
@@ -100,8 +103,8 @@ async fn verify_routing_table() -> Result<()> {
         }
     }
     if !all_failed_list.is_empty() {
-        println!("Failed to verify routing table:\n{all_failed_list:?}");
-        panic!("Failed to verify routing table");
+        error!("Failed to verify routing table:\n{all_failed_list:?}");
+        panic!("Failed to verify routing table.");
     }
     Ok(())
 }
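
Note on the conversion above: the `info!`/`debug!`/`error!` lines only show up if the test binary installs a `tracing` subscriber; these tests appear to rely on `sn_logging::LogBuilder` (imported in several of the patched files) for that. As a minimal sketch of the general mechanism only, assuming the off-the-shelf `tracing-subscriber` crate (with its "env-filter" feature) rather than the project's own helper:

    use tracing::info;
    use tracing_subscriber::EnvFilter;

    // Hypothetical helper, not part of this patch: install a subscriber once
    // so the tracing macros used above actually emit output during tests.
    fn init_test_logging() {
        // `try_init` avoids a panic when several test functions in the same
        // binary each attempt to install a subscriber.
        let _ = tracing_subscriber::fmt()
            .with_env_filter(EnvFilter::from_default_env())
            .with_test_writer() // route output through the test harness capture
            .try_init();
    }

    #[test]
    fn logging_is_captured() {
        init_test_logging();
        info!("Client bootstrap with peer ..."); // recorded by the subscriber
    }

With a subscriber like this installed once per test binary, the converted `info!`/`error!` calls end up in the captured test logs, while the `println!` calls that remain keep printing directly to stdout.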