test: Add network tests (#335)
# Description

This pull request implements the following features:

- [x] Add RPC listens on address test (see the sketch after this list)
- [x] Add Websocket listens on address test
- [x] Add libp2p generates peer ID test
- [x] Add libp2p known peers connect test
- [x] Add libp2p peers connect after mDNS discovery test
- [x] Add `console_subscriber_port` setting
- [x] Update swarm event logs so `peer_id` is always the first label (the log tests assume this ordering)
- [x] Update logs for swarm peer connection closed event to debug and
move `peer_id` to a label
- [x] Add `kill_homestar` function (to abstract out some repeated code)
- [x] Turn off tokio console when running tests
- [x] Reorganize integration test fixture ports into a sparse sequence
across fixtures (gives us room to add tests, but keeps them somewhat in
order).
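
For the first item, here is the rough shape such a test could take: spawn the node with a fixture config, poll the RPC address until it accepts a TCP connection, then tear the process down. This is a hedged sketch only — the binary name, fixture path, RPC port, and the `kill_homestar` helper below are assumptions for illustration, not the exact code added in this PR.

```rust
// Hypothetical sketch of an "RPC listens on address" integration test; the
// binary name, fixture path, RPC port, and `kill_homestar` helper are all
// assumptions for illustration, not the code added in this PR.
use std::{
    net::{Ipv4Addr, SocketAddr, TcpStream},
    process::{Child, Command},
    thread,
    time::{Duration, Instant},
};

// Assumed helper: stop the node process and reap it so its ports are freed.
fn kill_homestar(mut proc: Child) {
    let _ = proc.kill();
    let _ = proc.wait();
}

#[test]
fn test_rpc_listens_on_address() {
    let proc = Command::new(env!("CARGO_BIN_EXE_homestar"))
        .args(["start", "-c", "tests/fixtures/test_network1.toml"]) // assumed fixture
        .spawn()
        .expect("homestar to start");

    let addr = SocketAddr::from((Ipv4Addr::LOCALHOST, 3030)); // assumed RPC port
    let deadline = Instant::now() + Duration::from_secs(10);
    let mut connected = false;
    while Instant::now() < deadline {
        if TcpStream::connect_timeout(&addr, Duration::from_millis(500)).is_ok() {
            connected = true;
            break;
        }
        thread::sleep(Duration::from_millis(100));
    }

    kill_homestar(proc);
    assert!(connected, "RPC server did not listen on {addr}");
}
```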

## Link to issue

Implements #131

Solving for  

- [X] Test MDNS dialing
- [X] Test Node Dialing

## Type of change

- [x] New feature (non-breaking change that adds functionality)
- [x] Refactor (non-breaking change that updates existing functionality)

## Test plan (required)

We are adding tests that check existing functionality.
bgins authored Oct 10, 2023
1 parent f7a0b4f commit b0bda83
Showing 24 changed files with 432 additions and 127 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -22,7 +22,8 @@ private
.DS_Store
homestar-guest-wasm/out
homestar-wasm/out
**/fixtures/test_*
homestar-core/fixtures/test_*
homestar-runtime/fixtures/test_*
.zed
result-alejandra
report.json
30 changes: 30 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion examples/websocket-relay/src/main.rs
@@ -10,7 +10,7 @@ use tracing::info;

fn main() -> Result<()> {
let settings = Settings::load().expect("runtime settings to be loaded");
let _guard = Logger::init();
let _guard = Logger::init(settings.monitoring());

// Just for example purposes, we're going to start the ipfs
// daemon. Typically, these would be started separately.
1 change: 1 addition & 0 deletions homestar-runtime/Cargo.toml
@@ -181,6 +181,7 @@ rm_rf = "0.6"
serial_test = { version = "2.0", default-features = false, features = [
"file_locks",
] }
strip-ansi-escapes = "0.2.0"
tokio-tungstenite = { version = "0.20", default-features = false }
wait-timeout = "0.2"
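
The new `strip-ansi-escapes` dev dependency is presumably there so the log tests can clean captured output before matching on it. A minimal sketch of that pattern, assuming the crate's `strip` function returns the cleaned bytes and using a made-up log line:

```rust
// Minimal sketch: remove ANSI color codes from a captured log line before
// asserting on it. The sample line and labels are made up for illustration.
fn main() {
    let raw = "\u{1b}[32mlevel\u{1b}[0m=DEBUG peer_id=12D3KooWExample message=\"peer connection established\"";
    let stripped = strip_ansi_escapes::strip(raw);
    let clean = String::from_utf8_lossy(&stripped);

    // With the escapes removed, plain substring checks are reliable.
    assert!(clean.starts_with("level=DEBUG"));
    assert!(clean.contains("peer_id=12D3KooWExample"));
}
```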

1 change: 1 addition & 0 deletions homestar-runtime/config/settings.toml
@@ -1,5 +1,6 @@
[monitoring]
process_collector_interval = 5000
metrics_port = 4000
console_subscriber_port = 5555

[node]
24 changes: 14 additions & 10 deletions homestar-runtime/src/event_handler/swarm_event.rs
@@ -93,10 +93,11 @@ async fn handle_swarm_event<THandlerErr: fmt::Debug + Send, DB: Database>(
event_handler: &mut EventHandler<DB>,
) {
match event {
// N.B. Labels should be ordered with peer_id first for log testing
SwarmEvent::Behaviour(ComposedEvent::Identify(identify_event)) => {
match identify_event {
identify::Event::Error { peer_id, error } => {
warn!(err=?error, peer_id=peer_id.to_string(), "error while attempting to identify the remote")
warn!(peer_id=peer_id.to_string(), err=?error, "error while attempting to identify the remote")
}
identify::Event::Sent { peer_id } => {
debug!(peer_id = peer_id.to_string(), "sent identify info to peer")
@@ -148,8 +149,8 @@ async fn handle_swarm_event<THandlerErr: fmt::Debug + Send, DB: Database>(
None,
) {
warn!(
err = format!("{err}"),
peer_id = peer_id.to_string(),
err = format!("{err}"),
"failed to register with rendezvous peer"
)
}
@@ -190,7 +191,7 @@ async fn handle_swarm_event<THandlerErr: fmt::Debug + Send, DB: Database>(
.build();
// TODO: we might be dialing too many peers here. Add settings to configure when we stop dialing new peers
if let Err(err) = event_handler.swarm.dial(opts) {
warn!(err=?err, peer_id=registration.record.peer_id().to_string(), "failed to dial peer discovered through rendezvous")
warn!(peer_id=registration.record.peer_id().to_string(), err=?err, "failed to dial peer discovered through rendezvous")
}
}
} else {
@@ -203,7 +204,7 @@ async fn handle_swarm_event<THandlerErr: fmt::Debug + Send, DB: Database>(
error,
..
} => {
error!(err=?error, peer_id=rendezvous_node.to_string(), "failed to discover peers from rendezvous peer")
error!(peer_id=rendezvous_node.to_string(), err=?error, "failed to discover peers from rendezvous peer")
}
rendezvous::client::Event::Registered {
rendezvous_node,
@@ -219,7 +220,7 @@ async fn handle_swarm_event<THandlerErr: fmt::Debug + Send, DB: Database>(
error,
..
} => {
error!(err=?error, peer_id=rendezvous_node.to_string(), "failed to register self with rendezvous peer")
error!(peer_id=rendezvous_node.to_string(), err=?error, "failed to register self with rendezvous peer")
}
rendezvous::client::Event::Expired { peer } => {
// re-discover records from peer
@@ -244,14 +245,14 @@ async fn handle_swarm_event<THandlerErr: fmt::Debug + Send, DB: Database>(
"served rendezvous discover request to peer"
),
rendezvous::server::Event::DiscoverNotServed { enquirer, error } => {
warn!(err=?error, peer_id=enquirer.to_string(), "did not serve rendezvous discover request")
warn!(peer_id=enquirer.to_string(), err=?error, "did not serve rendezvous discover request")
}
rendezvous::server::Event::PeerNotRegistered {
peer,
namespace,
error,
} => {
warn!(err=?error, namespace=?namespace, peer_id=peer.to_string(), "did not register peer with rendezvous")
warn!(peer_id=peer.to_string(), err=?error, namespace=?namespace, "did not register peer with rendezvous")
}
_ => (),
}
@@ -556,12 +557,15 @@ async fn handle_swarm_event<THandlerErr: fmt::Debug + Send, DB: Database>(
SwarmEvent::ConnectionEstablished {
peer_id, endpoint, ..
} => {
debug!(endpoint=?endpoint, peer_id=peer_id.to_string(), "peer connection established");
debug!(peer_id=peer_id.to_string(), endpoint=?endpoint, "peer connection established");
// add peer to connected peers list
event_handler.connected_peers.insert(peer_id, endpoint);
}
SwarmEvent::ConnectionClosed { peer_id, cause, .. } => {
info!("peer connection closed {peer_id}, cause: {cause:?}");
debug!(
peer_id = peer_id.to_string(),
"peer connection closed, cause: {cause:?}"
);
event_handler.connected_peers.remove_entry(&peer_id);
}
SwarmEvent::OutgoingConnectionError {
@@ -570,8 +574,8 @@ async fn handle_swarm_event<THandlerErr: fmt::Debug + Send, DB: Database>(
error,
} => {
error!(
err=?error,
peer_id=peer_id.map(|p| p.to_string()).unwrap_or_default(),
err=?error,
connection_id=?connection_id,
"outgoing connection error"
)
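
Because the integration tests match on these log lines, the label reordering above is load-bearing: a test can assume `peer_id` is the first event-specific label on a line. A hypothetical check in that spirit (logfmt layout and label names are assumed, not taken from the test code in this PR):

```rust
// Hypothetical helper for log assertions: confirm that `peer_id` appears
// before other event-specific labels (err, endpoint, namespace) on a logfmt
// line, matching the ordering convention enforced in swarm_event.rs above.
fn peer_id_comes_first(line: &str) -> bool {
    let Some(peer_idx) = line.find("peer_id=") else {
        return false;
    };
    ["err=", "endpoint=", "namespace="]
        .iter()
        .all(|label| line.find(*label).map_or(true, |idx| peer_idx < idx))
}

fn main() {
    let line = r#"level=DEBUG peer_id=12D3KooWExample endpoint="Dialer { .. }" message="peer connection established""#;
    assert!(peer_id_comes_first(line));
}
```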
37 changes: 29 additions & 8 deletions homestar-runtime/src/logger.rs
@@ -1,5 +1,6 @@
//! Logger initialization.
use crate::settings;
use std::{io, path::PathBuf};
use tracing_appender::non_blocking::{NonBlocking, WorkerGuard};
use tracing_subscriber::{layer::SubscriberExt as _, prelude::*, EnvFilter};
@@ -19,9 +20,9 @@ impl Logger {
/// write to [io::stdout].
///
/// [logfmt]: <https://brandur.org/logfmt>
pub fn init() -> WorkerGuard {
pub fn init(settings: &settings::Monitoring) -> WorkerGuard {
let (writer, guard) = tracing_appender::non_blocking(io::stdout());
init(writer, guard)
init(writer, guard, settings)
}
}

@@ -30,14 +31,18 @@ impl FileLogger {
/// write to file.
///
/// [logfmt]: <https://brandur.org/logfmt>
pub fn init(dir: PathBuf) -> WorkerGuard {
pub fn init(dir: PathBuf, settings: &settings::Monitoring) -> WorkerGuard {
let file_appender = tracing_appender::rolling::daily(dir, LOG_FILE);
let (writer, guard) = tracing_appender::non_blocking(file_appender);
init(writer, guard)
init(writer, guard, settings)
}
}

fn init(writer: NonBlocking, guard: WorkerGuard) -> WorkerGuard {
fn init(
writer: NonBlocking,
guard: WorkerGuard,
#[allow(unused_variables)] settings: &settings::Monitoring,
) -> WorkerGuard {
let format_layer = tracing_subscriber::fmt::layer()
.event_format(tracing_logfmt::EventsFormatter::default())
.fmt_fields(tracing_logfmt::FieldsFormatter::default())
@@ -55,7 +60,12 @@ fn init(writer: NonBlocking, guard: WorkerGuard) -> WorkerGuard {
.add_directive("tower_http=info".parse().expect(DIRECTIVE_EXPECT))
});

#[cfg(all(feature = "console", tokio_unstable))]
#[cfg(all(
feature = "console",
not(test),
not(feature = "test-utils"),
tokio_unstable
))]
let filter = filter
.add_directive("tokio=trace".parse().expect(DIRECTIVE_EXPECT))
.add_directive("runtime=trace".parse().expect(DIRECTIVE_EXPECT));
@@ -64,16 +74,27 @@ fn init(writer: NonBlocking, guard: WorkerGuard) -> WorkerGuard {
.with(filter)
.with(format_layer);

#[cfg(all(feature = "console", tokio_unstable))]
#[cfg(all(
feature = "console",
not(test),
not(feature = "test-utils"),
tokio_unstable
))]
{
let console_layer = console_subscriber::ConsoleLayer::builder()
.retention(std::time::Duration::from_secs(60))
.server_addr(([127, 0, 0, 1], settings.console_subscriber_port))
.spawn();

registry.with(console_layer).init();
}

#[cfg(any(not(feature = "console"), not(tokio_unstable)))]
#[cfg(any(
not(feature = "console"),
test,
not(tokio_unstable),
feature = "test-utils",
))]
{
registry.init();
}
4 changes: 2 additions & 2 deletions homestar-runtime/src/main.rs
@@ -30,9 +30,9 @@ fn main() -> Result<()> {
let _guard = if daemonize {
daemon::start(daemon_dir.clone())
.expect("runner to be started as a daemon process");
FileLogger::init(daemon_dir)
FileLogger::init(daemon_dir, settings.monitoring())
} else {
Logger::init()
Logger::init(settings.monitoring())
};

info!(
3 changes: 3 additions & 0 deletions homestar-runtime/src/settings.rs
@@ -40,6 +40,8 @@ pub struct Monitoring {
pub process_collector_interval: u64,
/// Metrics port for prometheus scraping.
pub metrics_port: u16,
/// Tokio console port.
pub console_subscriber_port: u16,
}

/// Server settings.
@@ -159,6 +161,7 @@ impl Default for Monitoring {
Self {
metrics_port: 4000,
process_collector_interval: 5000,
console_subscriber_port: 5555,
}
}
}
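
A small sanity check that these defaults stay in sync with `config/settings.toml` might look like the following. This is a hypothetical test, not part of this diff; it assumes `Monitoring` is in scope, and the values mirror the Default impl and settings.toml entries shown above.

```rust
// Hypothetical unit test: the expected values come from the Default impl and
// the config/settings.toml entries shown earlier in this commit.
#[test]
fn monitoring_defaults_match_settings_toml() {
    let monitoring = Monitoring::default();
    assert_eq!(monitoring.process_collector_interval, 5000);
    assert_eq!(monitoring.metrics_port, 4000);
    assert_eq!(monitoring.console_subscriber_port, 5555);
}
```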
4 changes: 2 additions & 2 deletions homestar-runtime/src/settings/pubkey_config.rs
@@ -64,14 +64,14 @@ impl PubkeyConfig {

match key_type {
KeyType::Ed25519 => {
info!("generating radom ed25519 key from seed");
info!("generating random ed25519 key from seed");

identity::Keypair::ed25519_from_bytes(new_key).map_err(|e| {
anyhow!("failed to generate ed25519 key from random: {:?}", e)
})
}
KeyType::Secp256k1 => {
info!("generating radom secp256k1 key from seed");
info!("generating random secp256k1 key from seed");

let sk =
secp256k1::SecretKey::try_from_bytes(&mut new_key).map_err(|e| {
(Remaining changed files not shown.)
