diff --git a/networks/monza/monza-client/src/tests/mod.rs b/networks/monza/monza-client/src/tests/mod.rs index 8885a8229..405605fbf 100644 --- a/networks/monza/monza-client/src/tests/mod.rs +++ b/networks/monza/monza-client/src/tests/mod.rs @@ -1,7 +1,7 @@ use crate::{ - coin_client::CoinClient, - rest_client::{Client, FaucetClient}, - types::LocalAccount, + coin_client::CoinClient, + rest_client::{Client, FaucetClient}, + types::LocalAccount, }; use anyhow::{Context, Result}; use async_channel::Sender; @@ -11,131 +11,134 @@ use suzuka_executor::v1::SuzukaExecutorV1; use url::Url; static MONZA_CONFIG: Lazy = Lazy::new(|| { - maptos_execution_util::config::Config::try_from_env() - .context("Failed to create the config") - .unwrap() + maptos_execution_util::config::Config::try_from_env() + .context("Failed to create the config") + .unwrap() }); // :!:>section_1c static NODE_URL: Lazy = Lazy::new(|| { - Url::from_str( - format!("http://{}", MONZA_CONFIG.aptos_config.aptos_rest_listen_url.as_str()).as_str(), - ) - .unwrap() + Url::from_str( + format!("http://{}", MONZA_CONFIG.aptos_config.aptos_rest_listen_url.as_str()).as_str(), + ) + .unwrap() }); static FAUCET_URL: Lazy = Lazy::new(|| { - Url::from_str( - format!("http://{}", MONZA_CONFIG.aptos_config.aptos_faucet_listen_url.as_str()).as_str(), - ) - .unwrap() + Url::from_str( + format!("http://{}", MONZA_CONFIG.aptos_config.aptos_faucet_listen_url.as_str()).as_str(), + ) + .unwrap() }); // <:!:section_1c #[tokio::test] async fn test_example_interaction() -> Result<()> { - // :!:>section_1a - let rest_client = Client::new(NODE_URL.clone()); - let faucet_client = FaucetClient::new(FAUCET_URL.clone(), NODE_URL.clone()); // <:!:section_1a + // :!:>section_1a + let rest_client = Client::new(NODE_URL.clone()); + let faucet_client = FaucetClient::new(FAUCET_URL.clone(), NODE_URL.clone()); // <:!:section_1a - // :!:>section_1b - let coin_client = CoinClient::new(&rest_client); // <:!:section_1b + // :!:>section_1b + let coin_client = CoinClient::new(&rest_client); // <:!:section_1b - // Create two accounts locally, Alice and Bob. - // :!:>section_2 - let mut alice = LocalAccount::generate(&mut rand::rngs::OsRng); - let bob = LocalAccount::generate(&mut rand::rngs::OsRng); // <:!:section_2 + // Create two accounts locally, Alice and Bob. + // :!:>section_2 + let mut alice = LocalAccount::generate(&mut rand::rngs::OsRng); + let bob = LocalAccount::generate(&mut rand::rngs::OsRng); // <:!:section_2 - // Print account addresses. - println!("\n=== Addresses ==="); - println!("Alice: {}", alice.address().to_hex_literal()); - println!("Bob: {}", bob.address().to_hex_literal()); + // Print account addresses. + println!("\n=== Addresses ==="); + println!("Alice: {}", alice.address().to_hex_literal()); + println!("Bob: {}", bob.address().to_hex_literal()); - // Create the accounts on chain, but only fund Alice. - // :!:>section_3 - faucet_client - .fund(alice.address(), 100_000_000) - .await - .context("Failed to fund Alice's account")?; - faucet_client.create_account(bob.address()).await.context("Failed to fund Bob's account")?; // <:!:section_3 + // Create the accounts on chain, but only fund Alice. + // :!:>section_3 + faucet_client + .fund(alice.address(), 100_000_000) + .await + .context("Failed to fund Alice's account")?; + faucet_client + .create_account(bob.address()) + .await + .context("Failed to fund Bob's account")?; // <:!:section_3 - // Print initial balances. 
- println!("\n=== Initial Balances ==="); - println!( - "Alice: {:?}", - coin_client - .get_account_balance(&alice.address()) - .await - .context("Failed to get Alice's account balance")? - ); - println!( - "Bob: {:?}", - coin_client - .get_account_balance(&bob.address()) - .await - .context("Failed to get Bob's account balance")? - ); + // Print initial balances. + println!("\n=== Initial Balances ==="); + println!( + "Alice: {:?}", + coin_client + .get_account_balance(&alice.address()) + .await + .context("Failed to get Alice's account balance")? + ); + println!( + "Bob: {:?}", + coin_client + .get_account_balance(&bob.address()) + .await + .context("Failed to get Bob's account balance")? + ); - // Have Alice send Bob some coins. - let txn_hash = coin_client - .transfer(&mut alice, bob.address(), 1_000, None) - .await - .context("Failed to submit transaction to transfer coins")?; - rest_client - .wait_for_transaction(&txn_hash) - .await - .context("Failed when waiting for the transfer transaction")?; + // Have Alice send Bob some coins. + let txn_hash = coin_client + .transfer(&mut alice, bob.address(), 1_000, None) + .await + .context("Failed to submit transaction to transfer coins")?; + rest_client + .wait_for_transaction(&txn_hash) + .await + .context("Failed when waiting for the transfer transaction")?; - // Print intermediate balances. - println!("\n=== Intermediate Balances ==="); - // :!:>section_4 - println!( - "Alice: {:?}", - coin_client - .get_account_balance(&alice.address()) - .await - .context("Failed to get Alice's account balance the second time")? - ); - println!( - "Bob: {:?}", - coin_client - .get_account_balance(&bob.address()) - .await - .context("Failed to get Bob's account balance the second time")? - ); // <:!:section_4 + // Print intermediate balances. + println!("\n=== Intermediate Balances ==="); + // :!:>section_4 + println!( + "Alice: {:?}", + coin_client + .get_account_balance(&alice.address()) + .await + .context("Failed to get Alice's account balance the second time")? + ); + println!( + "Bob: {:?}", + coin_client + .get_account_balance(&bob.address()) + .await + .context("Failed to get Bob's account balance the second time")? + ); // <:!:section_4 - // Have Alice send Bob some more coins. - // :!:>section_5 - let txn_hash = coin_client - .transfer(&mut alice, bob.address(), 1_000, None) - .await - .context("Failed to submit transaction to transfer coins")?; // <:!:section_5 - // :!:>section_6 - rest_client - .wait_for_transaction(&txn_hash) - .await - .context("Failed when waiting for the transfer transaction")?; // <:!:section_6 + // Have Alice send Bob some more coins. + // :!:>section_5 + let txn_hash = coin_client + .transfer(&mut alice, bob.address(), 1_000, None) + .await + .context("Failed to submit transaction to transfer coins")?; // <:!:section_5 + // :!:>section_6 + rest_client + .wait_for_transaction(&txn_hash) + .await + .context("Failed when waiting for the transfer transaction")?; // <:!:section_6 - // Print final balances. - println!("\n=== Final Balances ==="); - println!( - "Alice: {:?}", - coin_client - .get_account_balance(&alice.address()) - .await - .context("Failed to get Alice's account balance the second time")? - ); - println!( - "Bob: {:?}", - coin_client - .get_account_balance(&bob.address()) - .await - .context("Failed to get Bob's account balance the second time")? - ); + // Print final balances. 
+ println!("\n=== Final Balances ==="); + println!( + "Alice: {:?}", + coin_client + .get_account_balance(&alice.address()) + .await + .context("Failed to get Alice's account balance the second time")? + ); + println!( + "Bob: {:?}", + coin_client + .get_account_balance(&bob.address()) + .await + .context("Failed to get Bob's account balance the second time")? + ); - let (tx, _rx) = async_channel::unbounded(); - let executor = SuzukaExecutorV1::try_from_env(tx).await?; - let api = executor.get_apis(); + let (tx, _rx) = async_channel::unbounded(); + let executor = SuzukaExecutorV1::try_from_env(tx).await?; + let api = executor.get_apis(); - Ok(()) + Ok(()) } diff --git a/networks/monza/monza-config/src/bin/monza_config.rs b/networks/monza/monza-config/src/bin/monza_config.rs index 405dca8dc..17e1832a1 100644 --- a/networks/monza/monza-config/src/bin/monza_config.rs +++ b/networks/monza/monza-config/src/bin/monza_config.rs @@ -2,9 +2,9 @@ use monza_config::Config; #[tokio::main] async fn main() -> Result<(), anyhow::Error> { - // read any values from env, but populate the default values if they are not present - let config = Config::try_from_env()?; - // write the values to the env - print!("{}", config.write_bash_export_string()?); - Ok(()) + // read any values from env, but populate the default values if they are not present + let config = Config::try_from_env()?; + // write the values to the env + print!("{}", config.write_bash_export_string()?); + Ok(()) } diff --git a/networks/monza/monza-config/src/lib.rs b/networks/monza/monza-config/src/lib.rs index e70e254af..f772dc879 100644 --- a/networks/monza/monza-config/src/lib.rs +++ b/networks/monza/monza-config/src/lib.rs @@ -1,25 +1,25 @@ #[derive(Debug, Clone, PartialEq, Eq)] pub struct Config { - pub execution_config: maptos_execution_util::config::Config, + pub execution_config: maptos_execution_util::config::Config, } impl Config { - pub fn new(execution_config: maptos_execution_util::config::Config) -> Self { - Self { execution_config } - } + pub fn new(execution_config: maptos_execution_util::config::Config) -> Self { + Self { execution_config } + } - pub fn try_from_env() -> Result { - let execution_config = maptos_execution_util::config::Config::try_from_env()?; + pub fn try_from_env() -> Result { + let execution_config = maptos_execution_util::config::Config::try_from_env()?; - Ok(Self { execution_config }) - } + Ok(Self { execution_config }) + } - pub fn write_to_env(&self) -> Result<(), anyhow::Error> { - self.execution_config.write_to_env()?; - Ok(()) - } + pub fn write_to_env(&self) -> Result<(), anyhow::Error> { + self.execution_config.write_to_env()?; + Ok(()) + } - pub fn write_bash_export_string(&self) -> Result { - Ok(format!("{}", self.execution_config.write_bash_export_string()?)) - } + pub fn write_bash_export_string(&self) -> Result { + Ok(format!("{}", self.execution_config.write_bash_export_string()?)) + } } diff --git a/networks/monza/monza-full-node/src/lib.rs b/networks/monza/monza-full-node/src/lib.rs index ea8bcbbf2..8d4387e04 100644 --- a/networks/monza/monza-full-node/src/lib.rs +++ b/networks/monza/monza-full-node/src/lib.rs @@ -4,20 +4,20 @@ pub mod partial; pub mod tests; pub trait MonzaFullNode { - /// Runs the services until crash or shutdown. - async fn run_services(&self) -> Result<(), anyhow::Error>; + /// Runs the services until crash or shutdown. + async fn run_services(&self) -> Result<(), anyhow::Error>; - /// Runs the background tasks until crash or shutdown. 
- async fn run_background_tasks(&self) -> Result<(), anyhow::Error>; + /// Runs the background tasks until crash or shutdown. + async fn run_background_tasks(&self) -> Result<(), anyhow::Error>; - /// Runs the executor until crash or shutdown. - async fn run_executor(&self) -> Result<(), anyhow::Error>; + /// Runs the executor until crash or shutdown. + async fn run_executor(&self) -> Result<(), anyhow::Error>; - /// Runs the full node until crash or shutdown. - async fn run(&self) -> Result<(), anyhow::Error> { - // run services and executor concurrently - tokio::try_join!(self.run_background_tasks(), self.run_services(), self.run_executor())?; + /// Runs the full node until crash or shutdown. + async fn run(&self) -> Result<(), anyhow::Error> { + // run services and executor concurrently + tokio::try_join!(self.run_background_tasks(), self.run_services(), self.run_executor())?; - Ok(()) - } + Ok(()) + } } diff --git a/networks/monza/monza-full-node/src/main.rs b/networks/monza/monza-full-node/src/main.rs index 854c0296f..db855896c 100644 --- a/networks/monza/monza-full-node/src/main.rs +++ b/networks/monza/monza-full-node/src/main.rs @@ -3,21 +3,22 @@ use monza_full_node::{partial::MonzaPartialNode, MonzaFullNode}; #[tokio::main] async fn main() -> Result<(), anyhow::Error> { - #[cfg(feature = "logging")] - { - use tracing_subscriber::EnvFilter; + #[cfg(feature = "logging")] + { + use tracing_subscriber::EnvFilter; - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), - ) - .init(); - } + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), + ) + .init(); + } - let executor = - MonzaPartialNode::try_from_env().await.context("Failed to create the executor")?; + let executor = MonzaPartialNode::try_from_env() + .await + .context("Failed to create the executor")?; - executor.run().await.context("Failed to run the executor")?; + executor.run().await.context("Failed to run the executor")?; - Ok(()) + Ok(()) } diff --git a/networks/monza/monza-full-node/src/partial.rs b/networks/monza/monza-full-node/src/partial.rs index 9c98b517c..7fbc0b69c 100644 --- a/networks/monza/monza-full-node/src/partial.rs +++ b/networks/monza/monza-full-node/src/partial.rs @@ -8,8 +8,8 @@ use tokio_stream::StreamExt; use tracing::debug; use monza_executor::{ - v1::MonzaExecutorV1, ExecutableBlock, ExecutableTransactions, HashValue, MonzaExecutor, - SignatureVerifiedTransaction, SignedTransaction, Transaction, + v1::MonzaExecutorV1, ExecutableBlock, ExecutableTransactions, HashValue, MonzaExecutor, + SignatureVerifiedTransaction, SignedTransaction, Transaction, }; use movement_types::Block; // FIXME: glob imports are bad style @@ -18,191 +18,191 @@ use m1_da_light_node_client::*; #[derive(Clone)] pub struct MonzaPartialNode { - executor: T, - transaction_sender: Sender, - pub transaction_receiver: Receiver, - light_node_client: Arc>>, + executor: T, + transaction_sender: Sender, + pub transaction_receiver: Receiver, + light_node_client: Arc>>, } impl MonzaPartialNode { - pub fn new( - executor: T, - light_node_client: LightNodeServiceClient, - ) -> Self { - let (transaction_sender, transaction_receiver) = async_channel::unbounded(); - Self { - executor: executor, - transaction_sender, - transaction_receiver, - light_node_client: Arc::new(RwLock::new(light_node_client)), - } - } - - fn bind_transaction_channel(&mut self) { - 
self.executor.set_tx_channel(self.transaction_sender.clone()); - } - - pub fn bound( - executor: T, - light_node_client: LightNodeServiceClient, - ) -> Result { - let mut node = Self::new(executor, light_node_client); - node.bind_transaction_channel(); - Ok(node) - } - - pub async fn tick_write_transactions_to_da(&self) -> Result<(), anyhow::Error> { - // limit the total time batching transactions - let start_time = std::time::Instant::now(); - let end_time = start_time + std::time::Duration::from_millis(100); - - let mut transactions = Vec::new(); - - while let Ok(transaction_result) = - tokio::time::timeout(Duration::from_millis(100), self.transaction_receiver.recv()).await - { - match transaction_result { - Ok(transaction) => { - debug!("Got transaction: {:?}", transaction); - - let serialized_transaction = serde_json::to_vec(&transaction)?; - transactions.push(BlobWrite { data: serialized_transaction }); - } - Err(_) => { - break; - } - } - - if std::time::Instant::now() > end_time { - break; - } - } - - if transactions.len() > 0 { - let client_ptr = self.light_node_client.clone(); - let mut light_node_client = client_ptr.write().await; - light_node_client.batch_write(BatchWriteRequest { blobs: transactions }).await?; - - tracing::debug!("Wrote transactions to DA"); - } - - Ok(()) - } - - pub async fn write_transactions_to_da(&self) -> Result<(), anyhow::Error> { - loop { - self.tick_write_transactions_to_da().await?; - } - } - - // receive transactions from the transaction channel and send them to be executed - // ! This assumes the m1 da light node is running sequencer mode - pub async fn read_blocks_from_da(&self) -> Result<(), anyhow::Error> { - let block_head_height = self.executor.get_block_head_height().await?; - - let mut stream = { - let client_ptr = self.light_node_client.clone(); - let mut light_node_client = client_ptr.write().await; - light_node_client - .stream_read_from_height(StreamReadFromHeightRequest { height: block_head_height }) - .await? - } - .into_inner(); - - while let Some(blob) = stream.next().await { - debug!("Got blob: {:?}", blob); - - // get the block - let (block_bytes, block_timestamp, block_id) = match blob? - .blob - .ok_or(anyhow::anyhow!("No blob in response"))? - .blob_type - .ok_or(anyhow::anyhow!("No blob type in response"))? 
- { - blob_response::BlobType::SequencedBlobBlock(blob) => { - (blob.data, blob.timestamp, blob.blob_id) - } - _ => { - anyhow::bail!("Invalid blob type in response") - } - }; - - // get the block - let block: Block = serde_json::from_slice(&block_bytes)?; - - debug!("Got block: {:?}", block); - - // get the transactions - let mut block_transactions = Vec::new(); - let block_metadata = self - .executor - .build_block_metadata(HashValue::sha3_256_of(block_id.as_bytes()), block_timestamp) - .await?; - let block_metadata_transaction = - SignatureVerifiedTransaction::Valid(Transaction::BlockMetadata(block_metadata)); - block_transactions.push(block_metadata_transaction); - - for transaction in block.transactions { - let signed_transaction: SignedTransaction = serde_json::from_slice(&transaction.0)?; - let signature_verified_transaction = SignatureVerifiedTransaction::Valid( - Transaction::UserTransaction(signed_transaction), - ); - block_transactions.push(signature_verified_transaction); - } - - // form the executable transactions vec - let block = ExecutableTransactions::Unsharded(block_transactions); - - // hash the block bytes - let mut hasher = sha2::Sha256::new(); - hasher.update(&block_bytes); - let slice = hasher.finalize(); - let block_hash = HashValue::from_slice(slice.as_slice())?; - - // form the executable block and execute it - let executable_block = ExecutableBlock::new(block_hash, block); - let block_id = executable_block.block_id; - self.executor.execute_block_opt(executable_block).await?; - - debug!("Executed block: {:?}", block_id); - } - - Ok(()) - } + pub fn new( + executor: T, + light_node_client: LightNodeServiceClient, + ) -> Self { + let (transaction_sender, transaction_receiver) = async_channel::unbounded(); + Self { + executor: executor, + transaction_sender, + transaction_receiver, + light_node_client: Arc::new(RwLock::new(light_node_client)), + } + } + + fn bind_transaction_channel(&mut self) { + self.executor.set_tx_channel(self.transaction_sender.clone()); + } + + pub fn bound( + executor: T, + light_node_client: LightNodeServiceClient, + ) -> Result { + let mut node = Self::new(executor, light_node_client); + node.bind_transaction_channel(); + Ok(node) + } + + pub async fn tick_write_transactions_to_da(&self) -> Result<(), anyhow::Error> { + // limit the total time batching transactions + let start_time = std::time::Instant::now(); + let end_time = start_time + std::time::Duration::from_millis(100); + + let mut transactions = Vec::new(); + + while let Ok(transaction_result) = + tokio::time::timeout(Duration::from_millis(100), self.transaction_receiver.recv()).await + { + match transaction_result { + Ok(transaction) => { + debug!("Got transaction: {:?}", transaction); + + let serialized_transaction = serde_json::to_vec(&transaction)?; + transactions.push(BlobWrite { data: serialized_transaction }); + } + Err(_) => { + break; + } + } + + if std::time::Instant::now() > end_time { + break; + } + } + + if transactions.len() > 0 { + let client_ptr = self.light_node_client.clone(); + let mut light_node_client = client_ptr.write().await; + light_node_client.batch_write(BatchWriteRequest { blobs: transactions }).await?; + + tracing::debug!("Wrote transactions to DA"); + } + + Ok(()) + } + + pub async fn write_transactions_to_da(&self) -> Result<(), anyhow::Error> { + loop { + self.tick_write_transactions_to_da().await?; + } + } + + // receive transactions from the transaction channel and send them to be executed + // ! 
This assumes the m1 da light node is running sequencer mode + pub async fn read_blocks_from_da(&self) -> Result<(), anyhow::Error> { + let block_head_height = self.executor.get_block_head_height().await?; + + let mut stream = { + let client_ptr = self.light_node_client.clone(); + let mut light_node_client = client_ptr.write().await; + light_node_client + .stream_read_from_height(StreamReadFromHeightRequest { height: block_head_height }) + .await? + } + .into_inner(); + + while let Some(blob) = stream.next().await { + debug!("Got blob: {:?}", blob); + + // get the block + let (block_bytes, block_timestamp, block_id) = match blob? + .blob + .ok_or(anyhow::anyhow!("No blob in response"))? + .blob_type + .ok_or(anyhow::anyhow!("No blob type in response"))? + { + blob_response::BlobType::SequencedBlobBlock(blob) => { + (blob.data, blob.timestamp, blob.blob_id) + } + _ => { + anyhow::bail!("Invalid blob type in response") + } + }; + + // get the block + let block: Block = serde_json::from_slice(&block_bytes)?; + + debug!("Got block: {:?}", block); + + // get the transactions + let mut block_transactions = Vec::new(); + let block_metadata = self + .executor + .build_block_metadata(HashValue::sha3_256_of(block_id.as_bytes()), block_timestamp) + .await?; + let block_metadata_transaction = + SignatureVerifiedTransaction::Valid(Transaction::BlockMetadata(block_metadata)); + block_transactions.push(block_metadata_transaction); + + for transaction in block.transactions { + let signed_transaction: SignedTransaction = serde_json::from_slice(&transaction.0)?; + let signature_verified_transaction = SignatureVerifiedTransaction::Valid( + Transaction::UserTransaction(signed_transaction), + ); + block_transactions.push(signature_verified_transaction); + } + + // form the executable transactions vec + let block = ExecutableTransactions::Unsharded(block_transactions); + + // hash the block bytes + let mut hasher = sha2::Sha256::new(); + hasher.update(&block_bytes); + let slice = hasher.finalize(); + let block_hash = HashValue::from_slice(slice.as_slice())?; + + // form the executable block and execute it + let executable_block = ExecutableBlock::new(block_hash, block); + let block_id = executable_block.block_id; + self.executor.execute_block_opt(executable_block).await?; + + debug!("Executed block: {:?}", block_id); + } + + Ok(()) + } } impl MonzaFullNode for MonzaPartialNode { - /// Runs the services until crash or shutdown. - async fn run_services(&self) -> Result<(), anyhow::Error> { - self.executor.run_service().await?; + /// Runs the services until crash or shutdown. + async fn run_services(&self) -> Result<(), anyhow::Error> { + self.executor.run_service().await?; - Ok(()) - } + Ok(()) + } - /// Runs the background tasks until crash or shutdown. - async fn run_background_tasks(&self) -> Result<(), anyhow::Error> { - self.executor.run_background_tasks().await?; + /// Runs the background tasks until crash or shutdown. + async fn run_background_tasks(&self) -> Result<(), anyhow::Error> { + self.executor.run_background_tasks().await?; - Ok(()) - } + Ok(()) + } - // ! Currently this only implements opt. - /// Runs the executor until crash or shutdown. - async fn run_executor(&self) -> Result<(), anyhow::Error> { - // wait for both tasks to finish - tokio::try_join!(self.write_transactions_to_da(), self.read_blocks_from_da())?; + // ! Currently this only implements opt. + /// Runs the executor until crash or shutdown. 
+ async fn run_executor(&self) -> Result<(), anyhow::Error> {
+ // wait for both tasks to finish
+ tokio::try_join!(self.write_transactions_to_da(), self.read_blocks_from_da())?;

- Ok(())
- }
+ Ok(())
+ }
}

impl MonzaPartialNode<MonzaExecutorV1> {
- pub async fn try_from_env() -> Result<Self, anyhow::Error> {
- let (tx, _) = async_channel::unbounded();
- let light_node_client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;
- let executor = MonzaExecutorV1::try_from_env(tx)
- .await
- .context("Failed to get executor from environment")?;
- Self::bound(executor, light_node_client)
- }
+ pub async fn try_from_env() -> Result<Self, anyhow::Error> {
+ let (tx, _) = async_channel::unbounded();
+ let light_node_client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;
+ let executor = MonzaExecutorV1::try_from_env(tx)
+ .await
+ .context("Failed to get executor from environment")?;
+ Self::bound(executor, light_node_client)
+ }
}
diff --git a/networks/suzuka/suzuka-client/src/tests/mod.rs b/networks/suzuka/suzuka-client/src/tests/mod.rs
index 1b87f78a3..42ad7c7e0 100644
--- a/networks/suzuka/suzuka-client/src/tests/mod.rs
+++ b/networks/suzuka/suzuka-client/src/tests/mod.rs
@@ -1,7 +1,7 @@
use crate::{
- coin_client::CoinClient,
- rest_client::{Client, FaucetClient},
- types::LocalAccount,
+ coin_client::CoinClient,
+ rest_client::{Client, FaucetClient},
+ types::LocalAccount,
};
use anyhow::{Context, Result};
use once_cell::sync::Lazy;
@@ -10,131 +10,134 @@ use suzuka_executor::SuzukaExecutorV1;
use url::Url;

static SUZUKA_CONFIG: Lazy<maptos_execution_util::config::Config> = Lazy::new(|| {
- maptos_execution_util::config::Config::try_from_env()
- .context("Failed to create the config")
- .unwrap()
+ maptos_execution_util::config::Config::try_from_env()
+ .context("Failed to create the config")
+ .unwrap()
});

// :!:>section_1c
static NODE_URL: Lazy<Url> = Lazy::new(|| {
- Url::from_str(
- format!("http://{}", SUZUKA_CONFIG.aptos_config.aptos_rest_listen_url.as_str()).as_str(),
- )
- .unwrap()
+ Url::from_str(
+ format!("http://{}", SUZUKA_CONFIG.aptos_config.aptos_rest_listen_url.as_str()).as_str(),
+ )
+ .unwrap()
});

static FAUCET_URL: Lazy<Url> = Lazy::new(|| {
- Url::from_str(
- format!("http://{}", SUZUKA_CONFIG.aptos_config.aptos_faucet_listen_url.as_str()).as_str(),
- )
- .unwrap()
+ Url::from_str(
+ format!("http://{}", SUZUKA_CONFIG.aptos_config.aptos_faucet_listen_url.as_str()).as_str(),
+ )
+ .unwrap()
});
// <:!:section_1c

#[tokio::test]
async fn test_example_interaction() -> Result<()> {
- // :!:>section_1a
- let rest_client = Client::new(NODE_URL.clone());
- let faucet_client = FaucetClient::new(FAUCET_URL.clone(), NODE_URL.clone()); // <:!:section_1a
+ // :!:>section_1a
+ let rest_client = Client::new(NODE_URL.clone());
+ let faucet_client = FaucetClient::new(FAUCET_URL.clone(), NODE_URL.clone()); // <:!:section_1a

- // :!:>section_1b
- let coin_client = CoinClient::new(&rest_client); // <:!:section_1b
+ // :!:>section_1b
+ let coin_client = CoinClient::new(&rest_client); // <:!:section_1b

- // Create two accounts locally, Alice and Bob.
- // :!:>section_2
- let mut alice = LocalAccount::generate(&mut rand::rngs::OsRng);
- let bob = LocalAccount::generate(&mut rand::rngs::OsRng); // <:!:section_2
+ // Create two accounts locally, Alice and Bob.
+ // :!:>section_2
+ let mut alice = LocalAccount::generate(&mut rand::rngs::OsRng);
+ let bob = LocalAccount::generate(&mut rand::rngs::OsRng); // <:!:section_2

- // Print account addresses.
- println!("\n=== Addresses ==="); - println!("Alice: {}", alice.address().to_hex_literal()); - println!("Bob: {}", bob.address().to_hex_literal()); + // Print account addresses. + println!("\n=== Addresses ==="); + println!("Alice: {}", alice.address().to_hex_literal()); + println!("Bob: {}", bob.address().to_hex_literal()); - // Create the accounts on chain, but only fund Alice. - // :!:>section_3 - faucet_client - .fund(alice.address(), 100_000_000) - .await - .context("Failed to fund Alice's account")?; - faucet_client.create_account(bob.address()).await.context("Failed to fund Bob's account")?; // <:!:section_3 + // Create the accounts on chain, but only fund Alice. + // :!:>section_3 + faucet_client + .fund(alice.address(), 100_000_000) + .await + .context("Failed to fund Alice's account")?; + faucet_client + .create_account(bob.address()) + .await + .context("Failed to fund Bob's account")?; // <:!:section_3 - // Print initial balances. - println!("\n=== Initial Balances ==="); - println!( - "Alice: {:?}", - coin_client - .get_account_balance(&alice.address()) - .await - .context("Failed to get Alice's account balance")? - ); - println!( - "Bob: {:?}", - coin_client - .get_account_balance(&bob.address()) - .await - .context("Failed to get Bob's account balance")? - ); + // Print initial balances. + println!("\n=== Initial Balances ==="); + println!( + "Alice: {:?}", + coin_client + .get_account_balance(&alice.address()) + .await + .context("Failed to get Alice's account balance")? + ); + println!( + "Bob: {:?}", + coin_client + .get_account_balance(&bob.address()) + .await + .context("Failed to get Bob's account balance")? + ); - // Have Alice send Bob some coins. - let txn_hash = coin_client - .transfer(&mut alice, bob.address(), 1_000, None) - .await - .context("Failed to submit transaction to transfer coins")?; - rest_client - .wait_for_transaction(&txn_hash) - .await - .context("Failed when waiting for the transfer transaction")?; + // Have Alice send Bob some coins. + let txn_hash = coin_client + .transfer(&mut alice, bob.address(), 1_000, None) + .await + .context("Failed to submit transaction to transfer coins")?; + rest_client + .wait_for_transaction(&txn_hash) + .await + .context("Failed when waiting for the transfer transaction")?; - // Print intermediate balances. - println!("\n=== Intermediate Balances ==="); - // :!:>section_4 - println!( - "Alice: {:?}", - coin_client - .get_account_balance(&alice.address()) - .await - .context("Failed to get Alice's account balance the second time")? - ); - println!( - "Bob: {:?}", - coin_client - .get_account_balance(&bob.address()) - .await - .context("Failed to get Bob's account balance the second time")? - ); // <:!:section_4 + // Print intermediate balances. + println!("\n=== Intermediate Balances ==="); + // :!:>section_4 + println!( + "Alice: {:?}", + coin_client + .get_account_balance(&alice.address()) + .await + .context("Failed to get Alice's account balance the second time")? + ); + println!( + "Bob: {:?}", + coin_client + .get_account_balance(&bob.address()) + .await + .context("Failed to get Bob's account balance the second time")? + ); // <:!:section_4 - // Have Alice send Bob some more coins. 
- // :!:>section_5 - let txn_hash = coin_client - .transfer(&mut alice, bob.address(), 1_000, None) - .await - .context("Failed to submit transaction to transfer coins")?; // <:!:section_5 - // :!:>section_6 - rest_client - .wait_for_transaction(&txn_hash) - .await - .context("Failed when waiting for the transfer transaction")?; // <:!:section_6 + // Have Alice send Bob some more coins. + // :!:>section_5 + let txn_hash = coin_client + .transfer(&mut alice, bob.address(), 1_000, None) + .await + .context("Failed to submit transaction to transfer coins")?; // <:!:section_5 + // :!:>section_6 + rest_client + .wait_for_transaction(&txn_hash) + .await + .context("Failed when waiting for the transfer transaction")?; // <:!:section_6 - // Print final balances. - println!("\n=== Final Balances ==="); - println!( - "Alice: {:?}", - coin_client - .get_account_balance(&alice.address()) - .await - .context("Failed to get Alice's account balance the second time")? - ); - println!( - "Bob: {:?}", - coin_client - .get_account_balance(&bob.address()) - .await - .context("Failed to get Bob's account balance the second time")? - ); + // Print final balances. + println!("\n=== Final Balances ==="); + println!( + "Alice: {:?}", + coin_client + .get_account_balance(&alice.address()) + .await + .context("Failed to get Alice's account balance the second time")? + ); + println!( + "Bob: {:?}", + coin_client + .get_account_balance(&bob.address()) + .await + .context("Failed to get Bob's account balance the second time")? + ); - let (tx, rx) = async_channel::unbounded(); - let executor = SuzukaExecutorV1::try_from_env(tx).await?; - let apis = executor.get_apis().await?; + let (tx, rx) = async_channel::unbounded(); + let executor = SuzukaExecutorV1::try_from_env(tx).await?; + let apis = executor.get_apis().await?; - Ok(()) + Ok(()) } diff --git a/networks/suzuka/suzuka-config/src/bin/suzuka_config.rs b/networks/suzuka/suzuka-config/src/bin/suzuka_config.rs index 36e034fa8..b3765563a 100644 --- a/networks/suzuka/suzuka-config/src/bin/suzuka_config.rs +++ b/networks/suzuka/suzuka-config/src/bin/suzuka_config.rs @@ -2,9 +2,9 @@ use suzuka_config::Config; #[tokio::main] async fn main() -> Result<(), anyhow::Error> { - // read any values from env, but populate the default values if they are not present - let config = Config::try_from_env()?; - // write the values to the env - print!("{}", config.write_bash_export_string()?); - Ok(()) + // read any values from env, but populate the default values if they are not present + let config = Config::try_from_env()?; + // write the values to the env + print!("{}", config.write_bash_export_string()?); + Ok(()) } diff --git a/networks/suzuka/suzuka-config/src/lib.rs b/networks/suzuka/suzuka-config/src/lib.rs index e70e254af..f772dc879 100644 --- a/networks/suzuka/suzuka-config/src/lib.rs +++ b/networks/suzuka/suzuka-config/src/lib.rs @@ -1,25 +1,25 @@ #[derive(Debug, Clone, PartialEq, Eq)] pub struct Config { - pub execution_config: maptos_execution_util::config::Config, + pub execution_config: maptos_execution_util::config::Config, } impl Config { - pub fn new(execution_config: maptos_execution_util::config::Config) -> Self { - Self { execution_config } - } + pub fn new(execution_config: maptos_execution_util::config::Config) -> Self { + Self { execution_config } + } - pub fn try_from_env() -> Result { - let execution_config = maptos_execution_util::config::Config::try_from_env()?; + pub fn try_from_env() -> Result { + let execution_config = 
maptos_execution_util::config::Config::try_from_env()?;

- Ok(Self { execution_config })
- }
+ Ok(Self { execution_config })
+ }

- pub fn write_to_env(&self) -> Result<(), anyhow::Error> {
- self.execution_config.write_to_env()?;
- Ok(())
- }
+ pub fn write_to_env(&self) -> Result<(), anyhow::Error> {
+ self.execution_config.write_to_env()?;
+ Ok(())
+ }

- pub fn write_bash_export_string(&self) -> Result<String, anyhow::Error> {
- Ok(format!("{}", self.execution_config.write_bash_export_string()?))
- }
+ pub fn write_bash_export_string(&self) -> Result<String, anyhow::Error> {
+ Ok(format!("{}", self.execution_config.write_bash_export_string()?))
+ }
}
diff --git a/networks/suzuka/suzuka-full-node/src/lib.rs b/networks/suzuka/suzuka-full-node/src/lib.rs
index 190e5d3af..48525cc33 100644
--- a/networks/suzuka/suzuka-full-node/src/lib.rs
+++ b/networks/suzuka/suzuka-full-node/src/lib.rs
@@ -4,23 +4,23 @@ pub mod partial;
pub mod tests;

pub trait SuzukaFullNode {
- /// Runs the services until crash or shutdown.
- async fn run_services(&self) -> Result<(), anyhow::Error>;
+ /// Runs the services until crash or shutdown.
+ async fn run_services(&self) -> Result<(), anyhow::Error>;

- /// Runs the background tasks until crash or shutdown.
- async fn run_background_tasks(&self) -> Result<(), anyhow::Error>;
+ /// Runs the background tasks until crash or shutdown.
+ async fn run_background_tasks(&self) -> Result<(), anyhow::Error>;

- /// Runs the executor until crash or shutdown.
- async fn run_executor(&self) -> Result<(), anyhow::Error>;
+ /// Runs the executor until crash or shutdown.
+ async fn run_executor(&self) -> Result<(), anyhow::Error>;

- /// Runs the maptos rest apit service until crash or shutdown.
- async fn run_maptos_rest(&self) -> Result<(), anyhow::Error>;
+ /// Runs the maptos rest api service until crash or shutdown.
+ async fn run_maptos_rest(&self) -> Result<(), anyhow::Error>;

- /// Runs the full node until crash or shutdown.
- async fn run(&self) -> Result<(), anyhow::Error> {
- // run services and executor concurrently
- tokio::try_join!(self.run_background_tasks(), self.run_services(), self.run_executor())?;
+ /// Runs the full node until crash or shutdown.
+ async fn run(&self) -> Result<(), anyhow::Error> { + // run services and executor concurrently + tokio::try_join!(self.run_background_tasks(), self.run_services(), self.run_executor())?; - Ok(()) - } + Ok(()) + } } diff --git a/networks/suzuka/suzuka-full-node/src/main.rs b/networks/suzuka/suzuka-full-node/src/main.rs index 9c94f368f..fc5e858af 100644 --- a/networks/suzuka/suzuka-full-node/src/main.rs +++ b/networks/suzuka/suzuka-full-node/src/main.rs @@ -3,23 +3,24 @@ use suzuka_full_node::{partial::SuzukaPartialNode, SuzukaFullNode}; #[tokio::main] async fn main() -> Result<(), anyhow::Error> { - #[cfg(feature = "logging")] - { - use tracing_subscriber::EnvFilter; + #[cfg(feature = "logging")] + { + use tracing_subscriber::EnvFilter; - tracing_subscriber::fmt() - .with_env_filter( - EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), - ) - .init(); - } + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), + ) + .init(); + } - let (executor, background_task) = - SuzukaPartialNode::try_from_env().await.context("Failed to create the executor")?; + let (executor, background_task) = SuzukaPartialNode::try_from_env() + .await + .context("Failed to create the executor")?; - tokio::spawn(background_task); + tokio::spawn(background_task); - executor.run().await.context("Failed to run the executor")?; + executor.run().await.context("Failed to run the executor")?; - Ok(()) + Ok(()) } diff --git a/networks/suzuka/suzuka-full-node/src/partial.rs b/networks/suzuka/suzuka-full-node/src/partial.rs index de965a96e..136a256dd 100644 --- a/networks/suzuka/suzuka-full-node/src/partial.rs +++ b/networks/suzuka/suzuka-full-node/src/partial.rs @@ -1,17 +1,17 @@ use crate::SuzukaFullNode; use m1_da_light_node_client::{ - blob_response, BatchWriteRequest, BlobWrite, LightNodeServiceClient, - StreamReadFromHeightRequest, + blob_response, BatchWriteRequest, BlobWrite, LightNodeServiceClient, + StreamReadFromHeightRequest, }; use maptos_rest::MaptosRest; use mcr_settlement_client::{mock::MockMcrSettlementClient, McrSettlementClientOperations}; use mcr_settlement_manager::{ - CommitmentEventStream, McrSettlementManager, McrSettlementManagerOperations, + CommitmentEventStream, McrSettlementManager, McrSettlementManagerOperations, }; use movement_types::{Block, BlockCommitmentEvent}; use suzuka_executor::{ - v1::SuzukaExecutorV1, ExecutableBlock, ExecutableTransactions, HashValue, - SignatureVerifiedTransaction, SignedTransaction, SuzukaExecutor, Transaction, + v1::SuzukaExecutorV1, ExecutableBlock, ExecutableTransactions, HashValue, + SignatureVerifiedTransaction, SignedTransaction, SuzukaExecutor, Transaction, }; use anyhow::Context; @@ -26,242 +26,242 @@ use std::sync::Arc; use std::time::Duration; pub struct SuzukaPartialNode { - executor: T, - transaction_sender: Sender, - pub transaction_receiver: Receiver, - light_node_client: Arc>>, - settlement_manager: McrSettlementManager, - maptos_rest: MaptosRest, + executor: T, + transaction_sender: Sender, + pub transaction_receiver: Receiver, + light_node_client: Arc>>, + settlement_manager: McrSettlementManager, + maptos_rest: MaptosRest, } impl SuzukaPartialNode where - T: SuzukaExecutor + Send + Sync, + T: SuzukaExecutor + Send + Sync, { - pub fn new( - executor: T, - light_node_client: LightNodeServiceClient, - settlement_client: C, - maptos_rest: MaptosRest, - ) -> (Self, impl Future> + Send) - where - C: McrSettlementClientOperations + Send + 'static, - { - let 
(settlement_manager, commitment_events) = McrSettlementManager::new(settlement_client); - let (transaction_sender, transaction_receiver) = async_channel::unbounded(); - ( - Self { - executor, - transaction_sender, - transaction_receiver, - light_node_client: Arc::new(RwLock::new(light_node_client)), - settlement_manager, - maptos_rest, - }, - read_commitment_events(commitment_events), - ) - } - - fn bind_transaction_channel(&mut self) { - self.executor.set_tx_channel(self.transaction_sender.clone()); - } - - pub fn bound( - executor: T, - light_node_client: LightNodeServiceClient, - settlement_client: C, - maptos_rest: MaptosRest, - ) -> Result<(Self, impl Future> + Send), anyhow::Error> - where - C: McrSettlementClientOperations + Send + 'static, - { - let (mut node, background_task) = - Self::new(executor, light_node_client, settlement_client, maptos_rest); - node.bind_transaction_channel(); - Ok((node, background_task)) - } - - pub async fn tick_write_transactions_to_da(&self) -> Result<(), anyhow::Error> { - // limit the total time batching transactions - let start_time = std::time::Instant::now(); - let end_time = start_time + std::time::Duration::from_millis(100); - - let mut transactions = Vec::new(); - - while let Ok(transaction_result) = - tokio::time::timeout(Duration::from_millis(100), self.transaction_receiver.recv()).await - { - match transaction_result { - Ok(transaction) => { - debug!("Got transaction: {:?}", transaction); - - let serialized_transaction = serde_json::to_vec(&transaction)?; - transactions.push(BlobWrite { data: serialized_transaction }); - } - Err(_) => { - break; - } - } - - if std::time::Instant::now() > end_time { - break; - } - } - - if transactions.len() > 0 { - let client_ptr = self.light_node_client.clone(); - let mut light_node_client = client_ptr.write().await; - light_node_client.batch_write(BatchWriteRequest { blobs: transactions }).await?; - - debug!("Wrote transactions to DA"); - } - - Ok(()) - } - - pub async fn write_transactions_to_da(&self) -> Result<(), anyhow::Error> { - loop { - self.tick_write_transactions_to_da().await?; - } - } - - // receive transactions from the transaction channel and send them to be executed - // ! This assumes the m1 da light node is running sequencer mode - pub async fn read_blocks_from_da(&self) -> Result<(), anyhow::Error> { - let block_head_height = self.executor.get_block_head_height().await?; - - let mut stream = { - let client_ptr = self.light_node_client.clone(); - let mut light_node_client = client_ptr.write().await; - light_node_client - .stream_read_from_height(StreamReadFromHeightRequest { height: block_head_height }) - .await? - } - .into_inner(); - - while let Some(blob) = stream.next().await { - debug!("Got blob: {:?}", blob); - - // get the block - let (block_bytes, block_timestamp, block_id) = match blob? - .blob - .ok_or(anyhow::anyhow!("No blob in response"))? - .blob_type - .ok_or(anyhow::anyhow!("No blob type in response"))? 
- { - blob_response::BlobType::SequencedBlobBlock(blob) => { - (blob.data, blob.timestamp, blob.blob_id) - } - _ => { - anyhow::bail!("Invalid blob type in response") - } - }; - - let block: Block = serde_json::from_slice(&block_bytes)?; - - debug!("Got block: {:?}", block); - - // get the transactions - let mut block_transactions = Vec::new(); - let block_metadata = self - .executor - .build_block_metadata(HashValue::sha3_256_of(block_id.as_bytes()), block_timestamp) - .await?; - let block_metadata_transaction = - SignatureVerifiedTransaction::Valid(Transaction::BlockMetadata(block_metadata)); - block_transactions.push(block_metadata_transaction); - - for transaction in block.transactions { - let signed_transaction: SignedTransaction = serde_json::from_slice(&transaction.0)?; - let signature_verified_transaction = SignatureVerifiedTransaction::Valid( - Transaction::UserTransaction(signed_transaction), - ); - block_transactions.push(signature_verified_transaction); - } - - // form the executable transactions vec - let block = ExecutableTransactions::Unsharded(block_transactions); - - // hash the block bytes - let mut hasher = sha2::Sha256::new(); - hasher.update(&block_bytes); - let slice = hasher.finalize(); - let block_hash = HashValue::from_slice(slice.as_slice())?; - - // form the executable block and execute it - let executable_block = ExecutableBlock::new(block_hash, block); - let block_id = executable_block.block_id; - let commitment = self.executor.execute_block_opt(executable_block).await?; - - debug!("Executed block: {:?}", block_id); - - self.settlement_manager.post_block_commitment(commitment).await?; - } - - Ok(()) - } + pub fn new( + executor: T, + light_node_client: LightNodeServiceClient, + settlement_client: C, + maptos_rest: MaptosRest, + ) -> (Self, impl Future> + Send) + where + C: McrSettlementClientOperations + Send + 'static, + { + let (settlement_manager, commitment_events) = McrSettlementManager::new(settlement_client); + let (transaction_sender, transaction_receiver) = async_channel::unbounded(); + ( + Self { + executor, + transaction_sender, + transaction_receiver, + light_node_client: Arc::new(RwLock::new(light_node_client)), + settlement_manager, + maptos_rest, + }, + read_commitment_events(commitment_events), + ) + } + + fn bind_transaction_channel(&mut self) { + self.executor.set_tx_channel(self.transaction_sender.clone()); + } + + pub fn bound( + executor: T, + light_node_client: LightNodeServiceClient, + settlement_client: C, + maptos_rest: MaptosRest, + ) -> Result<(Self, impl Future> + Send), anyhow::Error> + where + C: McrSettlementClientOperations + Send + 'static, + { + let (mut node, background_task) = + Self::new(executor, light_node_client, settlement_client, maptos_rest); + node.bind_transaction_channel(); + Ok((node, background_task)) + } + + pub async fn tick_write_transactions_to_da(&self) -> Result<(), anyhow::Error> { + // limit the total time batching transactions + let start_time = std::time::Instant::now(); + let end_time = start_time + std::time::Duration::from_millis(100); + + let mut transactions = Vec::new(); + + while let Ok(transaction_result) = + tokio::time::timeout(Duration::from_millis(100), self.transaction_receiver.recv()).await + { + match transaction_result { + Ok(transaction) => { + debug!("Got transaction: {:?}", transaction); + + let serialized_transaction = serde_json::to_vec(&transaction)?; + transactions.push(BlobWrite { data: serialized_transaction }); + } + Err(_) => { + break; + } + } + + if std::time::Instant::now() 
> end_time { + break; + } + } + + if transactions.len() > 0 { + let client_ptr = self.light_node_client.clone(); + let mut light_node_client = client_ptr.write().await; + light_node_client.batch_write(BatchWriteRequest { blobs: transactions }).await?; + + debug!("Wrote transactions to DA"); + } + + Ok(()) + } + + pub async fn write_transactions_to_da(&self) -> Result<(), anyhow::Error> { + loop { + self.tick_write_transactions_to_da().await?; + } + } + + // receive transactions from the transaction channel and send them to be executed + // ! This assumes the m1 da light node is running sequencer mode + pub async fn read_blocks_from_da(&self) -> Result<(), anyhow::Error> { + let block_head_height = self.executor.get_block_head_height().await?; + + let mut stream = { + let client_ptr = self.light_node_client.clone(); + let mut light_node_client = client_ptr.write().await; + light_node_client + .stream_read_from_height(StreamReadFromHeightRequest { height: block_head_height }) + .await? + } + .into_inner(); + + while let Some(blob) = stream.next().await { + debug!("Got blob: {:?}", blob); + + // get the block + let (block_bytes, block_timestamp, block_id) = match blob? + .blob + .ok_or(anyhow::anyhow!("No blob in response"))? + .blob_type + .ok_or(anyhow::anyhow!("No blob type in response"))? + { + blob_response::BlobType::SequencedBlobBlock(blob) => { + (blob.data, blob.timestamp, blob.blob_id) + } + _ => { + anyhow::bail!("Invalid blob type in response") + } + }; + + let block: Block = serde_json::from_slice(&block_bytes)?; + + debug!("Got block: {:?}", block); + + // get the transactions + let mut block_transactions = Vec::new(); + let block_metadata = self + .executor + .build_block_metadata(HashValue::sha3_256_of(block_id.as_bytes()), block_timestamp) + .await?; + let block_metadata_transaction = + SignatureVerifiedTransaction::Valid(Transaction::BlockMetadata(block_metadata)); + block_transactions.push(block_metadata_transaction); + + for transaction in block.transactions { + let signed_transaction: SignedTransaction = serde_json::from_slice(&transaction.0)?; + let signature_verified_transaction = SignatureVerifiedTransaction::Valid( + Transaction::UserTransaction(signed_transaction), + ); + block_transactions.push(signature_verified_transaction); + } + + // form the executable transactions vec + let block = ExecutableTransactions::Unsharded(block_transactions); + + // hash the block bytes + let mut hasher = sha2::Sha256::new(); + hasher.update(&block_bytes); + let slice = hasher.finalize(); + let block_hash = HashValue::from_slice(slice.as_slice())?; + + // form the executable block and execute it + let executable_block = ExecutableBlock::new(block_hash, block); + let block_id = executable_block.block_id; + let commitment = self.executor.execute_block_opt(executable_block).await?; + + debug!("Executed block: {:?}", block_id); + + self.settlement_manager.post_block_commitment(commitment).await?; + } + + Ok(()) + } } async fn read_commitment_events(mut stream: CommitmentEventStream) -> anyhow::Result<()> { - while let Some(res) = stream.next().await { - let event = res?; - match event { - BlockCommitmentEvent::Accepted(commitment) => { - debug!("Commitment accepted: {:?}", commitment); - } - BlockCommitmentEvent::Rejected { height, reason } => { - debug!("Commitment rejected: {:?} {:?}", height, reason); - } - } - } - Ok(()) + while let Some(res) = stream.next().await { + let event = res?; + match event { + BlockCommitmentEvent::Accepted(commitment) => { + debug!("Commitment accepted: 
{:?}", commitment); + } + BlockCommitmentEvent::Rejected { height, reason } => { + debug!("Commitment rejected: {:?} {:?}", height, reason); + } + } + } + Ok(()) } impl SuzukaFullNode for SuzukaPartialNode where - T: SuzukaExecutor + Send + Sync, + T: SuzukaExecutor + Send + Sync, { - /// Runs the services until crash or shutdown. - async fn run_services(&self) -> Result<(), anyhow::Error> { - self.executor.run_service().await?; - - Ok(()) - } - - /// Runs the background tasks until crash or shutdown. - async fn run_background_tasks(&self) -> Result<(), anyhow::Error> { - self.executor.run_background_tasks().await?; - - Ok(()) - } - - // ! Currently this only implements opt. - /// Runs the executor until crash or shutdown. - async fn run_executor(&self) -> Result<(), anyhow::Error> { - // wait for both tasks to finish - tokio::try_join!(self.write_transactions_to_da(), self.read_blocks_from_da())?; - - Ok(()) - } - - /// Runs the maptos rest api service until crash or shutdown. - async fn run_maptos_rest(&self) -> Result<(), anyhow::Error> { - self.maptos_rest.run_service().await?; - Ok(()) - } + /// Runs the services until crash or shutdown. + async fn run_services(&self) -> Result<(), anyhow::Error> { + self.executor.run_service().await?; + + Ok(()) + } + + /// Runs the background tasks until crash or shutdown. + async fn run_background_tasks(&self) -> Result<(), anyhow::Error> { + self.executor.run_background_tasks().await?; + + Ok(()) + } + + // ! Currently this only implements opt. + /// Runs the executor until crash or shutdown. + async fn run_executor(&self) -> Result<(), anyhow::Error> { + // wait for both tasks to finish + tokio::try_join!(self.write_transactions_to_da(), self.read_blocks_from_da())?; + + Ok(()) + } + + /// Runs the maptos rest api service until crash or shutdown. 
+ async fn run_maptos_rest(&self) -> Result<(), anyhow::Error> {
+ self.maptos_rest.run_service().await?;
+ Ok(())
+ }
}

impl SuzukaPartialNode<SuzukaExecutorV1> {
- pub async fn try_from_env(
- ) -> Result<(Self, impl Future<Output = Result<(), anyhow::Error>> + Send), anyhow::Error> {
- let (tx, _) = async_channel::unbounded();
- let light_node_client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;
- let executor = SuzukaExecutorV1::try_from_env(tx)
- .await
- .context("Failed to get executor from environment")?;
- // TODO: switch to real settlement client
- let settlement_client = MockMcrSettlementClient::new();
- let maptos_rest = MaptosRest::try_from_env(executor.executor.context.clone())?;
- Self::bound(executor, light_node_client, settlement_client, maptos_rest)
- }
+ pub async fn try_from_env(
+ ) -> Result<(Self, impl Future<Output = Result<(), anyhow::Error>> + Send), anyhow::Error> {
+ let (tx, _) = async_channel::unbounded();
+ let light_node_client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;
+ let executor = SuzukaExecutorV1::try_from_env(tx)
+ .await
+ .context("Failed to get executor from environment")?;
+ // TODO: switch to real settlement client
+ let settlement_client = MockMcrSettlementClient::new();
+ let maptos_rest = MaptosRest::try_from_env(executor.executor.context.clone())?;
+ Self::bound(executor, light_node_client, settlement_client, maptos_rest)
+ }
}
diff --git a/protocol-units/da/m1/light-node-client/src/test/e2e/raw/passthrough.rs b/protocol-units/da/m1/light-node-client/src/test/e2e/raw/passthrough.rs
index fbbf98f16..598029892 100644
--- a/protocol-units/da/m1/light-node-client/src/test/e2e/raw/passthrough.rs
+++ b/protocol-units/da/m1/light-node-client/src/test/e2e/raw/passthrough.rs
@@ -5,71 +5,74 @@ use tokio_stream::StreamExt;

#[tokio::test]
async fn test_light_node_submits_blob_over_stream() -> Result<(), anyhow::Error> {
- let mut client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;
+ let mut client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;

- let blob_write = BlobWrite { data: vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9] };
- let request = StreamWriteBlobRequest { blob: Some(blob_write.clone()) };
+ let blob_write = BlobWrite { data: vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9] };
+ let request = StreamWriteBlobRequest { blob: Some(blob_write.clone()) };

- let (tx, rx) = tokio::sync::mpsc::channel(32);
+ let (tx, rx) = tokio::sync::mpsc::channel(32);

- // Convert the receiver into a stream
- let stream = ReceiverStream::new(rx);
+ // Convert the receiver into a stream
+ let stream = ReceiverStream::new(rx);

- let handle = client.stream_write_blob(stream).await?;
+ let handle = client.stream_write_blob(stream).await?;

- tx.send(request.clone()).await?;
+ tx.send(request.clone()).await?;

- let back =
- handle.into_inner().next().await.ok_or(anyhow::anyhow!("No response from server"))??;
+ let back = handle
+ .into_inner()
+ .next()
+ .await
+ .ok_or(anyhow::anyhow!("No response from server"))??;

- match back.blob {
- Some(blob) => match blob.blob_type.ok_or(anyhow::anyhow!("No blob type in response"))? {
- blob_response::BlobType::PassedThroughBlob(blob) => {
- assert_eq!(blob.data, request.blob.unwrap().data);
- }
- _ => {
- assert!(false, "Invalid blob type in response");
- }
- },
- None => {
- assert!(false, "No blob in response");
- }
- }
+ match back.blob {
+ Some(blob) => match blob.blob_type.ok_or(anyhow::anyhow!("No blob type in response"))?
{ + blob_response::BlobType::PassedThroughBlob(blob) => { + assert_eq!(blob.data, request.blob.unwrap().data); + } + _ => { + assert!(false, "Invalid blob type in response"); + } + }, + None => { + assert!(false, "No blob in response"); + } + } - Ok(()) + Ok(()) } #[tokio::test] async fn test_submit_and_read() -> Result<(), anyhow::Error> { - let mut client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?; - - let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - let blob_write = BlobWrite { data: data.clone() }; - let request = BatchWriteRequest { blobs: vec![blob_write.clone()] }; - - let write = client.batch_write(request).await?.into_inner(); - let first = write.blobs[0].clone(); - - let blob_type = first.blob_type.ok_or(anyhow::anyhow!("No blob type in response"))?; - let height = match blob_type { - blob_response::BlobType::PassedThroughBlob(blob) => blob.height, - _ => { - anyhow::bail!("Invalid blob type in response"); - } - }; - let read_request = ReadAtHeightRequest { height }; - - let read = client.read_at_height(read_request).await?.into_inner(); - let first = read.blobs[0].clone(); - - match first.blob_type.ok_or(anyhow::anyhow!("No blob type in response"))? { - blob_response::BlobType::PassedThroughBlob(blob) => { - assert_eq!(blob.data, data); - } - _ => { - anyhow::bail!("Invalid blob type in response"); - } - } - - Ok(()) + let mut client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?; + + let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let blob_write = BlobWrite { data: data.clone() }; + let request = BatchWriteRequest { blobs: vec![blob_write.clone()] }; + + let write = client.batch_write(request).await?.into_inner(); + let first = write.blobs[0].clone(); + + let blob_type = first.blob_type.ok_or(anyhow::anyhow!("No blob type in response"))?; + let height = match blob_type { + blob_response::BlobType::PassedThroughBlob(blob) => blob.height, + _ => { + anyhow::bail!("Invalid blob type in response"); + } + }; + let read_request = ReadAtHeightRequest { height }; + + let read = client.read_at_height(read_request).await?.into_inner(); + let first = read.blobs[0].clone(); + + match first.blob_type.ok_or(anyhow::anyhow!("No blob type in response"))? 
{
+ blob_response::BlobType::PassedThroughBlob(blob) => {
+ assert_eq!(blob.data, data);
+ }
+ _ => {
+ anyhow::bail!("Invalid blob type in response");
+ }
+ }
+
+ Ok(())
}
diff --git a/protocol-units/da/m1/light-node-client/src/test/e2e/raw/sequencer.rs b/protocol-units/da/m1/light-node-client/src/test/e2e/raw/sequencer.rs
index 908d397ad..2529896b5 100644
--- a/protocol-units/da/m1/light-node-client/src/test/e2e/raw/sequencer.rs
+++ b/protocol-units/da/m1/light-node-client/src/test/e2e/raw/sequencer.rs
@@ -4,47 +4,50 @@ use tokio_stream::StreamExt;

#[tokio::test]
async fn test_light_node_submits_blob_over_stream() -> Result<(), anyhow::Error> {
- let mut client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;
-
- let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
- let blob_write = BlobWrite { data: data.clone() };
- let batch_write_request = BatchWriteRequest { blobs: vec![blob_write.clone()] };
- client.batch_write(batch_write_request).await?;
-
- let mut log_lines = Vec::new();
-
- for _ in 0..16 {
- let stream = client.stream_read_latest(StreamReadLatestRequest {}).await?;
-
- let back =
- stream.into_inner().next().await.ok_or(anyhow::anyhow!("No response from server"))?;
-
- match back {
- Ok(response) => match response.blob {
- Some(blob) => {
- match blob.blob_type.ok_or(anyhow::anyhow!("No blob type in response"))? {
- blob_response::BlobType::SequencedBlobBlock(blob) => {
- let block = serde_json::from_slice::<Block>(&blob.data)?;
- assert_eq!(block.transactions[0].0, data);
- return Ok(());
- }
- _ => {
- assert!(false, "Invalid blob type in response");
- }
- }
- }
- None => {
- assert!(false, "No blob in response");
- }
- },
- Err(e) => {
- let log_line = format!("Error: {}", e);
- log_lines.push(log_line);
- }
- }
- }
-
- assert!(false, "No block fou in 16 attempts, log: {:?}", log_lines);
-
- Ok(())
+ let mut client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;
+
+ let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let blob_write = BlobWrite { data: data.clone() };
+ let batch_write_request = BatchWriteRequest { blobs: vec![blob_write.clone()] };
+ client.batch_write(batch_write_request).await?;
+
+ let mut log_lines = Vec::new();
+
+ for _ in 0..16 {
+ let stream = client.stream_read_latest(StreamReadLatestRequest {}).await?;
+
+ let back = stream
+ .into_inner()
+ .next()
+ .await
+ .ok_or(anyhow::anyhow!("No response from server"))?;
+
+ match back {
+ Ok(response) => match response.blob {
+ Some(blob) => {
+ match blob.blob_type.ok_or(anyhow::anyhow!("No blob type in response"))? {
{
+ blob_response::BlobType::SequencedBlobBlock(blob) => {
+ let block = serde_json::from_slice::<Block>(&blob.data)?;
+ assert_eq!(block.transactions[0].0, data);
+ return Ok(());
+ }
+ _ => {
+ panic!("Invalid blob type in response");
+ }
+ }
+ }
+ None => {
+ panic!("No blob in response");
+ }
+ },
+ Err(e) => {
+ let log_line = format!("Error: {}", e);
+ log_lines.push(log_line);
+ }
+ }
+ }
+
+ panic!("No block found in 16 attempts, log: {:?}", log_lines)
}
diff --git a/protocol-units/da/m1/light-node-grpc/src/lib.rs b/protocol-units/da/m1/light-node-grpc/src/lib.rs
index 94a58b3fb..1769f1ff1 100644
--- a/protocol-units/da/m1/light-node-grpc/src/lib.rs
+++ b/protocol-units/da/m1/light-node-grpc/src/lib.rs
@@ -1,3 +1,3 @@
tonic::include_proto!("movementlabs.protocol_units.da.m1.light_node.v1beta1"); // The string specified here
pub const FILE_DESCRIPTOR_SET: &[u8] =
- tonic::include_file_descriptor_set!("m1-da-light-node-grpc-descriptor");
+ tonic::include_file_descriptor_set!("m1-da-light-node-grpc-descriptor");
diff --git a/protocol-units/da/m1/light-node-verifier/src/lib.rs b/protocol-units/da/m1/light-node-verifier/src/lib.rs
index dbf5e6079..212d4975d 100644
--- a/protocol-units/da/m1/light-node-verifier/src/lib.rs
+++ b/protocol-units/da/m1/light-node-verifier/src/lib.rs
@@ -4,41 +4,41 @@ pub use m1_da_light_node_grpc::*;
#[tonic::async_trait]
pub trait Verifier {
- async fn verify(
- &self,
- verification_mode: VerificationMode,
- blob: &[u8],
- height: u64,
- ) -> Result<bool, anyhow::Error> {
- match verification_mode {
- VerificationMode::Cowboy => self.verify_cowboy(verification_mode, blob, height).await,
- VerificationMode::ValidatorIn => {
- self.verifiy_validator_in(verification_mode, blob, height).await
- }
- VerificationMode::MOfN => self.verify_m_of_n(verification_mode, blob, height).await,
- }
- }
+ async fn verify(
+ &self,
+ verification_mode: VerificationMode,
+ blob: &[u8],
+ height: u64,
+ ) -> Result<bool, anyhow::Error> {
+ match verification_mode {
+ VerificationMode::Cowboy => self.verify_cowboy(verification_mode, blob, height).await,
+ VerificationMode::ValidatorIn => {
+ self.verify_validator_in(verification_mode, blob, height).await
+ }
+ VerificationMode::MOfN => self.verify_m_of_n(verification_mode, blob, height).await,
+ }
+ }
- async fn verify_cowboy(
- &self,
- _verification_mode: VerificationMode,
- _blob: &[u8],
- _height: u64,
- ) -> Result<bool, anyhow::Error> {
- Ok(true)
- }
+ async fn verify_cowboy(
+ &self,
+ _verification_mode: VerificationMode,
+ _blob: &[u8],
+ _height: u64,
+ ) -> Result<bool, anyhow::Error> {
+ Ok(true)
+ }
- async fn verifiy_validator_in(
- &self,
- _verification_mode: VerificationMode,
- _blob: &[u8],
- _height: u64,
- ) -> Result<bool, anyhow::Error>;
+ async fn verify_validator_in(
+ &self,
+ _verification_mode: VerificationMode,
+ _blob: &[u8],
+ _height: u64,
+ ) -> Result<bool, anyhow::Error>;
- async fn verify_m_of_n(
- &self,
- _verification_mode: VerificationMode,
- _blob: &[u8],
- _height: u64,
- ) -> Result<bool, anyhow::Error>;
+ async fn verify_m_of_n(
+ &self,
+ _verification_mode: VerificationMode,
+ _blob: &[u8],
+ _height: u64,
+ ) -> Result<bool, anyhow::Error>;
}
diff --git a/protocol-units/da/m1/light-node-verifier/src/v1.rs b/protocol-units/da/m1/light-node-verifier/src/v1.rs
index f50e7a7b3..cd6a24353 100644
--- a/protocol-units/da/m1/light-node-verifier/src/v1.rs
+++ b/protocol-units/da/m1/light-node-verifier/src/v1.rs
@@ -6,162 +6,162 @@ use std::sync::Arc;
#[derive(Clone)]
pub struct V1Verifier {
- pub client: Arc<Client>,
- pub namespace: Namespace,
+ pub client: Arc<Client>,
+ pub namespace: Namespace,
}
#[tonic::async_trait]
impl Verifier for
V1Verifier {
- /// All verification is the same for now
- async fn verify(
- &self,
- _verification_mode: VerificationMode,
- blob: &[u8],
- height: u64,
- ) -> Result<bool, anyhow::Error> {
- let celestia_blob = Blob::new(self.namespace.clone(), blob.to_vec())?;
-
- celestia_blob.validate()?;
-
- // wait for the header to be at the correct height
- self.client.header_wait_for_height(height).await?;
-
- // get the root
- let dah = self.client.header_get_by_height(height).await?.dah;
- let root_hash = dah.row_root(0).ok_or(anyhow::anyhow!("No root hash found"))?;
-
- // get the proof
- let proofs = self
- .client
- .blob_get_proof(height, self.namespace.clone(), celestia_blob.commitment)
- .await?;
-
- // get the leaves
- let leaves = celestia_blob.to_shares()?;
-
- // check if included
- for proof in proofs.iter() {
- proof
- .verify_complete_namespace(&root_hash, &leaves, self.namespace.into())
- .map_err(|e| anyhow::anyhow!("Failed to verify proof: {:?}", e))?;
- }
-
- Ok(true)
- }
-
- async fn verify_cowboy(
- &self,
- _verification_mode: VerificationMode,
- _blob: &[u8],
- _height: u64,
- ) -> Result<bool, anyhow::Error> {
- unimplemented!()
- }
-
- async fn verify_m_of_n(
- &self,
- _verification_mode: VerificationMode,
- _blob: &[u8],
- _height: u64,
- ) -> Result<bool, anyhow::Error> {
- unimplemented!()
- }
-
- async fn verifiy_validator_in(
- &self,
- _verification_mode: VerificationMode,
- _blob: &[u8],
- _height: u64,
- ) -> Result<bool, anyhow::Error> {
- unimplemented!()
- }
+ /// All verification is the same for now
+ async fn verify(
+ &self,
+ _verification_mode: VerificationMode,
+ blob: &[u8],
+ height: u64,
+ ) -> Result<bool, anyhow::Error> {
+ let celestia_blob = Blob::new(self.namespace.clone(), blob.to_vec())?;
+
+ celestia_blob.validate()?;
+
+ // wait for the header to be at the correct height
+ self.client.header_wait_for_height(height).await?;
+
+ // get the root
+ let dah = self.client.header_get_by_height(height).await?.dah;
+ let root_hash = dah.row_root(0).ok_or(anyhow::anyhow!("No root hash found"))?;
+
+ // get the proof
+ let proofs = self
+ .client
+ .blob_get_proof(height, self.namespace.clone(), celestia_blob.commitment)
+ .await?;
+
+ // get the leaves
+ let leaves = celestia_blob.to_shares()?;
+
+ // check if included
+ for proof in proofs.iter() {
+ proof
+ .verify_complete_namespace(&root_hash, &leaves, self.namespace.into())
+ .map_err(|e| anyhow::anyhow!("Failed to verify proof: {:?}", e))?;
+ }
+
+ Ok(true)
+ }
+
+ async fn verify_cowboy(
+ &self,
+ _verification_mode: VerificationMode,
+ _blob: &[u8],
+ _height: u64,
+ ) -> Result<bool, anyhow::Error> {
+ unimplemented!()
+ }
+
+ async fn verify_m_of_n(
+ &self,
+ _verification_mode: VerificationMode,
+ _blob: &[u8],
+ _height: u64,
+ ) -> Result<bool, anyhow::Error> {
+ unimplemented!()
+ }
+
+ async fn verify_validator_in(
+ &self,
+ _verification_mode: VerificationMode,
+ _blob: &[u8],
+ _height: u64,
+ ) -> Result<bool, anyhow::Error> {
+ unimplemented!()
+ }
}
#[cfg(test)]
pub mod test {
- use super::*;
- use celestia_types::blob::GasPrice;
- use m1_da_light_node_util::Config;
+ use super::*;
+ use celestia_types::blob::GasPrice;
+ use m1_da_light_node_util::Config;
- /// todo: Investigate why this test sporadically fails.
- #[tokio::test]
- pub async fn test_valid_verifies() -> Result<(), anyhow::Error> {
- let config = Config::try_from_env()?;
- let client = Arc::new(config.connect_celestia().await?);
+ /// todo: Investigate why this test sporadically fails.
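// A minimal sketch, not from this diff: one way to de-flake a test that races
// header availability is to retry the whole check with a short backoff. The
// helper name `with_retries` is hypothetical, not something in this tree.
async fn with_retries<F, Fut, T>(attempts: u32, mut f: F) -> Result<T, anyhow::Error>
where
	F: FnMut() -> Fut,
	Fut: std::future::Future<Output = Result<T, anyhow::Error>>,
{
	let mut last = None;
	for _ in 0..attempts {
		match f().await {
			Ok(value) => return Ok(value),
			Err(e) => {
				// remember the error and back off before the next attempt
				last = Some(e);
				tokio::time::sleep(std::time::Duration::from_millis(500)).await;
			}
		}
	}
	Err(last.unwrap_or_else(|| anyhow::anyhow!("zero attempts requested")))
}
// e.g.: with_retries(3, || verifier.verify(VerificationMode::Cowboy, &data, height)).await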
+ #[tokio::test]
+ pub async fn test_valid_verifies() -> Result<(), anyhow::Error> {
+ let config = Config::try_from_env()?;
+ let client = Arc::new(config.connect_celestia().await?);
- let verifier =
- V1Verifier { client: client.clone(), namespace: config.celestia_namespace.clone() };
+ let verifier =
+ V1Verifier { client: client.clone(), namespace: config.celestia_namespace.clone() };
- let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
- let blob = Blob::new(config.celestia_namespace.clone(), data.clone())?;
+ let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let blob = Blob::new(config.celestia_namespace.clone(), data.clone())?;
- let height = client.blob_submit(&[blob], GasPrice::default()).await?;
+ let height = client.blob_submit(&[blob], GasPrice::default()).await?;
- let included = verifier.verify(VerificationMode::Cowboy, &data, height).await?;
+ let included = verifier.verify(VerificationMode::Cowboy, &data, height).await?;
- assert!(included);
+ assert!(included);
- Ok(())
- }
+ Ok(())
+ }
- #[tokio::test]
- pub async fn test_absent_does_not_verify() -> Result<(), anyhow::Error> {
- let config = Config::try_from_env()?;
- let client = Arc::new(config.connect_celestia().await?);
+ #[tokio::test]
+ pub async fn test_absent_does_not_verify() -> Result<(), anyhow::Error> {
+ let config = Config::try_from_env()?;
+ let client = Arc::new(config.connect_celestia().await?);
- let verifier =
- V1Verifier { client: client.clone(), namespace: config.celestia_namespace.clone() };
+ let verifier =
+ V1Verifier { client: client.clone(), namespace: config.celestia_namespace.clone() };
- let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
- let blob = Blob::new(config.celestia_namespace.clone(), data.clone())?;
+ let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let blob = Blob::new(config.celestia_namespace.clone(), data.clone())?;
- let height = client.blob_submit(&[blob], GasPrice::default()).await?;
+ let height = client.blob_submit(&[blob], GasPrice::default()).await?;
- let included = verifier.verify(VerificationMode::Cowboy, &data, height).await?;
+ let included = verifier.verify(VerificationMode::Cowboy, &data, height).await?;
- assert!(included);
+ assert!(included);
- let absent_data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 7];
+ let absent_data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 7];
- let absent_included = verifier.verify(VerificationMode::Cowboy, &absent_data, height).await;
+ let absent_included = verifier.verify(VerificationMode::Cowboy, &absent_data, height).await;
- match absent_included {
- Ok(_) => {
- assert!(false, "Should not have verified")
- }
- Err(_) => {}
- }
+ match absent_included {
+ Ok(_) => {
+ panic!("Should not have verified")
+ }
+ Err(_) => {}
+ }
- Ok(())
- }
+ Ok(())
+ }
- #[tokio::test]
- pub async fn test_wrong_height_does_not_verify() -> Result<(), anyhow::Error> {
- let config = Config::try_from_env()?;
- let client = Arc::new(config.connect_celestia().await?);
+ #[tokio::test]
+ pub async fn test_wrong_height_does_not_verify() -> Result<(), anyhow::Error> {
+ let config = Config::try_from_env()?;
+ let client = Arc::new(config.connect_celestia().await?);
- let verifier =
- V1Verifier { client: client.clone(), namespace: config.celestia_namespace.clone() };
+ let verifier =
+ V1Verifier { client: client.clone(), namespace: config.celestia_namespace.clone() };
- let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
- let blob = Blob::new(config.celestia_namespace.clone(), data.clone())?;
+ let data = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+ let blob =
Blob::new(config.celestia_namespace.clone(), data.clone())?;
- let height = client.blob_submit(&[blob], GasPrice::default()).await?;
+ let height = client.blob_submit(&[blob], GasPrice::default()).await?;
- let included = verifier.verify(VerificationMode::Cowboy, &data, height).await?;
+ let included = verifier.verify(VerificationMode::Cowboy, &data, height).await?;
- assert!(included);
+ assert!(included);
- let wrong_height_included =
- verifier.verify(VerificationMode::Cowboy, &data, height + 1).await;
+ let wrong_height_included =
+ verifier.verify(VerificationMode::Cowboy, &data, height + 1).await;
- match wrong_height_included {
- Ok(_) => {
- assert!(false, "Should not have verified")
- }
- Err(_) => {}
- }
+ match wrong_height_included {
+ Ok(_) => {
+ panic!("Should not have verified")
+ }
+ Err(_) => {}
+ }
- Ok(())
- }
+ Ok(())
+ }
}
diff --git a/protocol-units/da/m1/light-node/src/main.rs b/protocol-units/da/m1/light-node/src/main.rs
index d6796a501..279c9f87b 100644
--- a/protocol-units/da/m1/light-node/src/main.rs
+++ b/protocol-units/da/m1/light-node/src/main.rs
@@ -2,10 +2,10 @@ use m1_da_light_node::v1::{LightNodeV1, LightNodeV1Operations};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
- // TODO: set up tracing-subscriber if the "logging" feature is enabled
+ // TODO: set up tracing-subscriber if the "logging" feature is enabled
- let light_node = LightNodeV1::try_from_env().await?;
- light_node.run().await?;
+ let light_node = LightNodeV1::try_from_env().await?;
+ light_node.run().await?;
- Ok(())
+ Ok(())
}
diff --git a/protocol-units/da/m1/light-node/src/v1/dynamic.rs b/protocol-units/da/m1/light-node/src/v1/dynamic.rs
index c93bd5fe1..2cadc46b3 100644
--- a/protocol-units/da/m1/light-node/src/v1/dynamic.rs
+++ b/protocol-units/da/m1/light-node/src/v1/dynamic.rs
@@ -5,131 +5,130 @@ use tokio_stream::Stream;
#[derive(Clone)]
pub enum LightNodeV1 {
- PassThrough(passthrough::LightNodeV1),
- Sequencer(sequencer::LightNodeV1),
+ PassThrough(passthrough::LightNodeV1),
+ Sequencer(sequencer::LightNodeV1),
}
impl LightNodeV1Operations for LightNodeV1 {
- async fn try_from_env() -> Result<Self, anyhow::Error> {
- let which =
- std::env::var("M1_DA_LIGHT_NODE_MODE").unwrap_or_else(|_|
"passthrough".to_string()); + pub async fn try_from_env() -> Result { + let which = + std::env::var("M1_DA_LIGHT_NODE_MODE").unwrap_or_else(|_| "passthrough".to_string()); - match which.as_str() { - "passthrough" => Ok(Self::PassThrough(passthrough::LightNodeV1::try_from_env().await?)), - "sequencer" => Ok(Self::Sequencer(sequencer::LightNodeV1::try_from_env().await?)), - _ => Err(anyhow::anyhow!("Unknown mode: {}", which)), - } - } + match which.as_str() { + "passthrough" => Ok(Self::PassThrough(passthrough::LightNodeV1::try_from_env().await?)), + "sequencer" => Ok(Self::Sequencer(sequencer::LightNodeV1::try_from_env().await?)), + _ => Err(anyhow::anyhow!("Unknown mode: {}", which)), + } + } } #[tonic::async_trait] impl LightNodeService for LightNodeV1 { - /// Server streaming response type for the StreamReadFromHeight method. - type StreamReadFromHeightStream = std::pin::Pin< - Box< - dyn Stream> + Send + 'static, - >, - >; + /// Server streaming response type for the StreamReadFromHeight method. + type StreamReadFromHeightStream = std::pin::Pin< + Box< + dyn Stream> + Send + 'static, + >, + >; - /// Stream blobs from a specified height or from the latest height. - async fn stream_read_from_height( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - match self { - Self::PassThrough(pass_through) => pass_through.stream_read_from_height(request).await, - Self::Sequencer(sequencer) => sequencer.stream_read_from_height(request).await, - } - } + /// Stream blobs from a specified height or from the latest height. + async fn stream_read_from_height( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + match self { + Self::PassThrough(pass_through) => pass_through.stream_read_from_height(request).await, + Self::Sequencer(sequencer) => sequencer.stream_read_from_height(request).await, + } + } - /// Server streaming response type for the StreamReadLatest method. - type StreamReadLatestStream = std::pin::Pin< - Box> + Send + 'static>, - >; + /// Server streaming response type for the StreamReadLatest method. + type StreamReadLatestStream = std::pin::Pin< + Box> + Send + 'static>, + >; - /// Stream the latest blobs. - async fn stream_read_latest( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - match self { - Self::PassThrough(pass_through) => pass_through.stream_read_latest(request).await, - Self::Sequencer(sequencer) => sequencer.stream_read_latest(request).await, - } - } - /// Server streaming response type for the StreamWriteCelestiaBlob method. - type StreamWriteBlobStream = std::pin::Pin< - Box> + Send + 'static>, - >; - /// Stream blobs out, either individually or in batches. - async fn stream_write_blob( - &self, - _request: tonic::Request>, - ) -> std::result::Result, tonic::Status> { - unimplemented!("StreamWriteBlob not implemented") - } - /// Read blobs at a specified height. - async fn read_at_height( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - match self { - Self::PassThrough(pass_through) => pass_through.read_at_height(request).await, - Self::Sequencer(sequencer) => sequencer.read_at_height(request).await, - } - } - /// Batch read and write operations for efficiency. 
- async fn batch_read(
- &self,
- request: tonic::Request<BatchReadRequest>,
- ) -> std::result::Result<tonic::Response<BatchReadResponse>, tonic::Status> {
- match self {
- Self::PassThrough(pass_through) => pass_through.batch_read(request).await,
- Self::Sequencer(sequencer) => sequencer.batch_read(request).await,
- }
- }
+ /// Stream the latest blobs.
+ async fn stream_read_latest(
+ &self,
+ request: tonic::Request<StreamReadLatestRequest>,
+ ) -> std::result::Result<tonic::Response<Self::StreamReadLatestStream>, tonic::Status> {
+ match self {
+ Self::PassThrough(pass_through) => pass_through.stream_read_latest(request).await,
+ Self::Sequencer(sequencer) => sequencer.stream_read_latest(request).await,
+ }
+ }
+ /// Server streaming response type for the StreamWriteBlob method.
+ type StreamWriteBlobStream = std::pin::Pin<
+ Box<dyn Stream<Item = std::result::Result<StreamWriteBlobResponse, tonic::Status>> + Send + 'static>,
+ >;
+ /// Stream blobs out, either individually or in batches.
+ async fn stream_write_blob(
+ &self,
+ _request: tonic::Request<tonic::Streaming<StreamWriteBlobRequest>>,
+ ) -> std::result::Result<tonic::Response<Self::StreamWriteBlobStream>, tonic::Status> {
+ unimplemented!("StreamWriteBlob not implemented")
+ }
+ /// Read blobs at a specified height.
+ async fn read_at_height(
+ &self,
+ request: tonic::Request<ReadAtHeightRequest>,
+ ) -> std::result::Result<tonic::Response<ReadAtHeightResponse>, tonic::Status> {
+ match self {
+ Self::PassThrough(pass_through) => pass_through.read_at_height(request).await,
+ Self::Sequencer(sequencer) => sequencer.read_at_height(request).await,
+ }
+ }
+ /// Batch read and write operations for efficiency.
+ async fn batch_read(
+ &self,
+ request: tonic::Request<BatchReadRequest>,
+ ) -> std::result::Result<tonic::Response<BatchReadResponse>, tonic::Status> {
+ match self {
+ Self::PassThrough(pass_through) => pass_through.batch_read(request).await,
+ Self::Sequencer(sequencer) => sequencer.batch_read(request).await,
+ }
+ }
- /// Batch write blobs.
- async fn batch_write(
- &self,
- request: tonic::Request<BatchWriteRequest>,
- ) -> std::result::Result<tonic::Response<BatchWriteResponse>, tonic::Status> {
- match self {
- Self::PassThrough(pass_through) => pass_through.batch_write(request).await,
- Self::Sequencer(sequencer) => sequencer.batch_write(request).await,
- }
- }
- /// Update and manage verification parameters.
- async fn update_verification_parameters(
- &self,
- request: tonic::Request<UpdateVerificationParametersRequest>,
- ) -> std::result::Result<tonic::Response<UpdateVerificationParametersResponse>, tonic::Status>
- {
- match self {
- Self::PassThrough(pass_through) => {
- pass_through.update_verification_parameters(request).await
- }
- Self::Sequencer(sequencer) => sequencer.update_verification_parameters(request).await,
- }
- }
+ /// Batch write blobs.
+ async fn batch_write(
+ &self,
+ request: tonic::Request<BatchWriteRequest>,
+ ) -> std::result::Result<tonic::Response<BatchWriteResponse>, tonic::Status> {
+ match self {
+ Self::PassThrough(pass_through) => pass_through.batch_write(request).await,
+ Self::Sequencer(sequencer) => sequencer.batch_write(request).await,
+ }
+ }
+ /// Update and manage verification parameters.
+ async fn update_verification_parameters(
+ &self,
+ request: tonic::Request<UpdateVerificationParametersRequest>,
+ ) -> std::result::Result<tonic::Response<UpdateVerificationParametersResponse>, tonic::Status> {
+ match self {
+ Self::PassThrough(pass_through) => {
+ pass_through.update_verification_parameters(request).await
+ }
+ Self::Sequencer(sequencer) => sequencer.update_verification_parameters(request).await,
+ }
+ }
}
diff --git a/protocol-units/da/m1/light-node/src/v1/mod.rs b/protocol-units/da/m1/light-node/src/v1/mod.rs
index 4c363e58b..d196c88a2 100644
--- a/protocol-units/da/m1/light-node/src/v1/mod.rs
+++ b/protocol-units/da/m1/light-node/src/v1/mod.rs
@@ -17,44 +17,44 @@ use m1_da_light_node_grpc::light_node_service_server::{LightNodeService, LightNodeServiceServer};
use tonic::transport::Server;
pub trait LightNodeV1Operations: LightNodeService + Send + Sync + Sized + Clone {
- /// Initializes from environment variables.
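// A minimal sketch, not from this diff: the try_from_env convention used
// across these crates is "required variables hard-error, optional variables
// fall back to documented defaults". The variable names below are the real
// ones from m1-da-light-node-util further down; the struct is trimmed to two
// fields for illustration.
struct MinimalDaConfig {
	celestia_url: String,
	celestia_token: String,
}

fn minimal_da_config_from_env() -> Result<MinimalDaConfig, anyhow::Error> {
	Ok(MinimalDaConfig {
		// no sane default exists for an auth token, so it is required
		celestia_token: std::env::var("CELESTIA_NODE_AUTH_TOKEN")
			.map_err(|_| anyhow::anyhow!("Token not provided"))?,
		// the node URL falls back to the local websocket endpoint
		celestia_url: std::env::var("CELESTIA_NODE_URL")
			.unwrap_or_else(|_| "ws://localhost:26658".to_string()),
	})
}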
- async fn try_from_env() -> Result; - - /// Runs the background tasks. - async fn run_background_tasks(&self) -> Result<(), anyhow::Error>; - - /// Runs the server - async fn run_server(&self) -> Result<(), anyhow::Error> { - let reflection = tonic_reflection::server::Builder::configure() - .register_encoded_file_descriptor_set(m1_da_light_node_grpc::FILE_DESCRIPTOR_SET) - .build()?; - - let env_addr = - std::env::var("M1_DA_LIGHT_NODE_ADDR").unwrap_or_else(|_| "0.0.0.0:30730".to_string()); - let addr = env_addr.parse()?; - - Server::builder() - .accept_http1(true) - .add_service(LightNodeServiceServer::new(self.clone())) - .add_service(reflection) - .serve(addr) - .await?; - - Ok(()) - } - - /// Runs the server and the background tasks. - async fn run(self) -> Result<(), anyhow::Error> { - let background_handle = self.run_background_tasks(); - - let background_tasks = async move { - background_handle.await?; - Ok::<_, anyhow::Error>(()) - }; - let server = self.run_server(); - - tokio::try_join!(server, background_tasks)?; - - Ok(()) - } + /// Initializes from environment variables. + async fn try_from_env() -> Result; + + /// Runs the background tasks. + async fn run_background_tasks(&self) -> Result<(), anyhow::Error>; + + /// Runs the server + async fn run_server(&self) -> Result<(), anyhow::Error> { + let reflection = tonic_reflection::server::Builder::configure() + .register_encoded_file_descriptor_set(m1_da_light_node_grpc::FILE_DESCRIPTOR_SET) + .build()?; + + let env_addr = + std::env::var("M1_DA_LIGHT_NODE_ADDR").unwrap_or_else(|_| "0.0.0.0:30730".to_string()); + let addr = env_addr.parse()?; + + Server::builder() + .accept_http1(true) + .add_service(LightNodeServiceServer::new(self.clone())) + .add_service(reflection) + .serve(addr) + .await?; + + Ok(()) + } + + /// Runs the server and the background tasks. + async fn run(self) -> Result<(), anyhow::Error> { + let background_handle = self.run_background_tasks(); + + let background_tasks = async move { + background_handle.await?; + Ok::<_, anyhow::Error>(()) + }; + let server = self.run_server(); + + tokio::try_join!(server, background_tasks)?; + + Ok(()) + } } diff --git a/protocol-units/da/m1/light-node/src/v1/passthrough.rs b/protocol-units/da/m1/light-node/src/v1/passthrough.rs index 752206542..4db1efb68 100644 --- a/protocol-units/da/m1/light-node/src/v1/passthrough.rs +++ b/protocol-units/da/m1/light-node/src/v1/passthrough.rs @@ -17,421 +17,420 @@ use crate::v1::LightNodeV1Operations; #[derive(Clone)] pub struct LightNodeV1 { - pub celestia_url: String, - pub celestia_token: String, - pub celestia_namespace: Namespace, - pub default_client: Arc, - pub verification_mode: Arc>, - pub verifier: Arc>, + pub celestia_url: String, + pub celestia_token: String, + pub celestia_namespace: Namespace, + pub default_client: Arc, + pub verification_mode: Arc>, + pub verifier: Arc>, } impl LightNodeV1Operations for LightNodeV1 { - /// Tries to create a new LightNodeV1 instance from the environment variables. 
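// A minimal sketch, not from this diff: `run` above joins the gRPC server
// future with the background tasks via `tokio::try_join!`, so a failure on
// either side tears the whole node down. The same shape in isolation
// (`serve` and `background` are hypothetical stand-ins):
async fn serve() -> Result<(), anyhow::Error> {
	// bind and serve until shutdown or error
	Ok(())
}

async fn background() -> Result<(), anyhow::Error> {
	// e.g. the sequencer's block-proposer loop
	Ok(())
}

async fn run_both() -> Result<(), anyhow::Error> {
	// both futures are polled concurrently; the first Err short-circuits
	tokio::try_join!(serve(), background())?;
	Ok(())
}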
- async fn try_from_env() -> Result { - let config = Config::try_from_env()?; - let client = Arc::new(config.connect_celestia().await?); - - Ok(Self { - celestia_url: config.celestia_url, - celestia_token: config.celestia_token, - celestia_namespace: config.celestia_namespace, - default_client: client.clone(), - verification_mode: Arc::new(RwLock::new(config.verification_mode)), - verifier: Arc::new(Box::new(V1Verifier { - client: client, - namespace: config.celestia_namespace.clone(), - })), - }) - } - - /// Runs background tasks for the LightNodeV1 instance. - async fn run_background_tasks(&self) -> Result<(), anyhow::Error> { - Ok(()) - } + /// Tries to create a new LightNodeV1 instance from the environment variables. + async fn try_from_env() -> Result { + let config = Config::try_from_env()?; + let client = Arc::new(config.connect_celestia().await?); + + Ok(Self { + celestia_url: config.celestia_url, + celestia_token: config.celestia_token, + celestia_namespace: config.celestia_namespace, + default_client: client.clone(), + verification_mode: Arc::new(RwLock::new(config.verification_mode)), + verifier: Arc::new(Box::new(V1Verifier { + client: client, + namespace: config.celestia_namespace.clone(), + })), + }) + } + + /// Runs background tasks for the LightNodeV1 instance. + async fn run_background_tasks(&self) -> Result<(), anyhow::Error> { + Ok(()) + } } impl LightNodeV1 { - /// Gets a new Celestia client instance with the matching params. - pub async fn get_new_celestia_client(&self) -> Result { - Client::new(&self.celestia_url, Some(&self.celestia_token)) - .await - .map_err(|e| anyhow::anyhow!("Failed to create Celestia client: {}", e)) - } - - /// Creates a new blob instance with the provided data. - pub fn create_new_celestia_blob(&self, data: Vec) -> Result { - CelestiaBlob::new(self.celestia_namespace, data) - .map_err(|e| anyhow::anyhow!("Failed to create a blob: {}", e)) - } - - /// Submits a CelestiaNlob to the Celestia node. - pub async fn submit_celestia_blob(&self, blob: CelestiaBlob) -> Result { - let height = self - .default_client - .blob_submit(&[blob], GasPrice::default()) - .await - .map_err(|e| anyhow::anyhow!("Failed submitting the blob: {}", e))?; - - Ok(height) - } - - /// Submits a blob to the Celestia node. - pub async fn submit_blob(&self, data: Vec) -> Result { - let celestia_blob = self.create_new_celestia_blob(data)?; - let height = self.submit_celestia_blob(celestia_blob.clone()).await?; - Ok(Self::celestia_blob_to_blob(celestia_blob, height)?) - } - - /// Gets the blobs at a given height. - pub async fn get_celestia_blobs_at_height( - &self, - height: u64, - ) -> Result, anyhow::Error> { - let blobs = self.default_client.blob_get_all(height, &[self.celestia_namespace]).await; - - if let Err(e) = &blobs { - debug!("Error getting blobs: {:?}", e); - } - - let blobs = blobs.unwrap_or_default(); - - let mut verified_blobs = Vec::new(); - for blob in blobs { - debug!("Verifying blob"); - - let blob_data = blob.data.clone(); - - // todo: improve error boundary here to detect crashes - let verified = self - .verifier - .verify(*self.verification_mode.read().await, &blob_data, height) - .await; - - if let Err(e) = &verified { - debug!("Error verifying blob: {:?}", e); - } - - // FIXME: check the implications of treating errors as verification success. - // @l-monninger: under the assumption we are running a light node in the same - // trusted setup and have not experience a highly intrusive(?), the vulnerability here - // is fairly low. 
The light node should take care of verification on its own. - let verified = verified.unwrap_or(true); - - if verified { - verified_blobs.push(blob); - } - } - - Ok(verified_blobs) - } - - pub async fn get_blobs_at_height(&self, height: u64) -> Result, anyhow::Error> { - let celestia_blobs = self.get_celestia_blobs_at_height(height).await?; - let mut blobs = Vec::new(); - for blob in celestia_blobs { - blobs.push(Self::celestia_blob_to_blob(blob, height)?); - } - Ok(blobs) - } - - /// Streams blobs until it can't get another one in the loop - pub async fn stream_blobs_in_range( - &self, - start_height: u64, - end_height: Option, - ) -> Result< - std::pin::Pin> + Send>>, - anyhow::Error, - > { - let mut height = start_height; - let end_height = end_height.unwrap_or_else(|| u64::MAX); - let me = Arc::new(self.clone()); - - let stream = async_stream::try_stream! { - loop { - if height > end_height { - break; - } - - let blobs = me.get_blobs_at_height(height).await?; - for blob in blobs { - yield blob; - } - height += 1; - } - }; - - Ok(Box::pin(stream) - as std::pin::Pin> + Send>>) - } - - /// Streams the latest blobs that can subscribed to. - async fn stream_blobs_from_height_on( - &self, - start_height: Option, - ) -> Result< - std::pin::Pin> + Send>>, - anyhow::Error, - > { - let start_height = start_height.unwrap_or_else(|| u64::MAX); - let me = Arc::new(self.clone()); - let mut subscription = me.default_client.header_subscribe().await?; - - let stream = async_stream::try_stream! { - let mut first_flag = true; - while let Some(header_res) = subscription.next().await { - - let header = header_res?; - let height = header.height().into(); - - debug!("Stream got header: {:?}", header.height()); - - // back fetch the blobs - if first_flag && (height > start_height) { - - let mut blob_stream = me.stream_blobs_in_range(start_height, Some(height)).await?; - - while let Some(blob) = blob_stream.next().await { - - debug!("Stream got blob: {:?}", blob); - - yield blob?; - } - - } - first_flag = false; - - let blobs = me.get_blobs_at_height(height).await?; - for blob in blobs { - - debug!("Stream got blob: {:?}", blob); - - yield blob; - } - } - }; - - Ok(Box::pin(stream) - as std::pin::Pin> + Send>>) - } - - pub fn celestia_blob_to_blob(blob: CelestiaBlob, height: u64) -> Result { - let timestamp = chrono::Utc::now().timestamp() as u64; - - Ok(Blob { - data: blob.data, - blob_id: serde_json::to_string(&blob.commitment) - .map_err(|e| anyhow::anyhow!("Failed to serialize commitment: {}", e))?, - height, - timestamp, - }) - } - - pub fn blob_to_blob_write_response(blob: Blob) -> Result { - Ok(BlobResponse { blob_type: Some(blob_response::BlobType::PassedThroughBlob(blob)) }) - } - - pub fn blob_to_blob_read_response(blob: Blob) -> Result { - #[cfg(feature = "sequencer")] - { - Ok(BlobResponse { blob_type: Some(blob_response::BlobType::SequencedBlobBlock(blob)) }) - } - - #[cfg(not(feature = "sequencer"))] - { - Ok(BlobResponse { blob_type: Some(blob_response::BlobType::PassedThroughBlob(blob)) }) - } - } + /// Gets a new Celestia client instance with the matching params. + pub async fn get_new_celestia_client(&self) -> Result { + Client::new(&self.celestia_url, Some(&self.celestia_token)) + .await + .map_err(|e| anyhow::anyhow!("Failed to create Celestia client: {}", e)) + } + + /// Creates a new blob instance with the provided data. 
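// A minimal sketch, not from this diff, of the write path implemented below:
// wrap raw bytes in a namespaced blob, submit it, and keep the returned
// inclusion height so the blob can be fetched and verified later. `SketchBlob`
// and `submit_to_da` are hypothetical stand-ins for the Celestia blob type and
// the `blob_submit` call; the namespace bytes are the default from the util crate.
struct SketchBlob {
	namespace: [u8; 10],
	data: Vec<u8>,
}

const DEFAULT_NAMESPACE: [u8; 10] = [0xa6, 0x73, 0x00, 0x6f, 0xb6, 0x4a, 0xa2, 0xe5, 0x36, 0x0d];

async fn submit_to_da(_blob: &SketchBlob) -> Result<u64, anyhow::Error> {
	// stand-in for client.blob_submit(&[blob], GasPrice::default())
	Ok(42)
}

async fn write_blob(data: Vec<u8>) -> Result<(SketchBlob, u64), anyhow::Error> {
	let blob = SketchBlob { namespace: DEFAULT_NAMESPACE, data };
	let height = submit_to_da(&blob).await?;
	Ok((blob, height))
}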
+ pub fn create_new_celestia_blob(&self, data: Vec<u8>) -> Result<CelestiaBlob, anyhow::Error> {
+ CelestiaBlob::new(self.celestia_namespace, data)
+ .map_err(|e| anyhow::anyhow!("Failed to create a blob: {}", e))
+ }
+
+ /// Submits a CelestiaBlob to the Celestia node.
+ pub async fn submit_celestia_blob(&self, blob: CelestiaBlob) -> Result<u64, anyhow::Error> {
+ let height = self
+ .default_client
+ .blob_submit(&[blob], GasPrice::default())
+ .await
+ .map_err(|e| anyhow::anyhow!("Failed submitting the blob: {}", e))?;
+
+ Ok(height)
+ }
+
+ /// Submits a blob to the Celestia node.
+ pub async fn submit_blob(&self, data: Vec<u8>) -> Result<Blob, anyhow::Error> {
+ let celestia_blob = self.create_new_celestia_blob(data)?;
+ let height = self.submit_celestia_blob(celestia_blob.clone()).await?;
+ Ok(Self::celestia_blob_to_blob(celestia_blob, height)?)
+ }
+
+ /// Gets the blobs at a given height.
+ pub async fn get_celestia_blobs_at_height(
+ &self,
+ height: u64,
+ ) -> Result<Vec<CelestiaBlob>, anyhow::Error> {
+ let blobs = self.default_client.blob_get_all(height, &[self.celestia_namespace]).await;
+
+ if let Err(e) = &blobs {
+ debug!("Error getting blobs: {:?}", e);
+ }
+
+ let blobs = blobs.unwrap_or_default();
+
+ let mut verified_blobs = Vec::new();
+ for blob in blobs {
+ debug!("Verifying blob");
+
+ let blob_data = blob.data.clone();
+
+ // todo: improve error boundary here to detect crashes
+ let verified = self
+ .verifier
+ .verify(*self.verification_mode.read().await, &blob_data, height)
+ .await;
+
+ if let Err(e) = &verified {
+ debug!("Error verifying blob: {:?}", e);
+ }
+
+ // FIXME: check the implications of treating errors as verification success.
+ // @l-monninger: under the assumption we are running a light node in the same
+ // trusted setup and have not experienced a highly intrusive attack(?), the
+ // vulnerability here is fairly low. The light node should take care of
+ // verification on its own.
+ let verified = verified.unwrap_or(true);
+
+ if verified {
+ verified_blobs.push(blob);
+ }
+ }
+
+ Ok(verified_blobs)
+ }
+
+ pub async fn get_blobs_at_height(&self, height: u64) -> Result<Vec<Blob>, anyhow::Error> {
+ let celestia_blobs = self.get_celestia_blobs_at_height(height).await?;
+ let mut blobs = Vec::new();
+ for blob in celestia_blobs {
+ blobs.push(Self::celestia_blob_to_blob(blob, height)?);
+ }
+ Ok(blobs)
+ }
+
+ /// Streams blobs until it can't get another one in the loop.
+ pub async fn stream_blobs_in_range(
+ &self,
+ start_height: u64,
+ end_height: Option<u64>,
+ ) -> Result<
+ std::pin::Pin<Box<dyn Stream<Item = Result<Blob, anyhow::Error>> + Send>>,
+ anyhow::Error,
+ > {
+ let mut height = start_height;
+ let end_height = end_height.unwrap_or_else(|| u64::MAX);
+ let me = Arc::new(self.clone());
+
+ let stream = async_stream::try_stream! {
+ loop {
+ if height > end_height {
+ break;
+ }
+
+ let blobs = me.get_blobs_at_height(height).await?;
+ for blob in blobs {
+ yield blob;
+ }
+ height += 1;
+ }
+ };
+
+ Ok(Box::pin(stream)
+ as std::pin::Pin<Box<dyn Stream<Item = Result<Blob, anyhow::Error>> + Send>>)
+ }
+
+ /// Streams the latest blobs that can be subscribed to.
+ async fn stream_blobs_from_height_on(
+ &self,
+ start_height: Option<u64>,
+ ) -> Result<
+ std::pin::Pin<Box<dyn Stream<Item = Result<Blob, anyhow::Error>> + Send>>,
+ anyhow::Error,
+ > {
+ let start_height = start_height.unwrap_or_else(|| u64::MAX);
+ let me = Arc::new(self.clone());
+ let mut subscription = me.default_client.header_subscribe().await?;
+
+ let stream = async_stream::try_stream!
{ + let mut first_flag = true; + while let Some(header_res) = subscription.next().await { + + let header = header_res?; + let height = header.height().into(); + + debug!("Stream got header: {:?}", header.height()); + + // back fetch the blobs + if first_flag && (height > start_height) { + + let mut blob_stream = me.stream_blobs_in_range(start_height, Some(height)).await?; + + while let Some(blob) = blob_stream.next().await { + + debug!("Stream got blob: {:?}", blob); + + yield blob?; + } + + } + first_flag = false; + + let blobs = me.get_blobs_at_height(height).await?; + for blob in blobs { + + debug!("Stream got blob: {:?}", blob); + + yield blob; + } + } + }; + + Ok(Box::pin(stream) + as std::pin::Pin> + Send>>) + } + + pub fn celestia_blob_to_blob(blob: CelestiaBlob, height: u64) -> Result { + let timestamp = chrono::Utc::now().timestamp() as u64; + + Ok(Blob { + data: blob.data, + blob_id: serde_json::to_string(&blob.commitment) + .map_err(|e| anyhow::anyhow!("Failed to serialize commitment: {}", e))?, + height, + timestamp, + }) + } + + pub fn blob_to_blob_write_response(blob: Blob) -> Result { + Ok(BlobResponse { blob_type: Some(blob_response::BlobType::PassedThroughBlob(blob)) }) + } + + pub fn blob_to_blob_read_response(blob: Blob) -> Result { + #[cfg(feature = "sequencer")] + { + Ok(BlobResponse { blob_type: Some(blob_response::BlobType::SequencedBlobBlock(blob)) }) + } + + #[cfg(not(feature = "sequencer"))] + { + Ok(BlobResponse { blob_type: Some(blob_response::BlobType::PassedThroughBlob(blob)) }) + } + } } #[tonic::async_trait] impl LightNodeService for LightNodeV1 { - /// Server streaming response type for the StreamReadFromHeight method. - type StreamReadFromHeightStream = std::pin::Pin< - Box< - dyn Stream> + Send + 'static, - >, - >; - - /// Stream blobs from a specified height or from the latest height. - async fn stream_read_from_height( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - let me = Arc::new(self.clone()); - let height = request.into_inner().height; - - let output = async_stream::try_stream! { - - let mut blob_stream = me.stream_blobs_from_height_on(Some(height)).await.map_err(|e| tonic::Status::internal(e.to_string()))?; - - while let Some(blob) = blob_stream.next().await { - let blob = blob.map_err(|e| tonic::Status::internal(e.to_string()))?; - let response = StreamReadFromHeightResponse { - blob : Some(Self::blob_to_blob_read_response(blob).map_err(|e| tonic::Status::internal(e.to_string()))?) - }; - yield response; - } - - }; - - Ok(tonic::Response::new(Box::pin(output) as Self::StreamReadFromHeightStream)) - } - - /// Server streaming response type for the StreamReadLatest method. - type StreamReadLatestStream = std::pin::Pin< - Box> + Send + 'static>, - >; - - /// Stream the latest blobs. - async fn stream_read_latest( - &self, - _request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - let me = Arc::new(self.clone()); - - let output = async_stream::try_stream! { - - let mut blob_stream = me.stream_blobs_from_height_on(None).await.map_err(|e| tonic::Status::internal(e.to_string()))?; - while let Some(blob) = blob_stream.next().await { - let blob = blob.map_err(|e| tonic::Status::internal(e.to_string()))?; - let response = StreamReadLatestResponse { - blob : Some(Self::blob_to_blob_read_response(blob).map_err(|e| tonic::Status::internal(e.to_string()))?) 
- }; - yield response; - } - - }; - - Ok(tonic::Response::new(Box::pin(output) as Self::StreamReadLatestStream)) - } - /// Server streaming response type for the StreamWriteCelestiaBlob method. - type StreamWriteBlobStream = std::pin::Pin< - Box> + Send + 'static>, - >; - /// Stream blobs out, either individually or in batches. - async fn stream_write_blob( - &self, - request: tonic::Request>, - ) -> std::result::Result, tonic::Status> { - let mut stream = request.into_inner(); - let me = Arc::new(self.clone()); - - let output = async_stream::try_stream! { - - while let Some(request) = stream.next().await { - let request = request?; - let blob_data = request.blob.ok_or(tonic::Status::invalid_argument("No blob in request"))?.data; - - let blob = me.submit_blob(blob_data).await.map_err(|e| tonic::Status::internal(e.to_string()))?; - - let write_response = StreamWriteBlobResponse { - blob : Some(Self::blob_to_blob_read_response(blob).map_err(|e| tonic::Status::internal(e.to_string()))?) - }; - - yield write_response; - - } - }; - - Ok(tonic::Response::new(Box::pin(output) as Self::StreamWriteBlobStream)) - } - /// Read blobs at a specified height. - async fn read_at_height( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - let height = request.into_inner().height; - let blobs = self - .get_blobs_at_height(height) - .await - .map_err(|e| tonic::Status::internal(e.to_string()))?; - - if blobs.is_empty() { - return Err(tonic::Status::not_found("No blobs found at the specified height")); - } - - let mut blob_responses = Vec::new(); - for blob in blobs { - blob_responses.push( - Self::blob_to_blob_read_response(blob) - .map_err(|e| tonic::Status::internal(e.to_string()))?, - ); - } - - Ok(tonic::Response::new(ReadAtHeightResponse { - // map blobs to the response type - blobs: blob_responses, - })) - } - /// Batch read and write operations for efficiency. - async fn batch_read( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - let heights = request.into_inner().heights; - let mut responses = Vec::with_capacity(heights.len()); - for height in heights { - let blobs = self - .get_blobs_at_height(height) - .await - .map_err(|e| tonic::Status::internal(e.to_string()))?; - - if blobs.is_empty() { - return Err(tonic::Status::not_found("No blobs found at the specified height")); - } - - let mut blob_responses = Vec::new(); - for blob in blobs { - blob_responses.push( - Self::blob_to_blob_read_response(blob) - .map_err(|e| tonic::Status::internal(e.to_string()))?, - ); - } - - responses.push(ReadAtHeightResponse { blobs: blob_responses }) - } - - Ok(tonic::Response::new(BatchReadResponse { responses })) - } - - /// Batch write blobs. - async fn batch_write( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - let blobs = request.into_inner().blobs; - let mut responses = Vec::with_capacity(blobs.len()); - for data in blobs { - let blob = self - .submit_blob(data.data) - .await - .map_err(|e| tonic::Status::internal(e.to_string()))?; - responses.push(blob); - } - - let mut blob_responses = Vec::new(); - for blob in responses { - blob_responses.push( - Self::blob_to_blob_write_response(blob) - .map_err(|e| tonic::Status::internal(e.to_string()))?, - ); - } - - Ok(tonic::Response::new(BatchWriteResponse { blobs: blob_responses })) - } - /// Update and manage verification parameters. 
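// A minimal sketch, not from this diff: verification_mode sits behind an
// Arc<tokio::sync::RwLock<...>> so update_verification_parameters (below) can
// swap the mode at runtime while verification takes a short-lived read guard
// per blob. `SketchMode` is a hypothetical stand-in enum.
use std::sync::Arc;
use tokio::sync::RwLock;

#[derive(Clone, Copy, Debug, PartialEq)]
enum SketchMode {
	Cowboy,
	MOfN,
}

async fn demo_mode_swap() {
	let mode = Arc::new(RwLock::new(SketchMode::MOfN));

	// writer side: the RPC handler takes the write guard and swaps the mode
	*mode.write().await = SketchMode::Cowboy;

	// reader side: each verification reads the current mode under a read guard
	assert_eq!(*mode.read().await, SketchMode::Cowboy);
}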
- async fn update_verification_parameters(
- &self,
- request: tonic::Request<UpdateVerificationParametersRequest>,
- ) -> std::result::Result<tonic::Response<UpdateVerificationParametersResponse>, tonic::Status>
- {
- let verification_mode = request.into_inner().mode();
- let mut mode = self.verification_mode.write().await;
- *mode = verification_mode;
-
- Ok(tonic::Response::new(UpdateVerificationParametersResponse {
- mode: verification_mode.into(),
- }))
- }
}
#[tonic::async_trait]
impl LightNodeService for LightNodeV1 {
+ /// Server streaming response type for the StreamReadFromHeight method.
+ type StreamReadFromHeightStream = std::pin::Pin<
+ Box<
+ dyn Stream<Item = std::result::Result<StreamReadFromHeightResponse, tonic::Status>> + Send + 'static,
+ >,
+ >;
+
+ /// Stream blobs from a specified height or from the latest height.
+ async fn stream_read_from_height(
+ &self,
+ request: tonic::Request<StreamReadFromHeightRequest>,
+ ) -> std::result::Result<tonic::Response<Self::StreamReadFromHeightStream>, tonic::Status> {
+ let me = Arc::new(self.clone());
+ let height = request.into_inner().height;
+
+ let output = async_stream::try_stream! {
+
+ let mut blob_stream = me.stream_blobs_from_height_on(Some(height)).await.map_err(|e| tonic::Status::internal(e.to_string()))?;
+
+ while let Some(blob) = blob_stream.next().await {
+ let blob = blob.map_err(|e| tonic::Status::internal(e.to_string()))?;
+ let response = StreamReadFromHeightResponse {
+ blob : Some(Self::blob_to_blob_read_response(blob).map_err(|e| tonic::Status::internal(e.to_string()))?)
+ };
+ yield response;
+ }
+
+ };
+
+ Ok(tonic::Response::new(Box::pin(output) as Self::StreamReadFromHeightStream))
+ }
+
+ /// Server streaming response type for the StreamReadLatest method.
+ type StreamReadLatestStream = std::pin::Pin<
+ Box<dyn Stream<Item = std::result::Result<StreamReadLatestResponse, tonic::Status>> + Send + 'static>,
+ >;
+
+ /// Stream the latest blobs.
+ async fn stream_read_latest(
+ &self,
+ _request: tonic::Request<StreamReadLatestRequest>,
+ ) -> std::result::Result<tonic::Response<Self::StreamReadLatestStream>, tonic::Status> {
+ let me = Arc::new(self.clone());
+
+ let output = async_stream::try_stream! {
+
+ let mut blob_stream = me.stream_blobs_from_height_on(None).await.map_err(|e| tonic::Status::internal(e.to_string()))?;
+ while let Some(blob) = blob_stream.next().await {
+ let blob = blob.map_err(|e| tonic::Status::internal(e.to_string()))?;
+ let response = StreamReadLatestResponse {
+ blob : Some(Self::blob_to_blob_read_response(blob).map_err(|e| tonic::Status::internal(e.to_string()))?)
+ };
+ yield response;
+ }
+
+ };
+
+ Ok(tonic::Response::new(Box::pin(output) as Self::StreamReadLatestStream))
+ }
+ /// Server streaming response type for the StreamWriteBlob method.
+ type StreamWriteBlobStream = std::pin::Pin<
+ Box<dyn Stream<Item = std::result::Result<StreamWriteBlobResponse, tonic::Status>> + Send + 'static>,
+ >;
+ /// Stream blobs out, either individually or in batches.
+ async fn stream_write_blob(
+ &self,
+ request: tonic::Request<tonic::Streaming<StreamWriteBlobRequest>>,
+ ) -> std::result::Result<tonic::Response<Self::StreamWriteBlobStream>, tonic::Status> {
+ let mut stream = request.into_inner();
+ let me = Arc::new(self.clone());
+
+ let output = async_stream::try_stream! {
+
+ while let Some(request) = stream.next().await {
+ let request = request?;
+ let blob_data = request.blob.ok_or(tonic::Status::invalid_argument("No blob in request"))?.data;
+
+ let blob = me.submit_blob(blob_data).await.map_err(|e| tonic::Status::internal(e.to_string()))?;
+
+ let write_response = StreamWriteBlobResponse {
+ blob : Some(Self::blob_to_blob_read_response(blob).map_err(|e| tonic::Status::internal(e.to_string()))?)
+ };
+
+ yield write_response;
+
+ }
+ };
+
+ Ok(tonic::Response::new(Box::pin(output) as Self::StreamWriteBlobStream))
+ }
+ /// Read blobs at a specified height.
+ async fn read_at_height( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let height = request.into_inner().height; + let blobs = self + .get_blobs_at_height(height) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))?; + + if blobs.is_empty() { + return Err(tonic::Status::not_found("No blobs found at the specified height")); + } + + let mut blob_responses = Vec::new(); + for blob in blobs { + blob_responses.push( + Self::blob_to_blob_read_response(blob) + .map_err(|e| tonic::Status::internal(e.to_string()))?, + ); + } + + Ok(tonic::Response::new(ReadAtHeightResponse { + // map blobs to the response type + blobs: blob_responses, + })) + } + /// Batch read and write operations for efficiency. + async fn batch_read( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let heights = request.into_inner().heights; + let mut responses = Vec::with_capacity(heights.len()); + for height in heights { + let blobs = self + .get_blobs_at_height(height) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))?; + + if blobs.is_empty() { + return Err(tonic::Status::not_found("No blobs found at the specified height")); + } + + let mut blob_responses = Vec::new(); + for blob in blobs { + blob_responses.push( + Self::blob_to_blob_read_response(blob) + .map_err(|e| tonic::Status::internal(e.to_string()))?, + ); + } + + responses.push(ReadAtHeightResponse { blobs: blob_responses }) + } + + Ok(tonic::Response::new(BatchReadResponse { responses })) + } + + /// Batch write blobs. + async fn batch_write( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let blobs = request.into_inner().blobs; + let mut responses = Vec::with_capacity(blobs.len()); + for data in blobs { + let blob = self + .submit_blob(data.data) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))?; + responses.push(blob); + } + + let mut blob_responses = Vec::new(); + for blob in responses { + blob_responses.push( + Self::blob_to_blob_write_response(blob) + .map_err(|e| tonic::Status::internal(e.to_string()))?, + ); + } + + Ok(tonic::Response::new(BatchWriteResponse { blobs: blob_responses })) + } + /// Update and manage verification parameters. 
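// A minimal sketch, not from this diff: read_at_height and batch_read above
// answer NOT_FOUND when a height holds no blobs for the namespace, so a
// polling client should treat that status as "not yet" rather than fatal.
// `read_at` is a hypothetical stand-in for the generated read_at_height call.
async fn read_at(_height: u64) -> Result<Vec<u8>, tonic::Status> {
	// stand-in for client.read_at_height(ReadAtHeightRequest { height }).await
	Err(tonic::Status::not_found("No blobs found at the specified height"))
}

async fn poll_height(height: u64) -> Result<Option<Vec<u8>>, tonic::Status> {
	match read_at(height).await {
		Ok(bytes) => Ok(Some(bytes)),
		// nothing at this height yet: report None and let the caller retry
		Err(status) if status.code() == tonic::Code::NotFound => Ok(None),
		Err(status) => Err(status),
	}
}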
+ async fn update_verification_parameters( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + let verification_mode = request.into_inner().mode(); + let mut mode = self.verification_mode.write().await; + *mode = verification_mode; + + Ok(tonic::Response::new(UpdateVerificationParametersResponse { + mode: verification_mode.into(), + })) + } } diff --git a/protocol-units/da/m1/light-node/src/v1/sequencer.rs b/protocol-units/da/m1/light-node/src/v1/sequencer.rs index d1bbdfb8d..67036ab8a 100644 --- a/protocol-units/da/m1/light-node/src/v1/sequencer.rs +++ b/protocol-units/da/m1/light-node/src/v1/sequencer.rs @@ -12,204 +12,203 @@ use crate::v1::{passthrough::LightNodeV1 as LightNodeV1PassThrough, LightNodeV1O #[derive(Clone)] pub struct LightNodeV1 { - pub pass_through: LightNodeV1PassThrough, - pub memseq: memseq::Memseq, + pub pass_through: LightNodeV1PassThrough, + pub memseq: memseq::Memseq, } impl LightNodeV1Operations for LightNodeV1 { - async fn try_from_env() -> Result { - info!("Initializing LightNodeV1 in sequencer mode from environment."); + async fn try_from_env() -> Result { + info!("Initializing LightNodeV1 in sequencer mode from environment."); - let pass_through = LightNodeV1PassThrough::try_from_env().await?; - info!("Initialized pass through for LightNodeV1 in sequencer mode."); + let pass_through = LightNodeV1PassThrough::try_from_env().await?; + info!("Initialized pass through for LightNodeV1 in sequencer mode."); - let memseq = memseq::Memseq::try_move_rocks_from_env()?; - info!("Initialized Memseq with Move Rocks for LightNodeV1 in sequencer mode."); + let memseq = memseq::Memseq::try_move_rocks_from_env()?; + info!("Initialized Memseq with Move Rocks for LightNodeV1 in sequencer mode."); - Ok(Self { pass_through, memseq }) - } + Ok(Self { pass_through, memseq }) + } - async fn run_background_tasks(&self) -> Result<(), anyhow::Error> { - self.run_block_proposer().await?; + async fn run_background_tasks(&self) -> Result<(), anyhow::Error> { + self.run_block_proposer().await?; - Ok(()) - } + Ok(()) + } } impl LightNodeV1 { - pub async fn tick_block_proposer(&self) -> Result<(), anyhow::Error> { - let block = self.memseq.wait_for_next_block().await?; - match block { - Some(block) => { - let block_blob = self.pass_through.create_new_celestia_blob( - serde_json::to_vec(&block) - .map_err(|e| anyhow::anyhow!("Failed to serialize block: {}", e))?, - )?; - - let height = self.pass_through.submit_celestia_blob(block_blob).await?; - - debug!("Submitted block: {:?} {:?}", block.id(), height); - } - None => { - // no transactions to include - } - } - Ok(()) - } - - pub async fn run_block_proposer(&self) -> Result<(), anyhow::Error> { - loop { - // build the next block from the blobs - self.tick_block_proposer().await?; - - // sleep for a while - tokio::time::sleep(std::time::Duration::from_millis(300)).await; - } - - Ok(()) - } - - pub fn to_sequenced_blob_block( - blob_response: BlobResponse, - ) -> Result { - let blob_type = blob_response.blob_type.ok_or(anyhow::anyhow!("No blob type"))?; - - let sequenced_block = match blob_type { - blob_response::BlobType::PassedThroughBlob(blob) => { - blob_response::BlobType::SequencedBlobBlock(blob) - } - blob_response::BlobType::SequencedBlobBlock(blob) => { - blob_response::BlobType::SequencedBlobBlock(blob) - } - _ => { - anyhow::bail!("Invalid blob type") - } - }; - - Ok(BlobResponse { blob_type: Some(sequenced_block) }) - } - - pub fn make_sequenced_blob_intent( - data: Vec, - height: u64, - ) -> Result { 
- Ok(BlobResponse { - blob_type: Some(blob_response::BlobType::SequencedBlobIntent(Blob { - data, - blob_id: "".to_string(), - height, - timestamp: 0, - })), - }) - } + pub async fn tick_block_proposer(&self) -> Result<(), anyhow::Error> { + let block = self.memseq.wait_for_next_block().await?; + match block { + Some(block) => { + let block_blob = self.pass_through.create_new_celestia_blob( + serde_json::to_vec(&block) + .map_err(|e| anyhow::anyhow!("Failed to serialize block: {}", e))?, + )?; + + let height = self.pass_through.submit_celestia_blob(block_blob).await?; + + debug!("Submitted block: {:?} {:?}", block.id(), height); + } + None => { + // no transactions to include + } + } + Ok(()) + } + + pub async fn run_block_proposer(&self) -> Result<(), anyhow::Error> { + loop { + // build the next block from the blobs + self.tick_block_proposer().await?; + + // sleep for a while + tokio::time::sleep(std::time::Duration::from_millis(300)).await; + } + + Ok(()) + } + + pub fn to_sequenced_blob_block( + blob_response: BlobResponse, + ) -> Result { + let blob_type = blob_response.blob_type.ok_or(anyhow::anyhow!("No blob type"))?; + + let sequenced_block = match blob_type { + blob_response::BlobType::PassedThroughBlob(blob) => { + blob_response::BlobType::SequencedBlobBlock(blob) + } + blob_response::BlobType::SequencedBlobBlock(blob) => { + blob_response::BlobType::SequencedBlobBlock(blob) + } + _ => { + anyhow::bail!("Invalid blob type") + } + }; + + Ok(BlobResponse { blob_type: Some(sequenced_block) }) + } + + pub fn make_sequenced_blob_intent( + data: Vec, + height: u64, + ) -> Result { + Ok(BlobResponse { + blob_type: Some(blob_response::BlobType::SequencedBlobIntent(Blob { + data, + blob_id: "".to_string(), + height, + timestamp: 0, + })), + }) + } } #[tonic::async_trait] impl LightNodeService for LightNodeV1 { - /// Server streaming response type for the StreamReadFromHeight method. - type StreamReadFromHeightStream = std::pin::Pin< - Box< - dyn Stream> + Send + 'static, - >, - >; - - /// Stream blobs from a specified height or from the latest height. - async fn stream_read_from_height( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - self.pass_through.stream_read_from_height(request).await - } - - /// Server streaming response type for the StreamReadLatest method. - type StreamReadLatestStream = std::pin::Pin< - Box> + Send + 'static>, - >; - - /// Stream the latest blobs. - async fn stream_read_latest( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - self.pass_through.stream_read_latest(request).await - } - /// Server streaming response type for the StreamWriteCelestiaBlob method. - type StreamWriteBlobStream = std::pin::Pin< - Box> + Send + 'static>, - >; - /// Stream blobs out, either individually or in batches. - async fn stream_write_blob( - &self, - request: tonic::Request>, - ) -> std::result::Result, tonic::Status> { - unimplemented!("stream_write_blob") - } - /// Read blobs at a specified height. - async fn read_at_height( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - self.pass_through.read_at_height(request).await - } - /// Batch read and write operations for efficiency. - async fn batch_read( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status> { - self.pass_through.batch_read(request).await - } - - /// Batch write blobs. 
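// A minimal sketch, not from this diff: in sequencer mode, batch_write (below)
// only acknowledges with SequencedBlobIntent values whose height is the
// network head at submission time, not the final inclusion height. A reader
// should follow the stream until a SequencedBlobBlock carries its data, as the
// e2e sequencer test earlier in this diff does; condensed here with the same
// client types (module path assumed per tonic codegen) and the test's default
// address.
use m1_da_light_node_grpc::light_node_service_client::LightNodeServiceClient;
use m1_da_light_node_grpc::{blob_response, BatchWriteRequest, BlobWrite, StreamReadLatestRequest};
use tokio_stream::StreamExt;

async fn wait_until_sequenced(data: Vec<u8>) -> Result<(), anyhow::Error> {
	let mut client = LightNodeServiceClient::connect("http://0.0.0.0:30730").await?;
	let blobs = vec![BlobWrite { data: data.clone() }];
	client.batch_write(BatchWriteRequest { blobs }).await?;

	let mut stream = client.stream_read_latest(StreamReadLatestRequest {}).await?.into_inner();
	while let Some(response) = stream.next().await {
		if let Some(blob) = response?.blob {
			if let Some(blob_response::BlobType::SequencedBlobBlock(block)) = blob.blob_type {
				// the blob carries a serialized block; its transactions
				// include the submitted data once sequencing has happened
				let _serialized_block = block.data;
				return Ok(());
			}
		}
	}
	anyhow::bail!("stream ended before the blob was sequenced")
}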
- async fn batch_write(
- &self,
- request: tonic::Request<BatchWriteRequest>,
- ) -> std::result::Result<tonic::Response<BatchWriteResponse>, tonic::Status> {
- let blobs_for_intent = request.into_inner().blobs;
- let blobs_for_submission = blobs_for_intent.clone();
- let height: u64 = self
- .pass_through
- .default_client
- .header_network_head()
- .await
- .map_err(|e| tonic::Status::internal(e.to_string()))?
- .height()
- .into();
-
- let intents: Vec<BlobResponse> = blobs_for_intent
- .into_iter()
- .map(|blob| {
- Self::make_sequenced_blob_intent(blob.data, height)
- .map_err(|e| tonic::Status::internal(e.to_string()))
- })
- .collect::<Result<Vec<BlobResponse>, tonic::Status>>()?;
-
- // make transactions from the blobs
- let transactions: Vec<Transaction> = blobs_for_submission
- .into_iter()
- .map(|blob| {
- let transaction = Transaction::from(blob.data);
- transaction
- })
- .collect();
-
- // publish the transactions
- for transaction in transactions {
- debug!("Publishing transaction: {:?}", transaction.id());
-
- self.memseq
- .publish(transaction)
- .await
- .map_err(|e| tonic::Status::internal(e.to_string()))?;
- }
-
- Ok(tonic::Response::new(BatchWriteResponse { blobs: intents }))
- }
- /// Update and manage verification parameters.
- async fn update_verification_parameters(
- &self,
- request: tonic::Request<UpdateVerificationParametersRequest>,
- ) -> std::result::Result<tonic::Response<UpdateVerificationParametersResponse>, tonic::Status>
- {
- self.pass_through.update_verification_parameters(request).await
- }
+ /// Server streaming response type for the StreamReadFromHeight method.
+ type StreamReadFromHeightStream = std::pin::Pin<
+ Box<
+ dyn Stream<Item = std::result::Result<StreamReadFromHeightResponse, tonic::Status>> + Send + 'static,
+ >,
+ >;
+
+ /// Stream blobs from a specified height or from the latest height.
+ async fn stream_read_from_height(
+ &self,
+ request: tonic::Request<StreamReadFromHeightRequest>,
+ ) -> std::result::Result<tonic::Response<Self::StreamReadFromHeightStream>, tonic::Status> {
+ self.pass_through.stream_read_from_height(request).await
+ }
+
+ /// Server streaming response type for the StreamReadLatest method.
+ type StreamReadLatestStream = std::pin::Pin<
+ Box<dyn Stream<Item = std::result::Result<StreamReadLatestResponse, tonic::Status>> + Send + 'static>,
+ >;
+
+ /// Stream the latest blobs.
+ async fn stream_read_latest(
+ &self,
+ request: tonic::Request<StreamReadLatestRequest>,
+ ) -> std::result::Result<tonic::Response<Self::StreamReadLatestStream>, tonic::Status> {
+ self.pass_through.stream_read_latest(request).await
+ }
+ /// Server streaming response type for the StreamWriteBlob method.
+ type StreamWriteBlobStream = std::pin::Pin<
+ Box<dyn Stream<Item = std::result::Result<StreamWriteBlobResponse, tonic::Status>> + Send + 'static>,
+ >;
+ /// Stream blobs out, either individually or in batches.
+ async fn stream_write_blob(
+ &self,
+ _request: tonic::Request<tonic::Streaming<StreamWriteBlobRequest>>,
+ ) -> std::result::Result<tonic::Response<Self::StreamWriteBlobStream>, tonic::Status> {
+ unimplemented!("stream_write_blob")
+ }
+ /// Read blobs at a specified height.
+ async fn read_at_height(
+ &self,
+ request: tonic::Request<ReadAtHeightRequest>,
+ ) -> std::result::Result<tonic::Response<ReadAtHeightResponse>, tonic::Status> {
+ self.pass_through.read_at_height(request).await
+ }
+ /// Batch read and write operations for efficiency.
+ async fn batch_read(
+ &self,
+ request: tonic::Request<BatchReadRequest>,
+ ) -> std::result::Result<tonic::Response<BatchReadResponse>, tonic::Status> {
+ self.pass_through.batch_read(request).await
+ }
+
+ /// Batch write blobs.
+ async fn batch_write(
+ &self,
+ request: tonic::Request<BatchWriteRequest>,
+ ) -> std::result::Result<tonic::Response<BatchWriteResponse>, tonic::Status> {
+ let blobs_for_intent = request.into_inner().blobs;
+ let blobs_for_submission = blobs_for_intent.clone();
+ let height: u64 = self
+ .pass_through
+ .default_client
+ .header_network_head()
+ .await
+ .map_err(|e| tonic::Status::internal(e.to_string()))?
+ .height() + .into(); + + let intents: Vec = blobs_for_intent + .into_iter() + .map(|blob| { + Self::make_sequenced_blob_intent(blob.data, height) + .map_err(|e| tonic::Status::internal(e.to_string())) + }) + .collect::, tonic::Status>>()?; + + // make transactions from the blobs + let transactions: Vec = blobs_for_submission + .into_iter() + .map(|blob| { + let transaction = Transaction::from(blob.data); + transaction + }) + .collect(); + + // publish the transactions + for transaction in transactions { + debug!("Publishing transaction: {:?}", transaction.id()); + + self.memseq + .publish(transaction) + .await + .map_err(|e| tonic::Status::internal(e.to_string()))?; + } + + Ok(tonic::Response::new(BatchWriteResponse { blobs: intents })) + } + /// Update and manage verification parameters. + async fn update_verification_parameters( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + self.pass_through.update_verification_parameters(request).await + } } diff --git a/protocol-units/da/m1/util/src/bin/wait_for_light_node.rs b/protocol-units/da/m1/util/src/bin/wait_for_light_node.rs index 17f1e25a6..97a457192 100644 --- a/protocol-units/da/m1/util/src/bin/wait_for_light_node.rs +++ b/protocol-units/da/m1/util/src/bin/wait_for_light_node.rs @@ -3,40 +3,40 @@ use m1_da_light_node_util::Config; #[tokio::main] async fn main() -> Result<(), anyhow::Error> { - let config = Config::try_from_env()?; - let client = config.connect_celestia().await?; + let config = Config::try_from_env()?; + let client = config.connect_celestia().await?; - /* ! header sync wait deserialization is broken - loop { - match client.header_sync_wait().await { - Ok(_) => break, - Err(e) => { - match e { - jsonrpsee::core::Error::RequestTimeout => { - println!("Request timeout"); - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - }, - jsonrpsee::core::Error::RestartNeeded(e) => { - println!("Restarting: {:?}", e); - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - }, - _ => return Err(anyhow::anyhow!("Error: {:?}", e)) - } + /* ! 
-    loop {
-        match client.header_sync_wait().await {
-            Ok(_) => break,
-            Err(e) => {
-                match e {
-                    jsonrpsee::core::Error::RequestTimeout => {
-                        println!("Request timeout");
-                        tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
-                    },
-                    jsonrpsee::core::Error::RestartNeeded(e) => {
-                        println!("Restarting: {:?}", e);
-                        tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
-                    },
-                    _ => return Err(anyhow::anyhow!("Error: {:?}", e))
-                }
-            }
-        }
-    }*/
+	/* ! header sync wait deserialization is broken
+	loop {
+		match client.header_sync_wait().await {
+			Ok(_) => break,
+			Err(e) => {
+				match e {
+					jsonrpsee::core::Error::RequestTimeout => {
+						println!("Request timeout");
+						tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+					},
+					jsonrpsee::core::Error::RestartNeeded(e) => {
+						println!("Restarting: {:?}", e);
+						tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+					},
+					_ => return Err(anyhow::anyhow!("Error: {:?}", e))
+				}
+			}
+		}
+	}*/

-    loop {
-        let head = client.header_network_head().await?;
-        let height: u64 = head.height().into();
-        let sync_state = client.header_sync_state().await?;
-        println!("Current height: {}, Synced height: {}", height, sync_state.height);
-        if height <= sync_state.height {
-            break;
-        }
-        tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
-    }
+	loop {
+		let head = client.header_network_head().await?;
+		let height: u64 = head.height().into();
+		let sync_state = client.header_sync_state().await?;
+		println!("Current height: {}, Synced height: {}", height, sync_state.height);
+		if height <= sync_state.height {
+			break;
+		}
+		tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+	}

-    Ok(())
+	Ok(())
 }
diff --git a/protocol-units/da/m1/util/src/lib.rs b/protocol-units/da/m1/util/src/lib.rs
index 7b22832b5..79f9676ba 100644
--- a/protocol-units/da/m1/util/src/lib.rs
+++ b/protocol-units/da/m1/util/src/lib.rs
@@ -5,85 +5,85 @@ use m1_da_light_node_grpc::*;
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Config {
-    pub celestia_url: String,
-    pub celestia_token: String,
-    pub celestia_namespace: Namespace,
-    pub verification_mode: VerificationMode,
+	pub celestia_url: String,
+	pub celestia_token: String,
+	pub celestia_namespace: Namespace,
+	pub verification_mode: VerificationMode,
 }

 impl Config {
-    const DEFAULT_CELESTIA_NODE_URL: &'static str = "ws://localhost:26658";
-    const DEFAULT_NAMESPACE_BYTES: &'static str = "a673006fb64aa2e5360d";
+	const DEFAULT_CELESTIA_NODE_URL: &'static str = "ws://localhost:26658";
+	const DEFAULT_NAMESPACE_BYTES: &'static str = "a673006fb64aa2e5360d";

-    pub fn try_from_env() -> Result<Self, anyhow::Error> {
-        let token = std::env::var("CELESTIA_NODE_AUTH_TOKEN")
-            .map_err(|_| anyhow::anyhow!("Token not provided"))?;
-        let url = std::env::var("CELESTIA_NODE_URL")
-            .unwrap_or_else(|_| Self::DEFAULT_CELESTIA_NODE_URL.to_string());
+	pub fn try_from_env() -> Result<Self, anyhow::Error> {
+		let token = std::env::var("CELESTIA_NODE_AUTH_TOKEN")
+			.map_err(|_| anyhow::anyhow!("Token not provided"))?;
+		let url = std::env::var("CELESTIA_NODE_URL")
+			.unwrap_or_else(|_| Self::DEFAULT_CELESTIA_NODE_URL.to_string());

-        let namespace_hex = std::env::var("CELESTIA_NAMESPACE_BYTES")
-            .unwrap_or_else(|_| Self::DEFAULT_NAMESPACE_BYTES.to_string());
+		let namespace_hex = std::env::var("CELESTIA_NAMESPACE_BYTES")
+			.unwrap_or_else(|_| Self::DEFAULT_NAMESPACE_BYTES.to_string());

-        // Decode the hex string to bytes
-        let namespace_bytes = hex::decode(namespace_hex)
-            .map_err(|e| anyhow::anyhow!("Failed to decode namespace bytes: {}", e))?;
+		// Decode the hex string to bytes
+		let namespace_bytes = hex::decode(namespace_hex)
+			.map_err(|e| anyhow::anyhow!("Failed to decode namespace bytes: {}", e))?;

-        // Create a namespace from the bytes
-        let namespace =
-            Namespace::new_v0(&namespace_bytes).context("Failed to create namespace from bytes")?;
+		// Create a namespace from the bytes
+		let namespace =
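+			// Note: the 20-hex-char default above decodes to exactly 10 bytes,
+			// which presumably matches the v0 namespace-id width that
+			// `Namespace::new_v0` expects (compare `last_ten_bytes_str` below).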
Namespace::new_v0(&namespace_bytes).context("Failed to create namespace from bytes")?; - // try to read the verification mode from the environment - let verification_mode = match std::env::var("VERIFICATION_MODE") { - Ok(mode) => VerificationMode::from_str_name(mode.as_str()) - .ok_or(anyhow::anyhow!("Invalid verification mode"))?, - Err(_) => VerificationMode::MOfN, - }; + // try to read the verification mode from the environment + let verification_mode = match std::env::var("VERIFICATION_MODE") { + Ok(mode) => VerificationMode::from_str_name(mode.as_str()) + .ok_or(anyhow::anyhow!("Invalid verification mode"))?, + Err(_) => VerificationMode::MOfN, + }; - Ok(Self { - celestia_url: url, - celestia_token: token, - celestia_namespace: namespace, - verification_mode, - }) - } + Ok(Self { + celestia_url: url, + celestia_token: token, + celestia_namespace: namespace, + verification_mode, + }) + } - fn last_ten_bytes_str(namespace: &Namespace) -> String { - let bytes = namespace.as_bytes(); - let len = bytes.len(); - let start = if len > 10 { len - 10 } else { 0 }; - hex::encode(&bytes[start..]) - } + fn last_ten_bytes_str(namespace: &Namespace) -> String { + let bytes = namespace.as_bytes(); + let len = bytes.len(); + let start = if len > 10 { len - 10 } else { 0 }; + hex::encode(&bytes[start..]) + } - pub fn write_to_env(&self) -> Result<(), anyhow::Error> { - std::env::set_var("CELESTIA_NODE_URL", self.celestia_url.clone()); - std::env::set_var("CELESTIA_NODE_AUTH_TOKEN", self.celestia_token.clone()); - std::env::set_var( - "CELESTIA_NAMESPACE_BYTES", - Self::last_ten_bytes_str(&self.celestia_namespace), - ); - std::env::set_var("VERIFICATION_MODE", self.verification_mode.as_str_name()); - Ok(()) - } + pub fn write_to_env(&self) -> Result<(), anyhow::Error> { + std::env::set_var("CELESTIA_NODE_URL", self.celestia_url.clone()); + std::env::set_var("CELESTIA_NODE_AUTH_TOKEN", self.celestia_token.clone()); + std::env::set_var( + "CELESTIA_NAMESPACE_BYTES", + Self::last_ten_bytes_str(&self.celestia_namespace), + ); + std::env::set_var("VERIFICATION_MODE", self.verification_mode.as_str_name()); + Ok(()) + } - pub fn write_bash_export_string(&self) -> Result { - Ok(format!( + pub fn write_bash_export_string(&self) -> Result { + Ok(format!( "export CELESTIA_NODE_URL={}\nexport CELESTIA_NODE_AUTH_TOKEN={}\nexport CELESTIA_NAMESPACE_BYTES={}\nexport VERIFICATION_MODE={}", self.celestia_url, self.celestia_token, Self::last_ten_bytes_str(&self.celestia_namespace), self.verification_mode.as_str_name() )) - } + } - pub async fn connect_celestia(&self) -> Result { - let client = - Client::new(&self.celestia_url, Some(&self.celestia_token)).await.map_err(|e| { - anyhow::anyhow!( - "Failed to connect to Celestia client at {}: {}", - self.celestia_url, - e - ) - })?; - Ok(client) - } + pub async fn connect_celestia(&self) -> Result { + let client = + Client::new(&self.celestia_url, Some(&self.celestia_token)).await.map_err(|e| { + anyhow::anyhow!( + "Failed to connect to Celestia client at {}: {}", + self.celestia_url, + e + ) + })?; + Ok(client) + } } diff --git a/protocol-units/execution/maptos/opt-executor/src/executor.rs b/protocol-units/execution/maptos/opt-executor/src/executor.rs index ae8d6c86b..3e498bdd1 100644 --- a/protocol-units/execution/maptos/opt-executor/src/executor.rs +++ b/protocol-units/execution/maptos/opt-executor/src/executor.rs @@ -1,43 +1,43 @@ use aptos_api::{ - get_api_service, - runtime::{get_apis, Apis}, - Context, + get_api_service, + runtime::{get_apis, Apis}, + Context, }; use 
aptos_config::config::NodeConfig; use aptos_crypto::{ed25519::Ed25519PublicKey, HashValue}; use aptos_db::AptosDB; use aptos_executor::{ - block_executor::BlockExecutor, - db_bootstrapper::{generate_waypoint, maybe_bootstrap}, + block_executor::BlockExecutor, + db_bootstrapper::{generate_waypoint, maybe_bootstrap}, }; use aptos_executor_types::BlockExecutorTrait; use aptos_mempool::SubmissionStatus; use aptos_mempool::{ - core_mempool::{CoreMempool, TimelineState}, - MempoolClientRequest, MempoolClientSender, + core_mempool::{CoreMempool, TimelineState}, + MempoolClientRequest, MempoolClientSender, }; use aptos_sdk::types::mempool_status::{MempoolStatus, MempoolStatusCode}; use aptos_sdk::types::on_chain_config::{OnChainConsensusConfig, OnChainExecutionConfig}; use aptos_storage_interface::DbReaderWriter; use aptos_types::{ - aggregate_signature::AggregateSignature, - block_executor::partitioner::ExecutableTransactions, - block_info::BlockInfo, - ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, - transaction::Version, + aggregate_signature::AggregateSignature, + block_executor::partitioner::ExecutableTransactions, + block_info::BlockInfo, + ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + transaction::Version, }; use aptos_types::{ - block_executor::{config::BlockExecutorConfigFromOnchain, partitioner::ExecutableBlock}, - chain_id::ChainId, - transaction::{ - signature_verified_transaction::SignatureVerifiedTransaction, ChangeSet, SignedTransaction, - Transaction, WriteSetPayload, - }, - validator_signer::ValidatorSigner, + block_executor::{config::BlockExecutorConfigFromOnchain, partitioner::ExecutableBlock}, + chain_id::ChainId, + transaction::{ + signature_verified_transaction::SignatureVerifiedTransaction, ChangeSet, SignedTransaction, + Transaction, WriteSetPayload, + }, + validator_signer::ValidatorSigner, }; use aptos_vm::AptosVM; use aptos_vm_genesis::{ - default_gas_schedule, encode_genesis_change_set, GenesisConfiguration, TestValidator, Validator, + default_gas_schedule, encode_genesis_change_set, GenesisConfiguration, TestValidator, Validator, }; use movement_types::{BlockCommitment, Commitment, Id}; @@ -54,808 +54,809 @@ use std::{path::PathBuf, sync::Arc}; /// against the `AptosVM`. #[derive(Clone)] pub struct Executor { - /// The executing type. - pub block_executor: Arc>>, - /// The access to db. - pub db: DbReaderWriter, - /// The signer of the executor's transactions. - pub signer: ValidatorSigner, - /// The core mempool (used for the api to query the mempool). - pub core_mempool: Arc>, - /// The sender for the mempool client. - pub mempool_client_sender: MempoolClientSender, - /// The receiver for the mempool client. - pub mempool_client_receiver: Arc>>, - /// The configuration of the node. - pub node_config: NodeConfig, - /// Context - pub context: Arc, - /// The Aptos VM configuration. - pub aptos_config: maptos_execution_util::config::just_aptos::Config, + /// The executing type. + pub block_executor: Arc>>, + /// The access to db. + pub db: DbReaderWriter, + /// The signer of the executor's transactions. + pub signer: ValidatorSigner, + /// The core mempool (used for the api to query the mempool). + pub core_mempool: Arc>, + /// The sender for the mempool client. + pub mempool_client_sender: MempoolClientSender, + /// The receiver for the mempool client. + pub mempool_client_receiver: Arc>>, + /// The configuration of the node. + pub node_config: NodeConfig, + /// Context + pub context: Arc, + /// The Aptos VM configuration. 
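+	// Note: `mempool_client_receiver` is kept behind a lock so that
+	// `tick_transaction_reader` below can drain API submissions from it.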
+ pub aptos_config: maptos_execution_util::config::just_aptos::Config, } impl Executor { - /// Create a new `Executor` instance. - pub fn new( - block_executor: BlockExecutor, - signer: ValidatorSigner, - mempool_client_sender: MempoolClientSender, - mempool_client_receiver: futures_mpsc::Receiver, - node_config: NodeConfig, - aptos_config: maptos_execution_util::config::just_aptos::Config, - ) -> Self { - let (_aptos_db, reader_writer) = - DbReaderWriter::wrap(AptosDB::new_for_test(&aptos_config.aptos_db_path)); - let core_mempool = Arc::new(RwLock::new(CoreMempool::new(&node_config))); - let reader = reader_writer.reader.clone(); - Self { - block_executor: Arc::new(RwLock::new(block_executor)), - db: reader_writer, - signer, - core_mempool, - mempool_client_sender: mempool_client_sender.clone(), - node_config: node_config.clone(), - mempool_client_receiver: Arc::new(RwLock::new(mempool_client_receiver)), - context: Arc::new(Context::new( - aptos_config.chain_id.clone(), - reader, - mempool_client_sender, - node_config, - None, - )), - aptos_config, - } - } - - pub fn genesis_change_set_and_validators( - chain_id: ChainId, - count: Option, - public_key: &Ed25519PublicKey, - ) -> (ChangeSet, Vec) { - let framework = aptos_cached_packages::head_release_bundle(); - let test_validators = TestValidator::new_test_set(count, Some(100_000_000)); - let validators_: Vec = test_validators.iter().map(|t| t.data.clone()).collect(); - let validators = &validators_; - - let genesis = encode_genesis_change_set( - &public_key, - validators, - framework, - chain_id, - // todo: get this config from somewhere - &GenesisConfiguration { - allow_new_validators: true, - epoch_duration_secs: 3600, - is_test: true, - min_stake: 0, - min_voting_threshold: 0, - // 1M APTOS coins (with 8 decimals). - max_stake: 100_000_000_000_000, - recurring_lockup_duration_secs: 7200, - required_proposer_stake: 0, - rewards_apy_percentage: 10, - voting_duration_secs: 3600, - voting_power_increase_limit: 50, - employee_vesting_start: 1663456089, - employee_vesting_period_duration: 5 * 60, // 5 minutes - initial_features_override: None, - randomness_config_override: None, - jwk_consensus_config_override: None, - }, - &OnChainConsensusConfig::default_for_genesis(), - &OnChainExecutionConfig::default_for_genesis(), - &default_gas_schedule(), - ); - (genesis, test_validators) - } - - pub fn bootstrap_empty_db( - db_dir: &PathBuf, - chain_id: ChainId, - public_key: &Ed25519PublicKey, - ) -> Result<(DbReaderWriter, ValidatorSigner), anyhow::Error> { - let (genesis, validators) = - Self::genesis_change_set_and_validators(chain_id, Some(1), public_key); - let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis)); - let db_rw = DbReaderWriter::new(AptosDB::new_for_test(db_dir)); - - assert!(db_rw.reader.get_latest_ledger_info_option()?.is_none()); - - // Bootstrap empty DB. - let waypoint = generate_waypoint::(&db_rw, &genesis_txn)?; - maybe_bootstrap::(&db_rw, &genesis_txn, waypoint)? 
- .ok_or(anyhow::anyhow!("Failed to bootstrap DB"))?; - assert!(db_rw.reader.get_latest_ledger_info_option()?.is_some()); - - let validator_signer = ValidatorSigner::new( - validators[0].data.owner_address, - validators[0].consensus_key.clone(), - ); - - Ok((db_rw, validator_signer)) - } - - pub fn bootstrap( - mempool_client_sender: MempoolClientSender, - mempool_client_receiver: futures_mpsc::Receiver, - node_config: NodeConfig, - aptos_config: maptos_execution_util::config::just_aptos::Config, - ) -> Result { - let (db, signer) = Self::bootstrap_empty_db( - &aptos_config.aptos_db_path, - aptos_config.chain_id.clone(), - &aptos_config.aptos_public_key, - )?; - let reader = db.reader.clone(); - let core_mempool = Arc::new(RwLock::new(CoreMempool::new(&node_config))); - - Ok(Self { - block_executor: Arc::new(RwLock::new(BlockExecutor::new(db.clone()))), - db, - signer, - core_mempool, - mempool_client_sender: mempool_client_sender.clone(), - mempool_client_receiver: Arc::new(RwLock::new(mempool_client_receiver)), - node_config: node_config.clone(), - context: Arc::new(Context::new( - aptos_config.chain_id.clone(), - reader, - mempool_client_sender, - node_config, - None, - )), - aptos_config, - }) - } - - pub fn try_from_env() -> Result { - // use the default signer, block executor, and mempool - let (mempool_client_sender, mempool_client_receiver) = - futures_mpsc::channel::(10); - let node_config = NodeConfig::default(); - let aptos_config = maptos_execution_util::config::just_aptos::Config::try_from_env() - .context("Failed to create Aptos config")?; - - Self::bootstrap(mempool_client_sender, mempool_client_receiver, node_config, aptos_config) - } - - async fn execute_block_inner(&self, block: ExecutableBlock) -> Result { - let block_id = block.block_id.clone(); - let parent_block_id = { - let block_executor = self.block_executor.read().await; - block_executor.committed_block_id() - }; - - let state_compute = { - let block_executor = self.block_executor.write().await; - block_executor.execute_block( - block, - parent_block_id, - BlockExecutorConfigFromOnchain::new_no_block_limit(), - )? - }; - - debug!("State compute: {:?}", state_compute); - - let version = state_compute.version(); - - let (epoch, round) = self.get_next_epoch_and_round().await?; - - { - let ledger_info_with_sigs = - ledger_info_with_sigs(epoch, round, block_id, state_compute.root_hash(), version); - let block_executor = self.block_executor.write().await; - block_executor.commit_blocks(vec![block_id], ledger_info_with_sigs)?; - } - Ok(version) - } - - /// Execute a block which gets committed to the state. - pub async fn execute_block( - &self, - block: ExecutableBlock, - ) -> Result { - // todo: this should be deterministic, so let's rehash the block id - let hash_str = format!("{:?}", block.block_id); - let mut hash_bytes = hash_str.as_bytes().to_vec(); - hash_bytes.reverse(); - let metadata_block_id = HashValue::sha3_256_of(&hash_bytes); - - // To correctly update block height, we employ a workaround to execute - // the block metadata transaction in its own block first. 
- - // pop 0th transaction - let transactions = block.transactions; - let mut transactions = match transactions { - ExecutableTransactions::Unsharded(transactions) => transactions, - _ => anyhow::bail!("Only unsharded transactions are supported"), - }; - if transactions.len() == 0 { - anyhow::bail!("Block must have at least the metadata transaction"); - } - let block_metadata = match transactions.remove(0) { - SignatureVerifiedTransaction::Valid(Transaction::BlockMetadata(block_metadata)) => { - block_metadata - } - _ => anyhow::bail!("Block metadata not found"), - }; - - // execute the block metadata block - self.execute_block_inner(ExecutableBlock::new( - metadata_block_id.clone(), - ExecutableTransactions::Unsharded(vec![SignatureVerifiedTransaction::Valid( - Transaction::BlockMetadata(block_metadata), - )]), - )) - .await?; - - // execute the rest of the block - let version = self - .execute_block_inner(ExecutableBlock::new( - block.block_id.clone(), - ExecutableTransactions::Unsharded(transactions), - )) - .await?; - - let proof = { - let reader = self.db.reader.clone(); - reader.get_state_proof(version)? - }; - - // Context has a reach-around to the db so the block height should - // have been updated to the most recently committed block. - // Race conditions, anyone? - let block_height = self.get_block_head_height()?; - - let commitment = Commitment::digest_state_proof(&proof); - Ok(BlockCommitment { - block_id: Id(*block.block_id), - commitment, - height: block_height.into(), - }) - } - - pub fn get_block_head_height(&self) -> Result { - let ledger_info = self.context.get_latest_ledger_info_wrapped()?; - Ok(ledger_info.block_height.into()) - } - - fn context(&self) -> Arc { - self.context.clone() - } - - pub fn get_apis(&self) -> Apis { - get_apis(self.context()) - } - - pub async fn run_service(&self) -> Result<(), anyhow::Error> { - info!( - "Starting maptos-opt-executor services at: {:?}", - self.aptos_config.aptos_rest_listen_url - ); - - let api_service = get_api_service(self.context()) - .server(format!("http://{:?}", self.aptos_config.aptos_rest_listen_url)); - - let ui = api_service.swagger_ui(); - - let cors = - Cors::new().allow_methods(vec![Method::GET, Method::POST]).allow_credentials(true); - let app = Route::new().nest("/v1", api_service).nest("/spec", ui).with(cors); - - Server::new(TcpListener::bind(self.aptos_config.aptos_rest_listen_url.clone())) - .run(app) - .await - .map_err(|e| anyhow::anyhow!("Server error: {:?}", e))?; - - Ok(()) - } - - pub async fn get_transaction_sequence_number( - &self, - _transaction: &SignedTransaction, - ) -> Result { - // just use the ms since epoch for now - let ms = chrono::Utc::now().timestamp_millis(); - Ok(ms as u64) - } - - /// Ticks the transaction reader. - pub async fn tick_transaction_reader( - &self, - transaction_channel: async_channel::Sender, - ) -> Result<(), anyhow::Error> { - let mut mempool_client_receiver = self.mempool_client_receiver.write().await; - for _ in 0..256 { - // use select to safely timeout a request for a transaction without risking dropping the transaction - // !warn: this may still be unsafe - tokio::select! 
{ - _ = tokio::time::sleep(tokio::time::Duration::from_millis(5)) => { () }, - request = mempool_client_receiver.next() => { - match request { - Some(request) => { - match request { - MempoolClientRequest::SubmitTransaction(transaction, callback) => { - // add to the mempool - { - - let mut core_mempool = self.core_mempool.write().await; - - let status = core_mempool.add_txn( - transaction.clone(), - 0, - transaction.sequence_number(), - TimelineState::NonQualified, - true - ); - - match status.code { - MempoolStatusCode::Accepted => { - - }, - _ => { - anyhow::bail!("Transaction not accepted: {:?}", status); - } - } - - // send along to the receiver - transaction_channel.send(transaction).await.map_err( - |e| anyhow::anyhow!("Error sending transaction: {:?}", e) - )?; - - }; - - // report status - let ms = MempoolStatus::new(MempoolStatusCode::Accepted); - let status: SubmissionStatus = (ms, None); - callback.send(Ok(status)).map_err( - |e| anyhow::anyhow!("Error sending callback: {:?}", e) - )?; - - }, - MempoolClientRequest::GetTransactionByHash(hash, sender) => { - let mempool = self.core_mempool.read().await; - let mempool_result = mempool.get_by_hash(hash); - sender.send(mempool_result).map_err( - |e| anyhow::anyhow!("Error sending callback: {:?}", e) - )?; - }, - } - }, - None => { - break; - } - } - } - } - } - - Ok(()) - } - - pub async fn tick_mempool_pipe( - &self, - _transaction_channel: async_channel::Sender, - ) -> Result<(), anyhow::Error> { - // todo: remove this old implementation - - Ok(()) - } - - pub async fn get_next_epoch_and_round(&self) -> Result<(u64, u64), anyhow::Error> { - let epoch = self.db.reader.get_latest_ledger_info()?.ledger_info().next_block_epoch(); - let round = self.db.reader.get_latest_ledger_info()?.ledger_info().round(); - Ok((epoch, round)) - } - - /// Pipes a batch of transactions from the mempool to the transaction channel. - /// todo: it may be wise to move the batching logic up a level to the consuming structs. - pub async fn tick_transaction_pipe( - &self, - transaction_channel: async_channel::Sender, - ) -> Result<(), anyhow::Error> { - self.tick_transaction_reader(transaction_channel.clone()).await?; - - self.tick_mempool_pipe(transaction_channel).await?; - - Ok(()) - } + /// Create a new `Executor` instance. 
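+	// Note: this constructor wraps `AptosDB::new_for_test`, so executors
+	// built this way are presumably intended for test and devnet use.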
+ pub fn new( + block_executor: BlockExecutor, + signer: ValidatorSigner, + mempool_client_sender: MempoolClientSender, + mempool_client_receiver: futures_mpsc::Receiver, + node_config: NodeConfig, + aptos_config: maptos_execution_util::config::just_aptos::Config, + ) -> Self { + let (_aptos_db, reader_writer) = + DbReaderWriter::wrap(AptosDB::new_for_test(&aptos_config.aptos_db_path)); + let core_mempool = Arc::new(RwLock::new(CoreMempool::new(&node_config))); + let reader = reader_writer.reader.clone(); + Self { + block_executor: Arc::new(RwLock::new(block_executor)), + db: reader_writer, + signer, + core_mempool, + mempool_client_sender: mempool_client_sender.clone(), + node_config: node_config.clone(), + mempool_client_receiver: Arc::new(RwLock::new(mempool_client_receiver)), + context: Arc::new(Context::new( + aptos_config.chain_id.clone(), + reader, + mempool_client_sender, + node_config, + None, + )), + aptos_config, + } + } + + pub fn genesis_change_set_and_validators( + chain_id: ChainId, + count: Option, + public_key: &Ed25519PublicKey, + ) -> (ChangeSet, Vec) { + let framework = aptos_cached_packages::head_release_bundle(); + let test_validators = TestValidator::new_test_set(count, Some(100_000_000)); + let validators_: Vec = test_validators.iter().map(|t| t.data.clone()).collect(); + let validators = &validators_; + + let genesis = encode_genesis_change_set( + &public_key, + validators, + framework, + chain_id, + // todo: get this config from somewhere + &GenesisConfiguration { + allow_new_validators: true, + epoch_duration_secs: 3600, + is_test: true, + min_stake: 0, + min_voting_threshold: 0, + // 1M APTOS coins (with 8 decimals). + max_stake: 100_000_000_000_000, + recurring_lockup_duration_secs: 7200, + required_proposer_stake: 0, + rewards_apy_percentage: 10, + voting_duration_secs: 3600, + voting_power_increase_limit: 50, + employee_vesting_start: 1663456089, + employee_vesting_period_duration: 5 * 60, // 5 minutes + initial_features_override: None, + randomness_config_override: None, + jwk_consensus_config_override: None, + }, + &OnChainConsensusConfig::default_for_genesis(), + &OnChainExecutionConfig::default_for_genesis(), + &default_gas_schedule(), + ); + (genesis, test_validators) + } + + pub fn bootstrap_empty_db( + db_dir: &PathBuf, + chain_id: ChainId, + public_key: &Ed25519PublicKey, + ) -> Result<(DbReaderWriter, ValidatorSigner), anyhow::Error> { + let (genesis, validators) = + Self::genesis_change_set_and_validators(chain_id, Some(1), public_key); + let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis)); + let db_rw = DbReaderWriter::new(AptosDB::new_for_test(db_dir)); + + assert!(db_rw.reader.get_latest_ledger_info_option()?.is_none()); + + // Bootstrap empty DB. + let waypoint = generate_waypoint::(&db_rw, &genesis_txn)?; + maybe_bootstrap::(&db_rw, &genesis_txn, waypoint)? 
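+			// Note: `maybe_bootstrap` appears to return `None` when the DB was
+			// already bootstrapped; for the fresh DB expected here, `None` is
+			// treated as an error.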
+ .ok_or(anyhow::anyhow!("Failed to bootstrap DB"))?; + assert!(db_rw.reader.get_latest_ledger_info_option()?.is_some()); + + let validator_signer = ValidatorSigner::new( + validators[0].data.owner_address, + validators[0].consensus_key.clone(), + ); + + Ok((db_rw, validator_signer)) + } + + pub fn bootstrap( + mempool_client_sender: MempoolClientSender, + mempool_client_receiver: futures_mpsc::Receiver, + node_config: NodeConfig, + aptos_config: maptos_execution_util::config::just_aptos::Config, + ) -> Result { + let (db, signer) = Self::bootstrap_empty_db( + &aptos_config.aptos_db_path, + aptos_config.chain_id.clone(), + &aptos_config.aptos_public_key, + )?; + let reader = db.reader.clone(); + let core_mempool = Arc::new(RwLock::new(CoreMempool::new(&node_config))); + + Ok(Self { + block_executor: Arc::new(RwLock::new(BlockExecutor::new(db.clone()))), + db, + signer, + core_mempool, + mempool_client_sender: mempool_client_sender.clone(), + mempool_client_receiver: Arc::new(RwLock::new(mempool_client_receiver)), + node_config: node_config.clone(), + context: Arc::new(Context::new( + aptos_config.chain_id.clone(), + reader, + mempool_client_sender, + node_config, + None, + )), + aptos_config, + }) + } + + pub fn try_from_env() -> Result { + // use the default signer, block executor, and mempool + let (mempool_client_sender, mempool_client_receiver) = + futures_mpsc::channel::(10); + let node_config = NodeConfig::default(); + let aptos_config = maptos_execution_util::config::just_aptos::Config::try_from_env() + .context("Failed to create Aptos config")?; + + Self::bootstrap(mempool_client_sender, mempool_client_receiver, node_config, aptos_config) + } + + async fn execute_block_inner(&self, block: ExecutableBlock) -> Result { + let block_id = block.block_id.clone(); + let parent_block_id = { + let block_executor = self.block_executor.read().await; + block_executor.committed_block_id() + }; + + let state_compute = { + let block_executor = self.block_executor.write().await; + block_executor.execute_block( + block, + parent_block_id, + BlockExecutorConfigFromOnchain::new_no_block_limit(), + )? + }; + + debug!("State compute: {:?}", state_compute); + + let version = state_compute.version(); + + let (epoch, round) = self.get_next_epoch_and_round().await?; + + { + let ledger_info_with_sigs = + ledger_info_with_sigs(epoch, round, block_id, state_compute.root_hash(), version); + let block_executor = self.block_executor.write().await; + block_executor.commit_blocks(vec![block_id], ledger_info_with_sigs)?; + } + Ok(version) + } + + /// Execute a block which gets committed to the state. + pub async fn execute_block( + &self, + block: ExecutableBlock, + ) -> Result { + // todo: this should be deterministic, so let's rehash the block id + let hash_str = format!("{:?}", block.block_id); + let mut hash_bytes = hash_str.as_bytes().to_vec(); + hash_bytes.reverse(); + let metadata_block_id = HashValue::sha3_256_of(&hash_bytes); + + // To correctly update block height, we employ a workaround to execute + // the block metadata transaction in its own block first. 
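+		// In short, as the steps below show: split off the BlockMetadata
+		// transaction, execute it alone under a derived block id, then execute
+		// the remaining transactions under the original block id.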
+ + // pop 0th transaction + let transactions = block.transactions; + let mut transactions = match transactions { + ExecutableTransactions::Unsharded(transactions) => transactions, + _ => anyhow::bail!("Only unsharded transactions are supported"), + }; + if transactions.len() == 0 { + anyhow::bail!("Block must have at least the metadata transaction"); + } + let block_metadata = match transactions.remove(0) { + SignatureVerifiedTransaction::Valid(Transaction::BlockMetadata(block_metadata)) => { + block_metadata + } + _ => anyhow::bail!("Block metadata not found"), + }; + + // execute the block metadata block + self.execute_block_inner(ExecutableBlock::new( + metadata_block_id.clone(), + ExecutableTransactions::Unsharded(vec![SignatureVerifiedTransaction::Valid( + Transaction::BlockMetadata(block_metadata), + )]), + )) + .await?; + + // execute the rest of the block + let version = self + .execute_block_inner(ExecutableBlock::new( + block.block_id.clone(), + ExecutableTransactions::Unsharded(transactions), + )) + .await?; + + let proof = { + let reader = self.db.reader.clone(); + reader.get_state_proof(version)? + }; + + // Context has a reach-around to the db so the block height should + // have been updated to the most recently committed block. + // Race conditions, anyone? + let block_height = self.get_block_head_height()?; + + let commitment = Commitment::digest_state_proof(&proof); + Ok(BlockCommitment { + block_id: Id(*block.block_id), + commitment, + height: block_height.into(), + }) + } + + pub fn get_block_head_height(&self) -> Result { + let ledger_info = self.context.get_latest_ledger_info_wrapped()?; + Ok(ledger_info.block_height.into()) + } + + fn context(&self) -> Arc { + self.context.clone() + } + + pub fn get_apis(&self) -> Apis { + get_apis(self.context()) + } + + pub async fn run_service(&self) -> Result<(), anyhow::Error> { + info!( + "Starting maptos-opt-executor services at: {:?}", + self.aptos_config.aptos_rest_listen_url + ); + + let api_service = get_api_service(self.context()) + .server(format!("http://{:?}", self.aptos_config.aptos_rest_listen_url)); + + let ui = api_service.swagger_ui(); + + let cors = Cors::new() + .allow_methods(vec![Method::GET, Method::POST]) + .allow_credentials(true); + let app = Route::new().nest("/v1", api_service).nest("/spec", ui).with(cors); + + Server::new(TcpListener::bind(self.aptos_config.aptos_rest_listen_url.clone())) + .run(app) + .await + .map_err(|e| anyhow::anyhow!("Server error: {:?}", e))?; + + Ok(()) + } + + pub async fn get_transaction_sequence_number( + &self, + _transaction: &SignedTransaction, + ) -> Result { + // just use the ms since epoch for now + let ms = chrono::Utc::now().timestamp_millis(); + Ok(ms as u64) + } + + /// Ticks the transaction reader. + pub async fn tick_transaction_reader( + &self, + transaction_channel: async_channel::Sender, + ) -> Result<(), anyhow::Error> { + let mut mempool_client_receiver = self.mempool_client_receiver.write().await; + for _ in 0..256 { + // use select to safely timeout a request for a transaction without risking dropping the transaction + // !warn: this may still be unsafe + tokio::select! 
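+			// Note: the 5ms sleep arm bounds each select, so an idle mempool
+			// cannot stall this 256-iteration polling pass.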
{ + _ = tokio::time::sleep(tokio::time::Duration::from_millis(5)) => { () }, + request = mempool_client_receiver.next() => { + match request { + Some(request) => { + match request { + MempoolClientRequest::SubmitTransaction(transaction, callback) => { + // add to the mempool + { + + let mut core_mempool = self.core_mempool.write().await; + + let status = core_mempool.add_txn( + transaction.clone(), + 0, + transaction.sequence_number(), + TimelineState::NonQualified, + true + ); + + match status.code { + MempoolStatusCode::Accepted => { + + }, + _ => { + anyhow::bail!("Transaction not accepted: {:?}", status); + } + } + + // send along to the receiver + transaction_channel.send(transaction).await.map_err( + |e| anyhow::anyhow!("Error sending transaction: {:?}", e) + )?; + + }; + + // report status + let ms = MempoolStatus::new(MempoolStatusCode::Accepted); + let status: SubmissionStatus = (ms, None); + callback.send(Ok(status)).map_err( + |e| anyhow::anyhow!("Error sending callback: {:?}", e) + )?; + + }, + MempoolClientRequest::GetTransactionByHash(hash, sender) => { + let mempool = self.core_mempool.read().await; + let mempool_result = mempool.get_by_hash(hash); + sender.send(mempool_result).map_err( + |e| anyhow::anyhow!("Error sending callback: {:?}", e) + )?; + }, + } + }, + None => { + break; + } + } + } + } + } + + Ok(()) + } + + pub async fn tick_mempool_pipe( + &self, + _transaction_channel: async_channel::Sender, + ) -> Result<(), anyhow::Error> { + // todo: remove this old implementation + + Ok(()) + } + + pub async fn get_next_epoch_and_round(&self) -> Result<(u64, u64), anyhow::Error> { + let epoch = self.db.reader.get_latest_ledger_info()?.ledger_info().next_block_epoch(); + let round = self.db.reader.get_latest_ledger_info()?.ledger_info().round(); + Ok((epoch, round)) + } + + /// Pipes a batch of transactions from the mempool to the transaction channel. + /// todo: it may be wise to move the batching logic up a level to the consuming structs. 
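+	// A typical driver loop, sketched from the tests below (see
+	// `test_pipe_mempool_from_api`); not part of this patch:
+	//
+	//     let (tx, rx) = async_channel::unbounded();
+	//     loop {
+	//         executor.tick_transaction_pipe(tx.clone()).await?;
+	//         tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+	//     }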
+ pub async fn tick_transaction_pipe( + &self, + transaction_channel: async_channel::Sender, + ) -> Result<(), anyhow::Error> { + self.tick_transaction_reader(transaction_channel.clone()).await?; + + self.tick_mempool_pipe(transaction_channel).await?; + + Ok(()) + } } fn ledger_info_with_sigs( - epoch: u64, - round: u64, - block_id: HashValue, - root_hash: HashValue, - version: Version, + epoch: u64, + round: u64, + block_id: HashValue, + root_hash: HashValue, + version: Version, ) -> LedgerInfoWithSignatures { - let block_info = BlockInfo::new( - epoch, round, block_id, root_hash, version, 0, /* timestamp_usecs, doesn't matter */ - None, - ); - let ledger_info = LedgerInfo::new( - block_info, - HashValue::zero(), /* consensus_data_hash, doesn't matter */ - ); - LedgerInfoWithSignatures::new(ledger_info, AggregateSignature::empty() /* signatures */) + let block_info = BlockInfo::new( + epoch, round, block_id, root_hash, version, 0, /* timestamp_usecs, doesn't matter */ + None, + ); + let ledger_info = LedgerInfo::new( + block_info, + HashValue::zero(), /* consensus_data_hash, doesn't matter */ + ); + LedgerInfoWithSignatures::new(ledger_info, AggregateSignature::empty() /* signatures */) } #[cfg(test)] mod tests { - use std::collections::BTreeSet; - - use super::*; - use aptos_api::{accept_type::AcceptType, transactions::SubmitTransactionPost}; - use aptos_crypto::{ - ed25519::{Ed25519PrivateKey, Ed25519Signature}, - HashValue, PrivateKey, Uniform, - }; - use aptos_sdk::{ - transaction_builder::TransactionFactory, - types::{AccountKey, LocalAccount}, - }; - use aptos_storage_interface::state_view::DbStateViewAtVersion; - use aptos_types::account_config::aptos_test_root_address; - use aptos_types::account_view::AccountView; - use aptos_types::state_store::account_with_state_view::AsAccountWithStateView; - use aptos_types::transaction::signature_verified_transaction::into_signature_verified_block; - use aptos_types::{ - account_address::AccountAddress, - block_executor::partitioner::ExecutableTransactions, - block_metadata::BlockMetadata, - chain_id::ChainId, - transaction::{ - signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script, - SignedTransaction, Transaction, TransactionPayload, - }, - }; - use futures::channel::oneshot; - use futures::SinkExt; - use rand::SeedableRng; - - fn create_signed_transaction(gas_unit_price: u64, chain_id: ChainId) -> SignedTransaction { - let private_key = Ed25519PrivateKey::generate_for_testing(); - let public_key = private_key.public_key(); - let transaction_payload = TransactionPayload::Script(Script::new(vec![0], vec![], vec![])); - let raw_transaction = RawTransaction::new( - AccountAddress::random(), - 0, - transaction_payload, - 0, - gas_unit_price, - 0, - chain_id, // This is the value used in aptos testing code. 
- ); - SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature()) - } - - #[tokio::test] - async fn test_execute_block() -> Result<(), anyhow::Error> { - let executor = Executor::try_from_env()?; - let block_id = HashValue::random(); - let block_metadata = Transaction::BlockMetadata(BlockMetadata::new( - block_id, - 0, - 0, - executor.signer.author(), - vec![], - vec![], - chrono::Utc::now().timestamp_micros() as u64, - )); - let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction( - create_signed_transaction(0, executor.aptos_config.chain_id.clone()), - )); - let txs = ExecutableTransactions::Unsharded(vec![ - SignatureVerifiedTransaction::Valid(block_metadata), - tx, - ]); - let block = ExecutableBlock::new(block_id.clone(), txs); - executor.execute_block(block).await?; - Ok(()) - } - - // https://github.com/movementlabsxyz/aptos-core/blob/ea91067b81f9673547417bff9c70d5a2fe1b0e7b/execution/executor-test-helpers/src/integration_test_impl.rs#L535 - #[tokio::test] - async fn test_execute_block_state_db() -> Result<(), anyhow::Error> { - // Create an executor instance from the environment configuration. - let executor = Executor::try_from_env()?; - - // Initialize a root account using a predefined keypair and the test root address. - let root_account = LocalAccount::new( - aptos_test_root_address(), - AccountKey::from_private_key(executor.aptos_config.aptos_private_key.clone()), - 0, - ); - - // Seed for random number generator, used here to generate predictable results in a test environment. - let seed = [3u8; 32]; - let mut rng = ::rand::rngs::StdRng::from_seed(seed); - - // Create a transaction factory with the chain ID of the executor, used for creating transactions. - let tx_factory = TransactionFactory::new(executor.aptos_config.chain_id.clone()); - - // Loop to simulate the execution of multiple blocks. - for i in 0..10 { - let (epoch, round) = executor.get_next_epoch_and_round().await?; - - // Generate a random block ID. - let block_id = HashValue::random(); - // Clone the signer from the executor for signing the metadata. - let signer = executor.signer.clone(); - // Get the current time in microseconds for the block timestamp. - let current_time_micros = chrono::Utc::now().timestamp_micros() as u64; - - // Create a block metadata transaction. - let block_metadata = Transaction::BlockMetadata(BlockMetadata::new( - block_id, - epoch, - round, - signer.author(), - vec![], - vec![], - current_time_micros, - )); - - // Create a state checkpoint transaction using the block ID. - let state_checkpoint_tx = Transaction::StateCheckpoint(block_id.clone()); - // Generate a new account for transaction tests. - let new_account = LocalAccount::generate(&mut rng); - let new_account_address = new_account.address(); - - // Create a user account creation transaction. - let user_account_creation_tx = root_account.sign_with_transaction_builder( - tx_factory.create_user_account(new_account.public_key()), - ); - - // Create a mint transaction to provide the new account with some initial balance. - let mint_tx = root_account - .sign_with_transaction_builder(tx_factory.mint(new_account.address(), 2000)); - // Store the hash of the committed transaction for later verification. 
- let mint_tx_hash = mint_tx.clone().committed_hash(); - - // Block Metadata - let transactions = - ExecutableTransactions::Unsharded(into_signature_verified_block(vec![ - block_metadata, - Transaction::UserTransaction(user_account_creation_tx), - Transaction::UserTransaction(mint_tx), - ])); - let block = ExecutableBlock::new(block_id.clone(), transactions); - let block_commitment = executor.execute_block(block).await?; - - // Access the database reader to verify state after execution. - let db_reader = executor.db.reader.clone(); - // Get the latest version of the blockchain state from the database. - let latest_version = db_reader.get_latest_version()?; - // Verify the transaction by its hash to ensure it was committed. - let transaction_result = - db_reader.get_transaction_by_hash(mint_tx_hash, latest_version, false)?; - assert!(transaction_result.is_some()); - - // Create a state view at the latest version to inspect account states. - let state_view = db_reader.state_view_at_version(Some(latest_version))?; - // Access the state view of the new account to verify its state and existence. - let account_state_view = state_view.as_account_with_state_view(&new_account_address); - let queried_account_address = account_state_view.get_account_address()?; - assert!(queried_account_address.is_some()); - let account_resource = account_state_view.get_account_resource()?; - assert!(account_resource.is_some()); - - // Check the commitment against state proof - let state_proof = db_reader.get_state_proof(latest_version)?; - let expected_commitment = Commitment::digest_state_proof(&state_proof); - assert_eq!(block_commitment.height, i + 1); - assert_eq!(block_commitment.commitment, expected_commitment); - } - - Ok(()) - } - - #[tokio::test] - async fn test_execute_block_state_get_api() -> Result<(), anyhow::Error> { - // Create an executor instance from the environment configuration. - let executor = Executor::try_from_env()?; - - // Initialize a root account using a predefined keypair and the test root address. - let root_account = LocalAccount::new( - aptos_test_root_address(), - AccountKey::from_private_key(executor.aptos_config.aptos_private_key.clone()), - 0, - ); - - // Seed for random number generator, used here to generate predictable results in a test environment. - let seed = [3u8; 32]; - let mut rng = ::rand::rngs::StdRng::from_seed(seed); - - // Create a transaction factory with the chain ID of the executor. - let tx_factory = TransactionFactory::new(executor.aptos_config.chain_id.clone()); - - // Simulate the execution of multiple blocks. - for _ in 0..10 { - // For example, create and execute 3 blocks. - let (epoch, round) = executor.get_next_epoch_and_round().await?; - - let block_id = HashValue::random(); // Generate a random block ID for each block. - - // Clone the signer from the executor for signing the metadata. - let signer = executor.signer.clone(); - // Get the current time in microseconds for the block timestamp. - let current_time_micros = chrono::Utc::now().timestamp_micros() as u64; - - // Create a block metadata transaction. - let block_metadata = Transaction::BlockMetadata(BlockMetadata::new( - block_id, - epoch, - round, - signer.author(), - vec![], - vec![], - current_time_micros, - )); - - // Generate new accounts and create transactions for each block. - let mut transactions = Vec::new(); - let mut transaction_hashes = Vec::new(); - transactions.push(block_metadata.clone()); - for _ in 0..2 { - // Each block will contain 2 transactions. 
- let new_account = LocalAccount::generate(&mut rng); - let user_account_creation_tx = root_account.sign_with_transaction_builder( - tx_factory.create_user_account(new_account.public_key()), - ); - let tx_hash = user_account_creation_tx.clone().committed_hash(); - transaction_hashes.push(tx_hash); - transactions.push(Transaction::UserTransaction(user_account_creation_tx)); - } - - // Group all transactions into an unsharded block for execution. - let executable_transactions = ExecutableTransactions::Unsharded( - transactions.into_iter().map(SignatureVerifiedTransaction::Valid).collect(), - ); - let block = ExecutableBlock::new(block_id.clone(), executable_transactions); - executor.execute_block(block).await?; - - // Retrieve the executor's API interface and fetch the transaction by each hash. - let apis = executor.get_apis(); - for hash in transaction_hashes { - let _ = apis - .transactions - .get_transaction_by_hash_inner(&AcceptType::Bcs, hash.into()) - .await?; - } - } - - Ok(()) - } - - #[tokio::test] - async fn test_pipe_mempool() -> Result<(), anyhow::Error> { - // header - let mut executor = Executor::try_from_env()?; - let user_transaction = create_signed_transaction(0, executor.aptos_config.chain_id.clone()); - - // send transaction to mempool - let (req_sender, callback) = oneshot::channel(); - executor - .mempool_client_sender - .send(MempoolClientRequest::SubmitTransaction(user_transaction.clone(), req_sender)) - .await?; - - // tick the transaction pipe - let (tx, rx) = async_channel::unbounded(); - executor.tick_transaction_pipe(tx).await?; - - // receive the callback - callback.await??; - - // receive the transaction - let received_transaction = rx.recv().await?; - assert_eq!(received_transaction, user_transaction); - - Ok(()) - } - - #[tokio::test] - async fn test_pipe_mempool_while_server_running() -> Result<(), anyhow::Error> { - let mut executor = Executor::try_from_env()?; - let server_executor = executor.clone(); - - let handle = tokio::spawn(async move { - server_executor.run_service().await?; - Ok(()) as Result<(), anyhow::Error> - }); - - let user_transaction = create_signed_transaction(0, executor.aptos_config.chain_id.clone()); - - // send transaction to mempool - let (req_sender, callback) = oneshot::channel(); - executor - .mempool_client_sender - .send(MempoolClientRequest::SubmitTransaction(user_transaction.clone(), req_sender)) - .await?; - - // tick the transaction pipe - let (tx, rx) = async_channel::unbounded(); - executor.tick_transaction_pipe(tx).await?; - - // receive the callback - callback.await??; - - // receive the transaction - let received_transaction = rx.recv().await?; - assert_eq!(received_transaction, user_transaction); - - handle.abort(); - - Ok(()) - } - - #[tokio::test] - async fn test_pipe_mempool_from_api() -> Result<(), anyhow::Error> { - let executor = Executor::try_from_env()?; - let mempool_executor = executor.clone(); - - let (tx, rx) = async_channel::unbounded(); - let mempool_handle = tokio::spawn(async move { - loop { - mempool_executor.tick_transaction_pipe(tx.clone()).await?; - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - } - Ok(()) as Result<(), anyhow::Error> - }); - - let api = executor.get_apis(); - let user_transaction = create_signed_transaction(0, executor.aptos_config.chain_id.clone()); - let comparison_user_transaction = user_transaction.clone(); - let bcs_user_transaction = bcs::to_bytes(&user_transaction)?; - let request = 
SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction)); - api.transactions.submit_transaction(AcceptType::Bcs, request).await?; - let received_transaction = rx.recv().await?; - assert_eq!(received_transaction, comparison_user_transaction); - - mempool_handle.abort(); - - Ok(()) - } - - #[tokio::test] - async fn test_repeated_pipe_mempool_from_api() -> Result<(), anyhow::Error> { - let executor = Executor::try_from_env()?; - let mempool_executor = executor.clone(); - - let (tx, rx) = async_channel::unbounded(); - let mempool_handle = tokio::spawn(async move { - loop { - mempool_executor.tick_transaction_pipe(tx.clone()).await?; - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - } - Ok(()) as Result<(), anyhow::Error> - }); - - let api = executor.get_apis(); - let mut user_transactions = BTreeSet::new(); - let mut comparison_user_transactions = BTreeSet::new(); - for _ in 0..25 { - let user_transaction = - create_signed_transaction(0, executor.aptos_config.chain_id.clone()); - let bcs_user_transaction = bcs::to_bytes(&user_transaction)?; - user_transactions.insert(bcs_user_transaction.clone()); - - let request = - SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction)); - api.transactions.submit_transaction(AcceptType::Bcs, request).await?; - - let received_transaction = rx.recv().await?; - let bcs_received_transaction = bcs::to_bytes(&received_transaction)?; - comparison_user_transactions.insert(bcs_received_transaction.clone()); - } - - assert_eq!(user_transactions.len(), comparison_user_transactions.len()); - assert_eq!(user_transactions, comparison_user_transactions); - - mempool_handle.abort(); - - Ok(()) - } + use std::collections::BTreeSet; + + use super::*; + use aptos_api::{accept_type::AcceptType, transactions::SubmitTransactionPost}; + use aptos_crypto::{ + ed25519::{Ed25519PrivateKey, Ed25519Signature}, + HashValue, PrivateKey, Uniform, + }; + use aptos_sdk::{ + transaction_builder::TransactionFactory, + types::{AccountKey, LocalAccount}, + }; + use aptos_storage_interface::state_view::DbStateViewAtVersion; + use aptos_types::account_config::aptos_test_root_address; + use aptos_types::account_view::AccountView; + use aptos_types::state_store::account_with_state_view::AsAccountWithStateView; + use aptos_types::transaction::signature_verified_transaction::into_signature_verified_block; + use aptos_types::{ + account_address::AccountAddress, + block_executor::partitioner::ExecutableTransactions, + block_metadata::BlockMetadata, + chain_id::ChainId, + transaction::{ + signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script, + SignedTransaction, Transaction, TransactionPayload, + }, + }; + use futures::channel::oneshot; + use futures::SinkExt; + use rand::SeedableRng; + + fn create_signed_transaction(gas_unit_price: u64, chain_id: ChainId) -> SignedTransaction { + let private_key = Ed25519PrivateKey::generate_for_testing(); + let public_key = private_key.public_key(); + let transaction_payload = TransactionPayload::Script(Script::new(vec![0], vec![], vec![])); + let raw_transaction = RawTransaction::new( + AccountAddress::random(), + 0, + transaction_payload, + 0, + gas_unit_price, + 0, + chain_id, // This is the value used in aptos testing code. 
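+			// Note: the zeroed sequence number, max gas, and expiration above
+			// are dummy values; the tests below presumably only need a
+			// well-formed, signed transaction.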
+ ); + SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature()) + } + + #[tokio::test] + async fn test_execute_block() -> Result<(), anyhow::Error> { + let executor = Executor::try_from_env()?; + let block_id = HashValue::random(); + let block_metadata = Transaction::BlockMetadata(BlockMetadata::new( + block_id, + 0, + 0, + executor.signer.author(), + vec![], + vec![], + chrono::Utc::now().timestamp_micros() as u64, + )); + let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction( + create_signed_transaction(0, executor.aptos_config.chain_id.clone()), + )); + let txs = ExecutableTransactions::Unsharded(vec![ + SignatureVerifiedTransaction::Valid(block_metadata), + tx, + ]); + let block = ExecutableBlock::new(block_id.clone(), txs); + executor.execute_block(block).await?; + Ok(()) + } + + // https://github.com/movementlabsxyz/aptos-core/blob/ea91067b81f9673547417bff9c70d5a2fe1b0e7b/execution/executor-test-helpers/src/integration_test_impl.rs#L535 + #[tokio::test] + async fn test_execute_block_state_db() -> Result<(), anyhow::Error> { + // Create an executor instance from the environment configuration. + let executor = Executor::try_from_env()?; + + // Initialize a root account using a predefined keypair and the test root address. + let root_account = LocalAccount::new( + aptos_test_root_address(), + AccountKey::from_private_key(executor.aptos_config.aptos_private_key.clone()), + 0, + ); + + // Seed for random number generator, used here to generate predictable results in a test environment. + let seed = [3u8; 32]; + let mut rng = ::rand::rngs::StdRng::from_seed(seed); + + // Create a transaction factory with the chain ID of the executor, used for creating transactions. + let tx_factory = TransactionFactory::new(executor.aptos_config.chain_id.clone()); + + // Loop to simulate the execution of multiple blocks. + for i in 0..10 { + let (epoch, round) = executor.get_next_epoch_and_round().await?; + + // Generate a random block ID. + let block_id = HashValue::random(); + // Clone the signer from the executor for signing the metadata. + let signer = executor.signer.clone(); + // Get the current time in microseconds for the block timestamp. + let current_time_micros = chrono::Utc::now().timestamp_micros() as u64; + + // Create a block metadata transaction. + let block_metadata = Transaction::BlockMetadata(BlockMetadata::new( + block_id, + epoch, + round, + signer.author(), + vec![], + vec![], + current_time_micros, + )); + + // Create a state checkpoint transaction using the block ID. + let state_checkpoint_tx = Transaction::StateCheckpoint(block_id.clone()); + // Generate a new account for transaction tests. + let new_account = LocalAccount::generate(&mut rng); + let new_account_address = new_account.address(); + + // Create a user account creation transaction. + let user_account_creation_tx = root_account.sign_with_transaction_builder( + tx_factory.create_user_account(new_account.public_key()), + ); + + // Create a mint transaction to provide the new account with some initial balance. + let mint_tx = root_account + .sign_with_transaction_builder(tx_factory.mint(new_account.address(), 2000)); + // Store the hash of the committed transaction for later verification. 
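+			// Note: `state_checkpoint_tx` above is constructed but never added
+			// to the block below; it appears to be unused scaffolding.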
+ let mint_tx_hash = mint_tx.clone().committed_hash(); + + // Block Metadata + let transactions = + ExecutableTransactions::Unsharded(into_signature_verified_block(vec![ + block_metadata, + Transaction::UserTransaction(user_account_creation_tx), + Transaction::UserTransaction(mint_tx), + ])); + let block = ExecutableBlock::new(block_id.clone(), transactions); + let block_commitment = executor.execute_block(block).await?; + + // Access the database reader to verify state after execution. + let db_reader = executor.db.reader.clone(); + // Get the latest version of the blockchain state from the database. + let latest_version = db_reader.get_latest_version()?; + // Verify the transaction by its hash to ensure it was committed. + let transaction_result = + db_reader.get_transaction_by_hash(mint_tx_hash, latest_version, false)?; + assert!(transaction_result.is_some()); + + // Create a state view at the latest version to inspect account states. + let state_view = db_reader.state_view_at_version(Some(latest_version))?; + // Access the state view of the new account to verify its state and existence. + let account_state_view = state_view.as_account_with_state_view(&new_account_address); + let queried_account_address = account_state_view.get_account_address()?; + assert!(queried_account_address.is_some()); + let account_resource = account_state_view.get_account_resource()?; + assert!(account_resource.is_some()); + + // Check the commitment against state proof + let state_proof = db_reader.get_state_proof(latest_version)?; + let expected_commitment = Commitment::digest_state_proof(&state_proof); + assert_eq!(block_commitment.height, i + 1); + assert_eq!(block_commitment.commitment, expected_commitment); + } + + Ok(()) + } + + #[tokio::test] + async fn test_execute_block_state_get_api() -> Result<(), anyhow::Error> { + // Create an executor instance from the environment configuration. + let executor = Executor::try_from_env()?; + + // Initialize a root account using a predefined keypair and the test root address. + let root_account = LocalAccount::new( + aptos_test_root_address(), + AccountKey::from_private_key(executor.aptos_config.aptos_private_key.clone()), + 0, + ); + + // Seed for random number generator, used here to generate predictable results in a test environment. + let seed = [3u8; 32]; + let mut rng = ::rand::rngs::StdRng::from_seed(seed); + + // Create a transaction factory with the chain ID of the executor. + let tx_factory = TransactionFactory::new(executor.aptos_config.chain_id.clone()); + + // Simulate the execution of multiple blocks. + for _ in 0..10 { + // For example, create and execute 3 blocks. + let (epoch, round) = executor.get_next_epoch_and_round().await?; + + let block_id = HashValue::random(); // Generate a random block ID for each block. + + // Clone the signer from the executor for signing the metadata. + let signer = executor.signer.clone(); + // Get the current time in microseconds for the block timestamp. + let current_time_micros = chrono::Utc::now().timestamp_micros() as u64; + + // Create a block metadata transaction. + let block_metadata = Transaction::BlockMetadata(BlockMetadata::new( + block_id, + epoch, + round, + signer.author(), + vec![], + vec![], + current_time_micros, + )); + + // Generate new accounts and create transactions for each block. + let mut transactions = Vec::new(); + let mut transaction_hashes = Vec::new(); + transactions.push(block_metadata.clone()); + for _ in 0..2 { + // Each block will contain 2 transactions. 
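+				// (plus the block metadata transaction pushed above, so each
+				// executed block carries three transactions in total)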
+ let new_account = LocalAccount::generate(&mut rng); + let user_account_creation_tx = root_account.sign_with_transaction_builder( + tx_factory.create_user_account(new_account.public_key()), + ); + let tx_hash = user_account_creation_tx.clone().committed_hash(); + transaction_hashes.push(tx_hash); + transactions.push(Transaction::UserTransaction(user_account_creation_tx)); + } + + // Group all transactions into an unsharded block for execution. + let executable_transactions = ExecutableTransactions::Unsharded( + transactions.into_iter().map(SignatureVerifiedTransaction::Valid).collect(), + ); + let block = ExecutableBlock::new(block_id.clone(), executable_transactions); + executor.execute_block(block).await?; + + // Retrieve the executor's API interface and fetch the transaction by each hash. + let apis = executor.get_apis(); + for hash in transaction_hashes { + let _ = apis + .transactions + .get_transaction_by_hash_inner(&AcceptType::Bcs, hash.into()) + .await?; + } + } + + Ok(()) + } + + #[tokio::test] + async fn test_pipe_mempool() -> Result<(), anyhow::Error> { + // header + let mut executor = Executor::try_from_env()?; + let user_transaction = create_signed_transaction(0, executor.aptos_config.chain_id.clone()); + + // send transaction to mempool + let (req_sender, callback) = oneshot::channel(); + executor + .mempool_client_sender + .send(MempoolClientRequest::SubmitTransaction(user_transaction.clone(), req_sender)) + .await?; + + // tick the transaction pipe + let (tx, rx) = async_channel::unbounded(); + executor.tick_transaction_pipe(tx).await?; + + // receive the callback + callback.await??; + + // receive the transaction + let received_transaction = rx.recv().await?; + assert_eq!(received_transaction, user_transaction); + + Ok(()) + } + + #[tokio::test] + async fn test_pipe_mempool_while_server_running() -> Result<(), anyhow::Error> { + let mut executor = Executor::try_from_env()?; + let server_executor = executor.clone(); + + let handle = tokio::spawn(async move { + server_executor.run_service().await?; + Ok(()) as Result<(), anyhow::Error> + }); + + let user_transaction = create_signed_transaction(0, executor.aptos_config.chain_id.clone()); + + // send transaction to mempool + let (req_sender, callback) = oneshot::channel(); + executor + .mempool_client_sender + .send(MempoolClientRequest::SubmitTransaction(user_transaction.clone(), req_sender)) + .await?; + + // tick the transaction pipe + let (tx, rx) = async_channel::unbounded(); + executor.tick_transaction_pipe(tx).await?; + + // receive the callback + callback.await??; + + // receive the transaction + let received_transaction = rx.recv().await?; + assert_eq!(received_transaction, user_transaction); + + handle.abort(); + + Ok(()) + } + + #[tokio::test] + async fn test_pipe_mempool_from_api() -> Result<(), anyhow::Error> { + let executor = Executor::try_from_env()?; + let mempool_executor = executor.clone(); + + let (tx, rx) = async_channel::unbounded(); + let mempool_handle = tokio::spawn(async move { + loop { + mempool_executor.tick_transaction_pipe(tx.clone()).await?; + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } + Ok(()) as Result<(), anyhow::Error> + }); + + let api = executor.get_apis(); + let user_transaction = create_signed_transaction(0, executor.aptos_config.chain_id.clone()); + let comparison_user_transaction = user_transaction.clone(); + let bcs_user_transaction = bcs::to_bytes(&user_transaction)?; + let request = 
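+		// Note: submitting through the BCS payload path exercises the same
+		// API surface an external client would use; `tick_transaction_pipe`
+		// then drains the transaction into `rx` below.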
+			SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction));
+		api.transactions.submit_transaction(AcceptType::Bcs, request).await?;
+		let received_transaction = rx.recv().await?;
+		assert_eq!(received_transaction, comparison_user_transaction);
+
+		mempool_handle.abort();
+
+		Ok(())
+	}
+
+	#[tokio::test]
+	async fn test_repeated_pipe_mempool_from_api() -> Result<(), anyhow::Error> {
+		let executor = Executor::try_from_env()?;
+		let mempool_executor = executor.clone();
+
+		let (tx, rx) = async_channel::unbounded();
+		let mempool_handle = tokio::spawn(async move {
+			loop {
+				mempool_executor.tick_transaction_pipe(tx.clone()).await?;
+				tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+			}
+			Ok(()) as Result<(), anyhow::Error>
+		});
+
+		let api = executor.get_apis();
+		let mut user_transactions = BTreeSet::new();
+		let mut comparison_user_transactions = BTreeSet::new();
+		for _ in 0..25 {
+			let user_transaction =
+				create_signed_transaction(0, executor.aptos_config.chain_id.clone());
+			let bcs_user_transaction = bcs::to_bytes(&user_transaction)?;
+			user_transactions.insert(bcs_user_transaction.clone());
+
+			let request =
+				SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction));
+			api.transactions.submit_transaction(AcceptType::Bcs, request).await?;
+
+			let received_transaction = rx.recv().await?;
+			let bcs_received_transaction = bcs::to_bytes(&received_transaction)?;
+			comparison_user_transactions.insert(bcs_received_transaction.clone());
+		}
+
+		assert_eq!(user_transactions.len(), comparison_user_transactions.len());
+		assert_eq!(user_transactions, comparison_user_transactions);
+
+		mempool_handle.abort();
+
+		Ok(())
+	}
}
diff --git a/protocol-units/execution/maptos/util/src/config.rs b/protocol-units/execution/maptos/util/src/config.rs
index 2eabeb084..36c8a26a4 100644
--- a/protocol-units/execution/maptos/util/src/config.rs
+++ b/protocol-units/execution/maptos/util/src/config.rs
@@ -1,163 +1,163 @@
 pub mod just_aptos {
-    use std::path::PathBuf;
-
-    use anyhow::Context;
-    use aptos_crypto::{
-        ed25519::{Ed25519PrivateKey, Ed25519PublicKey},
-        PrivateKey, Uniform, ValidCryptoMaterialStringExt,
-    };
-    use aptos_sdk::types::chain_id::ChainId;
-
-    #[derive(Debug, Clone, PartialEq, Eq)]
-    pub struct Config {
-        pub chain_id: ChainId,
-        pub aptos_rest_listen_url: String,
-        pub aptos_faucet_listen_url: String,
-        pub aptos_private_key: Ed25519PrivateKey,
-        pub aptos_public_key: Ed25519PublicKey,
-        pub aptos_db_path: PathBuf,
-    }
-
-    impl Config {
-        pub const CHAIN_ID_ENV_VAR: &'static str = "MONZA_CHAIN_ID";
-        pub const APTOS_REST_LISTEN_ADDR_ENV_VAR: &'static str = "MONZA_APTOS_REST_LISTEN_ADDR";
-        pub const APTOS_FAUCET_LISTEN_ADDR_ENV_VAR: &'static str = "MONZA_APTOS_FAUCET_LISTEN_ADDR";
-        pub const APTOS_PRIVATE_KEY_ENV_VAR: &'static str = "MONZA_APTOS_PRIVATE_KEY";
-        pub const APTOS_PUBLIC_KEY_ENV_VAR: &'static str = "MONZA_APTOS_PUBLIC_KEY";
-        pub const APTOS_DB_PATH_ENV_VAR: &'static str = "MONZA_APTOS_DB_PATH";
-
-        pub fn new(
-            chain_id: ChainId,
-            aptos_rest_listen_url: String,
-            aptos_faucet_listen_url: String,
-            aptos_private_key: Ed25519PrivateKey,
-            aptos_public_key: Ed25519PublicKey,
-            aptos_db_path: PathBuf,
-        ) -> Self {
-            Self {
-                chain_id,
-                aptos_rest_listen_url,
-                aptos_faucet_listen_url,
-                aptos_private_key,
-                aptos_public_key,
-                aptos_db_path,
-            }
-        }
-
-        pub fn try_from_env() -> Result<Self, anyhow::Error> {
-            let chain_id = match std::env::var(Self::CHAIN_ID_ENV_VAR) {
-                Ok(chain_id) => {
-                    serde_json::from_str(chain_id.as_str()).context("Failed to parse chain id")?
-                }
-                Err(_) => ChainId::default(),
-            };
-
-            let aptos_rest_listen_url = std::env::var(Self::APTOS_REST_LISTEN_ADDR_ENV_VAR)
-                .unwrap_or("0.0.0.0:30731".to_string());
-
-            let aptos_faucet_listen_url = std::env::var(Self::APTOS_FAUCET_LISTEN_ADDR_ENV_VAR)
-                .unwrap_or("0.0.0.0:30732".to_string());
-
-            let aptos_private_key = match std::env::var(Self::APTOS_PRIVATE_KEY_ENV_VAR) {
-                Ok(private_key) => Ed25519PrivateKey::from_encoded_string(private_key.as_str())
-                    .context("Failed to parse private key")?,
-                Err(_) => Ed25519PrivateKey::generate(&mut rand::thread_rng()),
-            };
-
-            let aptos_public_key = aptos_private_key.public_key();
-
-            let aptos_db_path = match std::env::var(Self::APTOS_DB_PATH_ENV_VAR) {
-                Ok(db_path) => PathBuf::from(db_path),
-                Err(_) => {
-                    // generate a tempdir
-                    // this should work because the dir will be top level of /tmp
-                    let tempdir = tempfile::tempdir()?;
-                    tempdir.into_path()
-                }
-            };
-
-            Ok(Self {
-                chain_id,
-                aptos_rest_listen_url,
-                aptos_faucet_listen_url,
-                aptos_private_key,
-                aptos_public_key,
-                aptos_db_path,
-            })
-        }
-
-        pub fn write_to_env(&self) -> Result<(), anyhow::Error> {
-            std::env::set_var(Self::CHAIN_ID_ENV_VAR, self.chain_id.to_string());
-            std::env::set_var(
-                Self::APTOS_REST_LISTEN_ADDR_ENV_VAR,
-                self.aptos_rest_listen_url.clone(),
-            );
-            std::env::set_var(
-                Self::APTOS_FAUCET_LISTEN_ADDR_ENV_VAR,
-                self.aptos_faucet_listen_url.clone(),
-            );
-            std::env::set_var(
-                Self::APTOS_PRIVATE_KEY_ENV_VAR,
-                self.aptos_private_key.to_encoded_string()?,
-            );
-            std::env::set_var(
-                Self::APTOS_PUBLIC_KEY_ENV_VAR,
-                self.aptos_public_key.to_encoded_string()?,
-            );
-            Ok(())
-        }
-
-        pub fn write_bash_export_string(&self) -> Result<String, anyhow::Error> {
-            Ok(format!(
-                "export {}={}\nexport {}={}\nexport {}={}\nexport {}={}\nexport {}={}",
-                Self::CHAIN_ID_ENV_VAR,
-                serde_json::to_string(&self.chain_id)?,
-                Self::APTOS_REST_LISTEN_ADDR_ENV_VAR,
-                self.aptos_rest_listen_url,
-                Self::APTOS_FAUCET_LISTEN_ADDR_ENV_VAR,
-                self.aptos_faucet_listen_url,
-                Self::APTOS_PRIVATE_KEY_ENV_VAR,
-                self.aptos_private_key.to_encoded_string()?,
-                Self::APTOS_PUBLIC_KEY_ENV_VAR,
-                self.aptos_public_key.to_encoded_string()?
-            ))
-        }
-    }
+	use std::path::PathBuf;
+
+	use anyhow::Context;
+	use aptos_crypto::{
+		ed25519::{Ed25519PrivateKey, Ed25519PublicKey},
+		PrivateKey, Uniform, ValidCryptoMaterialStringExt,
+	};
+	use aptos_sdk::types::chain_id::ChainId;
+
+	#[derive(Debug, Clone, PartialEq, Eq)]
+	pub struct Config {
+		pub chain_id: ChainId,
+		pub aptos_rest_listen_url: String,
+		pub aptos_faucet_listen_url: String,
+		pub aptos_private_key: Ed25519PrivateKey,
+		pub aptos_public_key: Ed25519PublicKey,
+		pub aptos_db_path: PathBuf,
+	}
+
+	impl Config {
+		pub const CHAIN_ID_ENV_VAR: &'static str = "MONZA_CHAIN_ID";
+		pub const APTOS_REST_LISTEN_ADDR_ENV_VAR: &'static str = "MONZA_APTOS_REST_LISTEN_ADDR";
+		pub const APTOS_FAUCET_LISTEN_ADDR_ENV_VAR: &'static str = "MONZA_APTOS_FAUCET_LISTEN_ADDR";
+		pub const APTOS_PRIVATE_KEY_ENV_VAR: &'static str = "MONZA_APTOS_PRIVATE_KEY";
+		pub const APTOS_PUBLIC_KEY_ENV_VAR: &'static str = "MONZA_APTOS_PUBLIC_KEY";
+		pub const APTOS_DB_PATH_ENV_VAR: &'static str = "MONZA_APTOS_DB_PATH";
+
+		pub fn new(
+			chain_id: ChainId,
+			aptos_rest_listen_url: String,
+			aptos_faucet_listen_url: String,
+			aptos_private_key: Ed25519PrivateKey,
+			aptos_public_key: Ed25519PublicKey,
+			aptos_db_path: PathBuf,
+		) -> Self {
+			Self {
+				chain_id,
+				aptos_rest_listen_url,
+				aptos_faucet_listen_url,
+				aptos_private_key,
+				aptos_public_key,
+				aptos_db_path,
+			}
+		}
+
+		pub fn try_from_env() -> Result<Self, anyhow::Error> {
+			let chain_id = match std::env::var(Self::CHAIN_ID_ENV_VAR) {
+				Ok(chain_id) => {
+					serde_json::from_str(chain_id.as_str()).context("Failed to parse chain id")?
+				}
+				Err(_) => ChainId::default(),
+			};
+
+			let aptos_rest_listen_url = std::env::var(Self::APTOS_REST_LISTEN_ADDR_ENV_VAR)
+				.unwrap_or("0.0.0.0:30731".to_string());
+
+			let aptos_faucet_listen_url = std::env::var(Self::APTOS_FAUCET_LISTEN_ADDR_ENV_VAR)
+				.unwrap_or("0.0.0.0:30732".to_string());
+
+			let aptos_private_key = match std::env::var(Self::APTOS_PRIVATE_KEY_ENV_VAR) {
+				Ok(private_key) => Ed25519PrivateKey::from_encoded_string(private_key.as_str())
+					.context("Failed to parse private key")?,
+				Err(_) => Ed25519PrivateKey::generate(&mut rand::thread_rng()),
+			};
+
+			let aptos_public_key = aptos_private_key.public_key();
+
+			let aptos_db_path = match std::env::var(Self::APTOS_DB_PATH_ENV_VAR) {
+				Ok(db_path) => PathBuf::from(db_path),
+				Err(_) => {
+					// generate a tempdir
+					// this should work because the dir will be top level of /tmp
+					let tempdir = tempfile::tempdir()?;
+					tempdir.into_path()
+				}
+			};
+
+			Ok(Self {
+				chain_id,
+				aptos_rest_listen_url,
+				aptos_faucet_listen_url,
+				aptos_private_key,
+				aptos_public_key,
+				aptos_db_path,
+			})
+		}
+
+		pub fn write_to_env(&self) -> Result<(), anyhow::Error> {
+			std::env::set_var(Self::CHAIN_ID_ENV_VAR, self.chain_id.to_string());
+			std::env::set_var(
+				Self::APTOS_REST_LISTEN_ADDR_ENV_VAR,
+				self.aptos_rest_listen_url.clone(),
+			);
+			std::env::set_var(
+				Self::APTOS_FAUCET_LISTEN_ADDR_ENV_VAR,
+				self.aptos_faucet_listen_url.clone(),
+			);
+			std::env::set_var(
+				Self::APTOS_PRIVATE_KEY_ENV_VAR,
+				self.aptos_private_key.to_encoded_string()?,
+			);
+			std::env::set_var(
+				Self::APTOS_PUBLIC_KEY_ENV_VAR,
+				self.aptos_public_key.to_encoded_string()?,
+			);
+			Ok(())
+		}
+
+		pub fn write_bash_export_string(&self) -> Result<String, anyhow::Error> {
+			Ok(format!(
+				"export {}={}\nexport {}={}\nexport {}={}\nexport {}={}\nexport {}={}",
+				Self::CHAIN_ID_ENV_VAR,
+				serde_json::to_string(&self.chain_id)?,
+				Self::APTOS_REST_LISTEN_ADDR_ENV_VAR,
+				self.aptos_rest_listen_url,
+				Self::APTOS_FAUCET_LISTEN_ADDR_ENV_VAR,
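+				// Editor's note: these values are interpolated into the export string
+				// unquoted; if a value could ever contain shell metacharacters, it
+				// should be quoted before the output is sourced.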
+				self.aptos_faucet_listen_url,
+				Self::APTOS_PRIVATE_KEY_ENV_VAR,
+				self.aptos_private_key.to_encoded_string()?,
+				Self::APTOS_PUBLIC_KEY_ENV_VAR,
+				self.aptos_public_key.to_encoded_string()?
+			))
+		}
+	}
 }
 
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Config {
-    pub aptos_config: just_aptos::Config,
-    pub light_node_config: m1_da_light_node_util::Config,
+	pub aptos_config: just_aptos::Config,
+	pub light_node_config: m1_da_light_node_util::Config,
 }
 
 impl Config {
-    pub fn new(
-        aptos_config: just_aptos::Config,
-        light_node_config: m1_da_light_node_util::Config,
-    ) -> Self {
-        Self { aptos_config, light_node_config }
-    }
-
-    pub fn try_from_env() -> Result<Self, anyhow::Error> {
-        let aptos_config = just_aptos::Config::try_from_env()?;
-        let light_node_config = m1_da_light_node_util::Config::try_from_env()?;
-
-        Ok(Self { aptos_config, light_node_config })
-    }
-
-    pub fn write_to_env(&self) -> Result<(), anyhow::Error> {
-        self.aptos_config.write_to_env()?;
-        self.light_node_config.write_to_env()?;
-        Ok(())
-    }
-
-    pub fn write_bash_export_string(&self) -> Result<String, anyhow::Error> {
-        Ok(format!(
-            "{}\n{}",
-            self.aptos_config.write_bash_export_string()?,
-            self.light_node_config.write_bash_export_string()?
-        ))
-    }
+	pub fn new(
+		aptos_config: just_aptos::Config,
+		light_node_config: m1_da_light_node_util::Config,
+	) -> Self {
+		Self { aptos_config, light_node_config }
+	}
+
+	pub fn try_from_env() -> Result<Self, anyhow::Error> {
+		let aptos_config = just_aptos::Config::try_from_env()?;
+		let light_node_config = m1_da_light_node_util::Config::try_from_env()?;
+
+		Ok(Self { aptos_config, light_node_config })
+	}
+
+	pub fn write_to_env(&self) -> Result<(), anyhow::Error> {
+		self.aptos_config.write_to_env()?;
+		self.light_node_config.write_to_env()?;
+		Ok(())
+	}
+
+	pub fn write_bash_export_string(&self) -> Result<String, anyhow::Error> {
+		Ok(format!(
+			"{}\n{}",
+			self.aptos_config.write_bash_export_string()?,
+			self.light_node_config.write_bash_export_string()?
+		))
+	}
 }
diff --git a/protocol-units/execution/monza/executor/src/lib.rs b/protocol-units/execution/monza/executor/src/lib.rs
index 579678dec..d14760de8 100644
--- a/protocol-units/execution/monza/executor/src/lib.rs
+++ b/protocol-units/execution/monza/executor/src/lib.rs
@@ -3,11 +3,11 @@ pub mod v1;
 pub use aptos_api::runtime::Apis;
 pub use aptos_crypto::hash::HashValue;
 pub use aptos_types::{
-    block_executor::partitioner::ExecutableBlock,
-    block_executor::partitioner::ExecutableTransactions,
-    block_metadata::BlockMetadata,
-    transaction::signature_verified_transaction::SignatureVerifiedTransaction,
-    transaction::{SignedTransaction, Transaction},
+	block_executor::partitioner::ExecutableBlock,
+	block_executor::partitioner::ExecutableTransactions,
+	block_metadata::BlockMetadata,
+	transaction::signature_verified_transaction::SignatureVerifiedTransaction,
+	transaction::{SignedTransaction, Transaction},
 };
 pub use movement_types::BlockCommitment;
 
@@ -16,31 +16,31 @@ use async_channel::Sender;
 
 #[tonic::async_trait]
 pub trait MonzaExecutor {
-    /// Runs the service
-    async fn run_service(&self) -> Result<(), anyhow::Error>;
+	/// Runs the service
+	async fn run_service(&self) -> Result<(), anyhow::Error>;
 
-    /// Runs the necessary background tasks.
-    async fn run_background_tasks(&self) -> Result<(), anyhow::Error>;
+	/// Runs the necessary background tasks.
+	async fn run_background_tasks(&self) -> Result<(), anyhow::Error>;
 
-    /// Executes a block optimistically
-    async fn execute_block_opt(
-        &self,
-        block: ExecutableBlock,
-    ) -> Result<BlockCommitment, anyhow::Error>;
+	/// Executes a block optimistically
+	async fn execute_block_opt(
+		&self,
+		block: ExecutableBlock,
+	) -> Result<BlockCommitment, anyhow::Error>;
 
-    /// Sets the transaction channel.
-    fn set_tx_channel(&mut self, tx_channel: Sender<SignedTransaction>);
+	/// Sets the transaction channel.
+	fn set_tx_channel(&mut self, tx_channel: Sender<SignedTransaction>);
 
-    /// Gets the dyn API.
-    fn get_apis(&self) -> Apis;
+	/// Gets the dyn API.
+	fn get_apis(&self) -> Apis;
 
-    /// Get block head height.
-    async fn get_block_head_height(&self) -> Result<u64, anyhow::Error>;
+	/// Get block head height.
+	async fn get_block_head_height(&self) -> Result<u64, anyhow::Error>;
 
-    /// Build block metadata for a timestamp
-    async fn build_block_metadata(
-        &self,
-        block_id: HashValue,
-        timestamp: u64,
-    ) -> Result<BlockMetadata, anyhow::Error>;
+	/// Build block metadata for a timestamp
+	async fn build_block_metadata(
+		&self,
+		block_id: HashValue,
+		timestamp: u64,
+	) -> Result<BlockMetadata, anyhow::Error>;
 }
diff --git a/protocol-units/execution/monza/executor/src/v1.rs b/protocol-units/execution/monza/executor/src/v1.rs
index 9ce5838b6..9cbe1ce16 100644
--- a/protocol-units/execution/monza/executor/src/v1.rs
+++ b/protocol-units/execution/monza/executor/src/v1.rs
@@ -9,374 +9,374 @@ use tracing::debug;
 
 #[derive(Clone)]
 pub struct MonzaExecutorV1 {
-    // this rwlock may be somewhat redundant
-    pub executor: Executor,
-    pub transaction_channel: Sender<SignedTransaction>,
+	// this rwlock may be somewhat redundant
+	pub executor: Executor,
+	pub transaction_channel: Sender<SignedTransaction>,
 }
 
 impl MonzaExecutorV1 {
-    pub fn new(executor: Executor, transaction_channel: Sender<SignedTransaction>) -> Self {
-        Self { executor, transaction_channel }
-    }
-
-    pub async fn try_from_env(
-        transaction_channel: Sender<SignedTransaction>,
-    ) -> Result<Self, anyhow::Error> {
-        let executor = Executor::try_from_env()?;
-        Ok(Self::new(executor, transaction_channel))
-    }
+	pub fn new(executor: Executor, transaction_channel: Sender<SignedTransaction>) -> Self {
+		Self { executor, transaction_channel }
+	}
+
+	pub async fn try_from_env(
+		transaction_channel: Sender<SignedTransaction>,
+	) -> Result<Self, anyhow::Error> {
+		let executor = Executor::try_from_env()?;
+		Ok(Self::new(executor, transaction_channel))
+	}
 }
 
 #[tonic::async_trait]
 impl MonzaExecutor for MonzaExecutorV1 {
-    /// Runs the service.
-    async fn run_service(&self) -> Result<(), anyhow::Error> {
-        self.executor.run_service().await
-    }
-
-    /// Runs the necessary background tasks.
-    async fn run_background_tasks(&self) -> Result<(), anyhow::Error> {
-        loop {
-            // readers should be able to run concurrently
-            self.executor.tick_transaction_pipe(self.transaction_channel.clone()).await?;
-        }
-
-        Ok(())
-    }
-
-    /// Executes a block optimistically
-    async fn execute_block_opt(
-        &self,
-        block: ExecutableBlock,
-    ) -> Result<BlockCommitment, anyhow::Error> {
-        debug!("Executing opt block: {:?}", block.block_id);
-        self.executor.execute_block(block).await
-    }
-
-    /// Sets the transaction channel.
-    fn set_tx_channel(&mut self, tx_channel: Sender<SignedTransaction>) {
-        self.transaction_channel = tx_channel;
-    }
-
-    /// Gets the API.
-    fn get_apis(&self) -> Apis {
-        self.executor.get_apis()
-    }
-
-    /// Get block head height.
-    async fn get_block_head_height(&self) -> Result<u64, anyhow::Error> {
-        self.executor.get_block_head_height()
-    }
-
-    /// Build block metadata for a timestamp
-    async fn build_block_metadata(
-        &self,
-        block_id: HashValue,
-        timestamp: u64,
-    ) -> Result<BlockMetadata, anyhow::Error> {
-        let (epoch, round) = self.executor.get_next_epoch_and_round().await?;
-        // Clone the signer from the executor for signing the metadata.
-        let signer = self.executor.signer.clone();
-
-        // Create a block metadata transaction.
-        Ok(BlockMetadata::new(block_id, epoch, round, signer.author(), vec![], vec![], timestamp))
-    }
+	/// Runs the service.
+	async fn run_service(&self) -> Result<(), anyhow::Error> {
+		self.executor.run_service().await
+	}
+
+	/// Runs the necessary background tasks.
+	async fn run_background_tasks(&self) -> Result<(), anyhow::Error> {
+		loop {
+			// readers should be able to run concurrently
+			self.executor.tick_transaction_pipe(self.transaction_channel.clone()).await?;
+		}
+
+		Ok(())
+	}
+
+	/// Executes a block optimistically
+	async fn execute_block_opt(
+		&self,
+		block: ExecutableBlock,
+	) -> Result<BlockCommitment, anyhow::Error> {
+		debug!("Executing opt block: {:?}", block.block_id);
+		self.executor.execute_block(block).await
+	}
+
+	/// Sets the transaction channel.
+	fn set_tx_channel(&mut self, tx_channel: Sender<SignedTransaction>) {
+		self.transaction_channel = tx_channel;
+	}
+
+	/// Gets the API.
+	fn get_apis(&self) -> Apis {
+		self.executor.get_apis()
+	}
+
+	/// Get block head height.
+	async fn get_block_head_height(&self) -> Result<u64, anyhow::Error> {
+		self.executor.get_block_head_height()
+	}
+
+	/// Build block metadata for a timestamp
+	async fn build_block_metadata(
+		&self,
+		block_id: HashValue,
+		timestamp: u64,
+	) -> Result<BlockMetadata, anyhow::Error> {
+		let (epoch, round) = self.executor.get_next_epoch_and_round().await?;
+		// Clone the signer from the executor for signing the metadata.
+		let signer = self.executor.signer.clone();
+
+		// Create a block metadata transaction.
+		Ok(BlockMetadata::new(block_id, epoch, round, signer.author(), vec![], vec![], timestamp))
+	}
 }
 
 #[cfg(test)]
 mod tests {
-    use std::collections::HashMap;
-
-    use super::*;
-    use aptos_api::{accept_type::AcceptType, transactions::SubmitTransactionPost};
-    use aptos_crypto::{
-        ed25519::{Ed25519PrivateKey, Ed25519Signature},
-        HashValue, PrivateKey, Uniform,
-    };
-    use aptos_mempool::{MempoolClientRequest, MempoolClientSender};
-    use aptos_sdk::{
-        transaction_builder::TransactionFactory,
-        types::{AccountKey, LocalAccount},
-    };
-    use aptos_storage_interface::state_view::DbStateViewAtVersion;
-    use aptos_types::{
-        account_address::AccountAddress,
-        account_config::aptos_test_root_address,
-        block_executor::partitioner::ExecutableTransactions,
-        chain_id::ChainId,
-        ledger_info::LedgerInfoWithSignatures,
-        transaction::{
-            signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script,
-            SignedTransaction, Transaction, TransactionPayload, Version,
-        },
-    };
-    use futures::channel::oneshot;
-    use futures::SinkExt;
-    use rand::SeedableRng;
-
-    fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction {
-        let private_key = Ed25519PrivateKey::generate_for_testing();
-        let public_key = private_key.public_key();
-        let transaction_payload = TransactionPayload::Script(Script::new(vec![0], vec![], vec![]));
-        let raw_transaction = RawTransaction::new(
-            AccountAddress::random(),
-            0,
-            transaction_payload,
-            0,
-            gas_unit_price,
-            0,
-            ChainId::test(), // This is the value used in aptos testing code.
- ); - SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature()) - } - - #[tokio::test] - async fn test_execute_opt_block() -> Result<(), anyhow::Error> { - let (tx, _rx) = async_channel::unbounded(); - let executor = MonzaExecutorV1::try_from_env(tx).await?; - let block_id = HashValue::random(); - let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction( - create_signed_transaction(0), - )); - let txs = ExecutableTransactions::Unsharded(vec![tx]); - let block = ExecutableBlock::new(block_id.clone(), txs); - executor.execute_block_opt(block).await?; - Ok(()) - } - - #[tokio::test] - async fn test_pipe_transactions_from_api() -> Result<(), anyhow::Error> { - let (tx, rx) = async_channel::unbounded(); - let executor = MonzaExecutorV1::try_from_env(tx).await?; - let services_executor = executor.clone(); - let background_executor = executor.clone(); - - let services_handle = tokio::spawn(async move { - services_executor.run_service().await?; - Ok(()) as Result<(), anyhow::Error> - }); - - let background_handle = tokio::spawn(async move { - background_executor.run_background_tasks().await?; - Ok(()) as Result<(), anyhow::Error> - }); - - // Start the background tasks - let user_transaction = create_signed_transaction(0); - let comparison_user_transaction = user_transaction.clone(); - let bcs_user_transaction = bcs::to_bytes(&user_transaction)?; - - let request = SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction)); - let api = executor.get_apis(); - api.transactions.submit_transaction(AcceptType::Bcs, request).await?; - - services_handle.abort(); - background_handle.abort(); - let received_transaction = rx.recv().await?; - assert_eq!(received_transaction, comparison_user_transaction); - - Ok(()) - } - - #[tokio::test] - async fn test_pipe_transactions_from_api_and_execute() -> Result<(), anyhow::Error> { - let (tx, rx) = async_channel::unbounded(); - let executor = MonzaExecutorV1::try_from_env(tx).await?; - let services_executor = executor.clone(); - let background_executor = executor.clone(); - - let services_handle = tokio::spawn(async move { - services_executor.run_service().await?; - Ok(()) as Result<(), anyhow::Error> - }); - - let background_handle = tokio::spawn(async move { - background_executor.run_background_tasks().await?; - Ok(()) as Result<(), anyhow::Error> - }); - - // Start the background tasks - let user_transaction = create_signed_transaction(0); - let comparison_user_transaction = user_transaction.clone(); - let bcs_user_transaction = bcs::to_bytes(&user_transaction)?; - - let request = SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction)); - let api = executor.get_apis(); - api.transactions.submit_transaction(AcceptType::Bcs, request).await?; - - let received_transaction = rx.recv().await?; - assert_eq!(received_transaction, comparison_user_transaction); - - // Now execute the block - let block_id = HashValue::random(); - let tx = - SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(received_transaction)); - let txs = ExecutableTransactions::Unsharded(vec![tx]); - let block = ExecutableBlock::new(block_id.clone(), txs); - executor.execute_block_opt(block).await?; - - services_handle.abort(); - background_handle.abort(); - - Ok(()) - } - - #[tokio::test] - async fn test_revert_chain_state_at_nth_commit() -> Result<(), anyhow::Error> { - use aptos_db::db::test_helper::arb_blocks_to_commit_with_block_nums; - use aptos_proptest_helpers::ValueGenerator; - - 
-        #[derive(Debug)]
-        struct Commit {
-            hash: HashValue,
-            info: LedgerInfoWithSignatures,
-            cur_ver: Version,
-        }
-
-        let (tx, rx) = async_channel::unbounded::<SignedTransaction>();
-        let executor = MonzaExecutorV1::try_from_env(tx).await?;
-        let services_executor = executor.clone();
-        let background_executor = executor.clone();
-        let services_handle = tokio::spawn(async move {
-            services_executor.run_service().await?;
-            Ok(()) as Result<(), anyhow::Error>
-        });
-
-        let background_handle = tokio::spawn(async move {
-            background_executor.run_background_tasks().await?;
-            Ok(()) as Result<(), anyhow::Error>
-        });
-        let mut committed_blocks = HashMap::new();
-
-        let mut val_generator = ValueGenerator::new();
-        // set range of min and max blocks to 5 to always gen 5 blocks
-        let (blocks, _) = val_generator.generate(arb_blocks_to_commit_with_block_nums(5, 5));
-        let mut blockheight = 0;
-        let mut cur_ver: Version = 0;
-        let mut commit_versions = vec![];
-
-        for (txns_to_commit, ledger_info_with_sigs) in &blocks {
-            let user_transaction = create_signed_transaction(0);
-            let comparison_user_transaction = user_transaction.clone();
-            let bcs_user_transaction = bcs::to_bytes(&user_transaction)?;
-
-            let request =
-                SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction));
-            let api = executor.get_apis();
-            api.transactions.submit_transaction(AcceptType::Bcs, request).await?;
-
-            let received_transaction = rx.recv().await?;
-            assert_eq!(received_transaction, comparison_user_transaction);
-
-            // Now execute the block
-            let block_id = HashValue::random();
-            let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(
-                received_transaction,
-            ));
-            let txs = ExecutableTransactions::Unsharded(vec![tx]);
-            let block = ExecutableBlock::new(block_id.clone(), txs);
-            executor.execute_block_opt(block).await?;
-
-            blockheight += 1;
-            committed_blocks.insert(
-                blockheight,
-                Commit {
-                    hash: ledger_info_with_sigs.commit_info().executed_state_id(),
-                    info: ledger_info_with_sigs.clone(),
-                    cur_ver,
-                },
-            );
-            commit_versions.push(cur_ver);
-            cur_ver += txns_to_commit.len() as u64;
-            blockheight += 1;
-        }
-
-        // Get the 3rd block back from the latest block
-        let revert_block_num = blockheight - 3;
-        let revert = committed_blocks.get(&revert_block_num).unwrap();
-
-        // Get the version to revert to
-        let version_to_revert = revert.cur_ver - 1;
-
-        if let Some((_max_blockheight, last_commit)) =
-            committed_blocks.iter().max_by_key(|(&k, _)| k)
-        {
-            let db_writer = executor.executor.db.writer.clone();
-            db_writer.revert_commit(
-                version_to_revert,
-                last_commit.cur_ver,
-                revert.hash,
-                revert.info.clone(),
-            )?;
-        } else {
-            panic!("No blocks to revert");
-        }
-
-        let db_reader = executor.executor.db.reader.clone();
-        let latest_version = db_reader.get_latest_version()?;
-        assert_eq!(latest_version, version_to_revert - 1);
-
-        services_handle.abort();
-        background_handle.abort();
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_execute_block_state_get_api() -> Result<(), anyhow::Error> {
-        // Create an executor instance from the environment configuration.
-        let executor = Executor::try_from_env()?;
-
-        // Initialize a root account using a predefined keypair and the test root address.
-        let root_account = LocalAccount::new(
-            aptos_test_root_address(),
-            AccountKey::from_private_key(executor.aptos_config.aptos_private_key.clone()),
-            0,
-        );
-
-        // Seed for random number generator, used here to generate predictable results in a test environment.
-        let seed = [3u8; 32];
-        let mut rng = ::rand::rngs::StdRng::from_seed(seed);
-
-        // Create a transaction factory with the chain ID of the executor.
-        let tx_factory = TransactionFactory::new(executor.aptos_config.chain_id.clone());
-
-        // Simulate the execution of multiple blocks: create and execute 10 blocks.
-        for _ in 0..10 {
-            let block_id = HashValue::random(); // Generate a random block ID for each block.
-
-            // Generate new accounts and create transactions for each block.
-            let mut transactions = Vec::new();
-            let mut transaction_hashes = Vec::new();
-            for _ in 0..2 {
-                // Each block will contain 2 transactions.
-                let new_account = LocalAccount::generate(&mut rng);
-                let user_account_creation_tx = root_account.sign_with_transaction_builder(
-                    tx_factory.create_user_account(new_account.public_key()),
-                );
-                let tx_hash = user_account_creation_tx.clone().committed_hash();
-                transaction_hashes.push(tx_hash);
-                transactions.push(Transaction::UserTransaction(user_account_creation_tx));
-            }
-
-            // Group all transactions into an unsharded block for execution.
-            let executable_transactions = ExecutableTransactions::Unsharded(
-                transactions.into_iter().map(SignatureVerifiedTransaction::Valid).collect(),
-            );
-            let block = ExecutableBlock::new(block_id.clone(), executable_transactions);
-            executor.execute_block(block).await?;
-
-            // Retrieve the executor's API interface and fetch the transaction by each hash.
-            let apis = executor.get_apis();
-            for hash in transaction_hashes {
-                let _ = apis
-                    .transactions
-                    .get_transaction_by_hash_inner(&AcceptType::Bcs, hash.into())
-                    .await?;
-            }
-        }
-
-        Ok(())
-    }
+	use std::collections::HashMap;
+
+	use super::*;
+	use aptos_api::{accept_type::AcceptType, transactions::SubmitTransactionPost};
+	use aptos_crypto::{
+		ed25519::{Ed25519PrivateKey, Ed25519Signature},
+		HashValue, PrivateKey, Uniform,
+	};
+	use aptos_mempool::{MempoolClientRequest, MempoolClientSender};
+	use aptos_sdk::{
+		transaction_builder::TransactionFactory,
+		types::{AccountKey, LocalAccount},
+	};
+	use aptos_storage_interface::state_view::DbStateViewAtVersion;
+	use aptos_types::{
+		account_address::AccountAddress,
+		account_config::aptos_test_root_address,
+		block_executor::partitioner::ExecutableTransactions,
+		chain_id::ChainId,
+		ledger_info::LedgerInfoWithSignatures,
+		transaction::{
+			signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script,
+			SignedTransaction, Transaction, TransactionPayload, Version,
+		},
+	};
+	use futures::channel::oneshot;
+	use futures::SinkExt;
+	use rand::SeedableRng;
+
+	fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction {
+		let private_key = Ed25519PrivateKey::generate_for_testing();
+		let public_key = private_key.public_key();
+		let transaction_payload = TransactionPayload::Script(Script::new(vec![0], vec![], vec![]));
+		let raw_transaction = RawTransaction::new(
+			AccountAddress::random(),
+			0,
+			transaction_payload,
+			0,
+			gas_unit_price,
+			0,
+			ChainId::test(), // This is the value used in aptos testing code.
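+			// Editor's note: a zero expiration timestamp and a dummy Ed25519 signature
+			// suffice here because these tests either wrap the transaction in
+			// `SignatureVerifiedTransaction::Valid` or only pipe it through the
+			// mempool, so signature verification is not exercised.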
+ ); + SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature()) + } + + #[tokio::test] + async fn test_execute_opt_block() -> Result<(), anyhow::Error> { + let (tx, _rx) = async_channel::unbounded(); + let executor = MonzaExecutorV1::try_from_env(tx).await?; + let block_id = HashValue::random(); + let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction( + create_signed_transaction(0), + )); + let txs = ExecutableTransactions::Unsharded(vec![tx]); + let block = ExecutableBlock::new(block_id.clone(), txs); + executor.execute_block_opt(block).await?; + Ok(()) + } + + #[tokio::test] + async fn test_pipe_transactions_from_api() -> Result<(), anyhow::Error> { + let (tx, rx) = async_channel::unbounded(); + let executor = MonzaExecutorV1::try_from_env(tx).await?; + let services_executor = executor.clone(); + let background_executor = executor.clone(); + + let services_handle = tokio::spawn(async move { + services_executor.run_service().await?; + Ok(()) as Result<(), anyhow::Error> + }); + + let background_handle = tokio::spawn(async move { + background_executor.run_background_tasks().await?; + Ok(()) as Result<(), anyhow::Error> + }); + + // Start the background tasks + let user_transaction = create_signed_transaction(0); + let comparison_user_transaction = user_transaction.clone(); + let bcs_user_transaction = bcs::to_bytes(&user_transaction)?; + + let request = SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction)); + let api = executor.get_apis(); + api.transactions.submit_transaction(AcceptType::Bcs, request).await?; + + services_handle.abort(); + background_handle.abort(); + let received_transaction = rx.recv().await?; + assert_eq!(received_transaction, comparison_user_transaction); + + Ok(()) + } + + #[tokio::test] + async fn test_pipe_transactions_from_api_and_execute() -> Result<(), anyhow::Error> { + let (tx, rx) = async_channel::unbounded(); + let executor = MonzaExecutorV1::try_from_env(tx).await?; + let services_executor = executor.clone(); + let background_executor = executor.clone(); + + let services_handle = tokio::spawn(async move { + services_executor.run_service().await?; + Ok(()) as Result<(), anyhow::Error> + }); + + let background_handle = tokio::spawn(async move { + background_executor.run_background_tasks().await?; + Ok(()) as Result<(), anyhow::Error> + }); + + // Start the background tasks + let user_transaction = create_signed_transaction(0); + let comparison_user_transaction = user_transaction.clone(); + let bcs_user_transaction = bcs::to_bytes(&user_transaction)?; + + let request = SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction)); + let api = executor.get_apis(); + api.transactions.submit_transaction(AcceptType::Bcs, request).await?; + + let received_transaction = rx.recv().await?; + assert_eq!(received_transaction, comparison_user_transaction); + + // Now execute the block + let block_id = HashValue::random(); + let tx = + SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(received_transaction)); + let txs = ExecutableTransactions::Unsharded(vec![tx]); + let block = ExecutableBlock::new(block_id.clone(), txs); + executor.execute_block_opt(block).await?; + + services_handle.abort(); + background_handle.abort(); + + Ok(()) + } + + #[tokio::test] + async fn test_revert_chain_state_at_nth_commit() -> Result<(), anyhow::Error> { + use aptos_db::db::test_helper::arb_blocks_to_commit_with_block_nums; + use aptos_proptest_helpers::ValueGenerator; + + 
+		#[derive(Debug)]
+		struct Commit {
+			hash: HashValue,
+			info: LedgerInfoWithSignatures,
+			cur_ver: Version,
+		}
+
+		let (tx, rx) = async_channel::unbounded::<SignedTransaction>();
+		let executor = MonzaExecutorV1::try_from_env(tx).await?;
+		let services_executor = executor.clone();
+		let background_executor = executor.clone();
+		let services_handle = tokio::spawn(async move {
+			services_executor.run_service().await?;
+			Ok(()) as Result<(), anyhow::Error>
+		});
+
+		let background_handle = tokio::spawn(async move {
+			background_executor.run_background_tasks().await?;
+			Ok(()) as Result<(), anyhow::Error>
+		});
+		let mut committed_blocks = HashMap::new();
+
+		let mut val_generator = ValueGenerator::new();
+		// set range of min and max blocks to 5 to always gen 5 blocks
+		let (blocks, _) = val_generator.generate(arb_blocks_to_commit_with_block_nums(5, 5));
+		let mut blockheight = 0;
+		let mut cur_ver: Version = 0;
+		let mut commit_versions = vec![];
+
+		for (txns_to_commit, ledger_info_with_sigs) in &blocks {
+			let user_transaction = create_signed_transaction(0);
+			let comparison_user_transaction = user_transaction.clone();
+			let bcs_user_transaction = bcs::to_bytes(&user_transaction)?;
+
+			let request =
+				SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction));
+			let api = executor.get_apis();
+			api.transactions.submit_transaction(AcceptType::Bcs, request).await?;
+
+			let received_transaction = rx.recv().await?;
+			assert_eq!(received_transaction, comparison_user_transaction);
+
+			// Now execute the block
+			let block_id = HashValue::random();
+			let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(
+				received_transaction,
+			));
+			let txs = ExecutableTransactions::Unsharded(vec![tx]);
+			let block = ExecutableBlock::new(block_id.clone(), txs);
+			executor.execute_block_opt(block).await?;
+
+			blockheight += 1;
+			committed_blocks.insert(
+				blockheight,
+				Commit {
+					hash: ledger_info_with_sigs.commit_info().executed_state_id(),
+					info: ledger_info_with_sigs.clone(),
+					cur_ver,
+				},
+			);
+			commit_versions.push(cur_ver);
+			cur_ver += txns_to_commit.len() as u64;
+			blockheight += 1;
+		}
+
+		// Get the 3rd block back from the latest block
+		let revert_block_num = blockheight - 3;
+		let revert = committed_blocks.get(&revert_block_num).unwrap();
+
+		// Get the version to revert to
+		let version_to_revert = revert.cur_ver - 1;
+
+		if let Some((_max_blockheight, last_commit)) =
+			committed_blocks.iter().max_by_key(|(&k, _)| k)
+		{
+			let db_writer = executor.executor.db.writer.clone();
+			db_writer.revert_commit(
+				version_to_revert,
+				last_commit.cur_ver,
+				revert.hash,
+				revert.info.clone(),
+			)?;
+		} else {
+			panic!("No blocks to revert");
+		}
+
+		let db_reader = executor.executor.db.reader.clone();
+		let latest_version = db_reader.get_latest_version()?;
+		assert_eq!(latest_version, version_to_revert - 1);
+
+		services_handle.abort();
+		background_handle.abort();
+		Ok(())
+	}
+
+	#[tokio::test]
+	async fn test_execute_block_state_get_api() -> Result<(), anyhow::Error> {
+		// Create an executor instance from the environment configuration.
+		let executor = Executor::try_from_env()?;
+
+		// Initialize a root account using a predefined keypair and the test root address.
+		let root_account = LocalAccount::new(
+			aptos_test_root_address(),
+			AccountKey::from_private_key(executor.aptos_config.aptos_private_key.clone()),
+			0,
+		);
+
+		// Seed for random number generator, used here to generate predictable results in a test environment.
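+		// Editor's note: a fixed seed makes the generated accounts, and therefore the
+		// derived transaction hashes, reproducible across test runs.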
+		let seed = [3u8; 32];
+		let mut rng = ::rand::rngs::StdRng::from_seed(seed);
+
+		// Create a transaction factory with the chain ID of the executor.
+		let tx_factory = TransactionFactory::new(executor.aptos_config.chain_id.clone());
+
+		// Simulate the execution of multiple blocks: create and execute 10 blocks.
+		for _ in 0..10 {
+			let block_id = HashValue::random(); // Generate a random block ID for each block.
+
+			// Generate new accounts and create transactions for each block.
+			let mut transactions = Vec::new();
+			let mut transaction_hashes = Vec::new();
+			for _ in 0..2 {
+				// Each block will contain 2 transactions.
+				let new_account = LocalAccount::generate(&mut rng);
+				let user_account_creation_tx = root_account.sign_with_transaction_builder(
+					tx_factory.create_user_account(new_account.public_key()),
+				);
+				let tx_hash = user_account_creation_tx.clone().committed_hash();
+				transaction_hashes.push(tx_hash);
+				transactions.push(Transaction::UserTransaction(user_account_creation_tx));
+			}
+
+			// Group all transactions into an unsharded block for execution.
+			let executable_transactions = ExecutableTransactions::Unsharded(
+				transactions.into_iter().map(SignatureVerifiedTransaction::Valid).collect(),
+			);
+			let block = ExecutableBlock::new(block_id.clone(), executable_transactions);
+			executor.execute_block(block).await?;
+
+			// Retrieve the executor's API interface and fetch the transaction by each hash.
+			let apis = executor.get_apis();
+			for hash in transaction_hashes {
+				let _ = apis
+					.transactions
+					.get_transaction_by_hash_inner(&AcceptType::Bcs, hash.into())
+					.await?;
+			}
+		}
+
+		Ok(())
+	}
}
diff --git a/protocol-units/execution/monza/fin-executor/src/executor.rs b/protocol-units/execution/monza/fin-executor/src/executor.rs
index ebd0b5c1c..a9dd050ef 100644
--- a/protocol-units/execution/monza/fin-executor/src/executor.rs
+++ b/protocol-units/execution/monza/fin-executor/src/executor.rs
@@ -1,221 +1,221 @@
 use aptos_config::config::NodeConfig;
 use aptos_db::AptosDB;
 use aptos_executor::{
-    block_executor::BlockExecutor,
-    db_bootstrapper::{generate_waypoint, maybe_bootstrap},
+	block_executor::BlockExecutor,
+	db_bootstrapper::{generate_waypoint, maybe_bootstrap},
 };
 use aptos_executor_types::{state_checkpoint_output::StateCheckpointOutput, BlockExecutorTrait};
 use aptos_mempool::core_mempool::CoreMempool;
 use aptos_storage_interface::DbReaderWriter;
 use aptos_types::{
-    block_executor::config::BlockExecutorConfigFromOnchain,
-    block_executor::partitioner::ExecutableBlock,
-    transaction::{Transaction, WriteSetPayload},
-    validator_signer::ValidatorSigner,
+	block_executor::config::BlockExecutorConfigFromOnchain,
+	block_executor::partitioner::ExecutableBlock,
+	transaction::{Transaction, WriteSetPayload},
+	validator_signer::ValidatorSigner,
 };
 use aptos_vm::AptosVM;
 use std::{
-    path::PathBuf,
-    sync::{Arc, RwLock},
+	path::PathBuf,
+	sync::{Arc, RwLock},
 };
 
 /// The state of `movement-network` execution can exist in three states:
 /// `Dynamic`, `Optimistic`, and `Final`. Each is described on its variant below.
 pub enum FinalityState {
-    /// The dynamic state that is subject to change and is not
-    /// yet finalized. It is the state that is derived from the blocks
-    /// received before any finality is reached and simply represents a
-    /// local application of the fork-choice rule (longest chain)
-    /// of the gossipped blocks.
-    Dynamic,
-    /// The optimistic state that is derived from the blocks received after DA finality.
-    /// It is the state that is derived from the blocks that have been finalized by the DA.
-    Optimistic,
-    /// The final state that is derived from the blocks received after the finality is reached.
-    Final,
+	/// The dynamic state that is subject to change and is not
+	/// yet finalized. It is the state that is derived from the blocks
+	/// received before any finality is reached and simply represents a
+	/// local application of the fork-choice rule (longest chain)
+	/// of the gossipped blocks.
+	Dynamic,
+	/// The optimistic state that is derived from the blocks received after DA finality.
+	/// It is the state that is derived from the blocks that have been finalized by the DA.
+	Optimistic,
+	/// The final state that is derived from the blocks received after the finality is reached.
+	Final,
 }
 
 /// The current state of the executor and its execution of blocks.
 #[derive(PartialEq, Debug, Clone, Copy)]
 pub enum ExecutorState {
-    /// The executor is idle and waiting for a block to be executed.
-    Idle,
-    /// The block is executed in a speculative manner and its effects held in memory.
-    Speculate,
-    /// The network agrees on the block.
-    Consensus,
-    /// The block is committed to the state; at this point
-    /// fork choices must be resolved, otherwise the commitment and subsequent execution will fail.
-    Commit,
+	/// The executor is idle and waiting for a block to be executed.
+	Idle,
+	/// The block is executed in a speculative manner and its effects held in memory.
+	Speculate,
+	/// The network agrees on the block.
+	Consensus,
+	/// The block is committed to the state; at this point
+	/// fork choices must be resolved, otherwise the commitment and subsequent execution will fail.
+	Commit,
 }
 
 /// The `Executor` is responsible for executing blocks and managing the state of the execution
 /// against the `AptosVM`.
 pub struct Executor {
-    /// The executing type.
-    pub block_executor: Arc<RwLock<BlockExecutor<AptosVM>>>,
-    /// The current state of the executor.
-    pub status: ExecutorState,
-    /// The access to db.
-    pub db: DbReaderWriter,
-    /// The signer of the executor's transactions.
-    pub signer: ValidatorSigner,
-    /// The access to the core mempool.
-    pub mempool: CoreMempool,
+	/// The executing type.
+	pub block_executor: Arc<RwLock<BlockExecutor<AptosVM>>>,
+	/// The current state of the executor.
+	pub status: ExecutorState,
+	/// The access to db.
+	pub db: DbReaderWriter,
+	/// The signer of the executor's transactions.
+	pub signer: ValidatorSigner,
+	/// The access to the core mempool.
+	pub mempool: CoreMempool,
 }
 
 impl Executor {
-    const DB_PATH_ENV_VAR: &'static str = "DB_DIR";
-
-    /// Create a new `Executor` instance.
-    pub fn new(
-        db_dir: PathBuf,
-        block_executor: BlockExecutor<AptosVM>,
-        signer: ValidatorSigner,
-        mempool: CoreMempool,
-    ) -> Self {
-        let (_aptos_db, reader_writer) = DbReaderWriter::wrap(AptosDB::new_for_test(&db_dir));
-        Self {
-            block_executor: Arc::new(RwLock::new(block_executor)),
-            status: ExecutorState::Idle,
-            db: reader_writer,
-            signer,
-            mempool,
-        }
-    }
-
-    pub fn bootstrap_empty_db(db_dir: PathBuf) -> Result<DbReaderWriter, anyhow::Error> {
-        let genesis = aptos_vm_genesis::test_genesis_change_set_and_validators(Some(1));
-        let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
-        let db_rw = DbReaderWriter::new(AptosDB::new_for_test(&db_dir));
-        assert!(db_rw.reader.get_latest_ledger_info_option()?.is_none());
-
-        // Bootstrap empty DB.
-        let waypoint =
-            generate_waypoint::<AptosVM>(&db_rw, &genesis_txn).expect("Should not fail.");
-        maybe_bootstrap::<AptosVM>(&db_rw, &genesis_txn, waypoint)?;
-        assert!(db_rw.reader.get_latest_ledger_info_option()?.is_some());
-
-        Ok(db_rw)
-    }
-
-    pub fn bootstrap(
-        db_dir: PathBuf,
-        signer: ValidatorSigner,
-        mempool: CoreMempool,
-    ) -> Result<Self, anyhow::Error> {
-        let db = Self::bootstrap_empty_db(db_dir)?;
-
-        Ok(Self {
-            block_executor: Arc::new(RwLock::new(BlockExecutor::new(db.clone()))),
-            status: ExecutorState::Idle,
-            db,
-            signer,
-            mempool,
-        })
-    }
-
-    pub fn try_from_env() -> Result<Self, anyhow::Error> {
-        // read the db dir from env or use a tempfile
-        let db_dir = match std::env::var(Self::DB_PATH_ENV_VAR) {
-            Ok(dir) => PathBuf::from(dir),
-            Err(_) => {
-                let temp_dir = tempfile::tempdir()?;
-                temp_dir.path().to_path_buf()
-            }
-        };
-
-        // use the default signer, block executor, and mempool
-        let signer = ValidatorSigner::random(None);
-        let mempool = CoreMempool::new(&NodeConfig::default());
-
-        Self::bootstrap(db_dir, signer, mempool)
-    }
-
-    pub fn set_commit_state(&mut self) {
-        self.status = ExecutorState::Commit;
-    }
-
-    /// Execute a block which gets committed to the state.
-    /// `ExecutorState` must be set to `Commit` before calling this method.
-    pub async fn execute_block(
-        &mut self,
-        block: ExecutableBlock,
-    ) -> Result<StateCheckpointOutput, anyhow::Error> {
-        if self.status != ExecutorState::Commit {
-            return Err(anyhow::anyhow!("Executor is not in the Commit state"));
-        }
-
-        let parent_block_id = {
-            let block_executor = self.block_executor.read().map_err(|e| {
-                anyhow::anyhow!("Failed to acquire block executor read lock: {:?}", e)
-            })?; // acquire read lock
-            block_executor.committed_block_id()
-        };
-
-        let state_checkpoint = {
-            let block_executor = self.block_executor.write().map_err(|e| {
-                anyhow::anyhow!("Failed to acquire block executor write lock: {:?}", e)
-            })?; // acquire write lock
-            block_executor.execute_and_state_checkpoint(
-                block,
-                parent_block_id,
-                BlockExecutorConfigFromOnchain::new_no_block_limit(),
-            )?
-        };
-
-        // Update the executor state
-        self.status = ExecutorState::Idle;
-
-        Ok(state_checkpoint)
-    }
+	const DB_PATH_ENV_VAR: &'static str = "DB_DIR";
+
+	/// Create a new `Executor` instance.
+	pub fn new(
+		db_dir: PathBuf,
+		block_executor: BlockExecutor<AptosVM>,
+		signer: ValidatorSigner,
+		mempool: CoreMempool,
+	) -> Self {
+		let (_aptos_db, reader_writer) = DbReaderWriter::wrap(AptosDB::new_for_test(&db_dir));
+		Self {
+			block_executor: Arc::new(RwLock::new(block_executor)),
+			status: ExecutorState::Idle,
+			db: reader_writer,
+			signer,
+			mempool,
+		}
+	}
+
+	pub fn bootstrap_empty_db(db_dir: PathBuf) -> Result<DbReaderWriter, anyhow::Error> {
+		let genesis = aptos_vm_genesis::test_genesis_change_set_and_validators(Some(1));
+		let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
+		let db_rw = DbReaderWriter::new(AptosDB::new_for_test(&db_dir));
+		assert!(db_rw.reader.get_latest_ledger_info_option()?.is_none());
+
+		// Bootstrap empty DB.
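+		// Editor's note: in the Aptos executor, `generate_waypoint` executes the
+		// genesis transaction against the empty DB to derive a trusted waypoint,
+		// and `maybe_bootstrap` then commits genesis only if the DB has not already
+		// been bootstrapped at that waypoint.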
+		let waypoint =
+			generate_waypoint::<AptosVM>(&db_rw, &genesis_txn).expect("Should not fail.");
+		maybe_bootstrap::<AptosVM>(&db_rw, &genesis_txn, waypoint)?;
+		assert!(db_rw.reader.get_latest_ledger_info_option()?.is_some());
+
+		Ok(db_rw)
+	}
+
+	pub fn bootstrap(
+		db_dir: PathBuf,
+		signer: ValidatorSigner,
+		mempool: CoreMempool,
+	) -> Result<Self, anyhow::Error> {
+		let db = Self::bootstrap_empty_db(db_dir)?;
+
+		Ok(Self {
+			block_executor: Arc::new(RwLock::new(BlockExecutor::new(db.clone()))),
+			status: ExecutorState::Idle,
+			db,
+			signer,
+			mempool,
+		})
+	}
+
+	pub fn try_from_env() -> Result<Self, anyhow::Error> {
+		// read the db dir from env or use a tempfile
+		let db_dir = match std::env::var(Self::DB_PATH_ENV_VAR) {
+			Ok(dir) => PathBuf::from(dir),
+			Err(_) => {
+				let temp_dir = tempfile::tempdir()?;
+				temp_dir.path().to_path_buf()
+			}
+		};
+
+		// use the default signer, block executor, and mempool
+		let signer = ValidatorSigner::random(None);
+		let mempool = CoreMempool::new(&NodeConfig::default());
+
+		Self::bootstrap(db_dir, signer, mempool)
+	}
+
+	pub fn set_commit_state(&mut self) {
+		self.status = ExecutorState::Commit;
+	}
+
+	/// Execute a block which gets committed to the state.
+	/// `ExecutorState` must be set to `Commit` before calling this method.
+	pub async fn execute_block(
+		&mut self,
+		block: ExecutableBlock,
+	) -> Result<StateCheckpointOutput, anyhow::Error> {
+		if self.status != ExecutorState::Commit {
+			return Err(anyhow::anyhow!("Executor is not in the Commit state"));
+		}
+
+		let parent_block_id = {
+			let block_executor = self.block_executor.read().map_err(|e| {
+				anyhow::anyhow!("Failed to acquire block executor read lock: {:?}", e)
+			})?; // acquire read lock
+			block_executor.committed_block_id()
+		};
+
+		let state_checkpoint = {
+			let block_executor = self.block_executor.write().map_err(|e| {
+				anyhow::anyhow!("Failed to acquire block executor write lock: {:?}", e)
+			})?; // acquire write lock
+			block_executor.execute_and_state_checkpoint(
+				block,
+				parent_block_id,
+				BlockExecutorConfigFromOnchain::new_no_block_limit(),
+			)?
+		};
+
+		// Update the executor state
+		self.status = ExecutorState::Idle;
+
+		Ok(state_checkpoint)
+	}
 }
 
 #[cfg(test)]
 mod tests {
-    use super::*;
-    use aptos_crypto::{
-        ed25519::{Ed25519PrivateKey, Ed25519Signature},
-        HashValue, PrivateKey, Uniform,
-    };
-    use aptos_types::{
-        account_address::AccountAddress,
-        block_executor::partitioner::ExecutableTransactions,
-        chain_id::ChainId,
-        transaction::{
-            signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script,
-            SignedTransaction, Transaction, TransactionPayload,
-        },
-    };
-
-    fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction {
-        let private_key = Ed25519PrivateKey::generate_for_testing();
-        let public_key = private_key.public_key();
-
-        let transaction_payload = TransactionPayload::Script(Script::new(vec![], vec![], vec![]));
-        let raw_transaction = RawTransaction::new(
-            AccountAddress::random(),
-            0,
-            transaction_payload,
-            0,
-            gas_unit_price,
-            0,
-            ChainId::new(10), // This is the value used in aptos testing code.
- ); - SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature()) - } - - #[tokio::test] - async fn test_execute_block() -> Result<(), anyhow::Error> { - let mut executor = Executor::try_from_env()?; - executor.set_commit_state(); - let block_id = HashValue::random(); - let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction( - create_signed_transaction(0), - )); - let txs = ExecutableTransactions::Unsharded(vec![tx]); - let block = ExecutableBlock::new(block_id.clone(), txs); - executor.execute_block(block).await?; - Ok(()) - } + use super::*; + use aptos_crypto::{ + ed25519::{Ed25519PrivateKey, Ed25519Signature}, + HashValue, PrivateKey, Uniform, + }; + use aptos_types::{ + account_address::AccountAddress, + block_executor::partitioner::ExecutableTransactions, + chain_id::ChainId, + transaction::{ + signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script, + SignedTransaction, Transaction, TransactionPayload, + }, + }; + + fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction { + let private_key = Ed25519PrivateKey::generate_for_testing(); + let public_key = private_key.public_key(); + + let transaction_payload = TransactionPayload::Script(Script::new(vec![], vec![], vec![])); + let raw_transaction = RawTransaction::new( + AccountAddress::random(), + 0, + transaction_payload, + 0, + gas_unit_price, + 0, + ChainId::new(10), // This is the value used in aptos testing code. + ); + SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature()) + } + + #[tokio::test] + async fn test_execute_block() -> Result<(), anyhow::Error> { + let mut executor = Executor::try_from_env()?; + executor.set_commit_state(); + let block_id = HashValue::random(); + let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction( + create_signed_transaction(0), + )); + let txs = ExecutableTransactions::Unsharded(vec![tx]); + let block = ExecutableBlock::new(block_id.clone(), txs); + executor.execute_block(block).await?; + Ok(()) + } } diff --git a/protocol-units/execution/suzuka/executor/src/lib.rs b/protocol-units/execution/suzuka/executor/src/lib.rs index 95922f9d7..e3be33043 100644 --- a/protocol-units/execution/suzuka/executor/src/lib.rs +++ b/protocol-units/execution/suzuka/executor/src/lib.rs @@ -3,11 +3,11 @@ pub mod v1; use aptos_api::runtime::Apis; pub use aptos_crypto::hash::HashValue; pub use aptos_types::{ - block_executor::partitioner::ExecutableBlock, - block_executor::partitioner::ExecutableTransactions, - block_metadata::BlockMetadata, - transaction::signature_verified_transaction::SignatureVerifiedTransaction, - transaction::{SignedTransaction, Transaction}, + block_executor::partitioner::ExecutableBlock, + block_executor::partitioner::ExecutableTransactions, + block_metadata::BlockMetadata, + transaction::signature_verified_transaction::SignatureVerifiedTransaction, + transaction::{SignedTransaction, Transaction}, }; use movement_types::BlockCommitment; @@ -16,31 +16,31 @@ use async_channel::Sender; #[tonic::async_trait] pub trait SuzukaExecutor { - /// Runs the service - async fn run_service(&self) -> Result<(), anyhow::Error>; + /// Runs the service + async fn run_service(&self) -> Result<(), anyhow::Error>; - /// Runs the necessary background tasks. - async fn run_background_tasks(&self) -> Result<(), anyhow::Error>; + /// Runs the necessary background tasks. 
+	async fn run_background_tasks(&self) -> Result<(), anyhow::Error>;
 
-    /// Executes a block optimistically
-    async fn execute_block_opt(
-        &self,
-        block: ExecutableBlock,
-    ) -> Result<BlockCommitment, anyhow::Error>;
+	/// Executes a block optimistically
+	async fn execute_block_opt(
+		&self,
+		block: ExecutableBlock,
+	) -> Result<BlockCommitment, anyhow::Error>;
 
-    /// Sets the transaction channel.
-    fn set_tx_channel(&mut self, tx_channel: Sender<SignedTransaction>);
+	/// Sets the transaction channel.
+	fn set_tx_channel(&mut self, tx_channel: Sender<SignedTransaction>);
 
-    /// Gets the dyn API.
-    fn get_apis(&self) -> Apis;
+	/// Gets the dyn API.
+	fn get_apis(&self) -> Apis;
 
-    /// Get block head height.
-    async fn get_block_head_height(&self) -> Result<u64, anyhow::Error>;
+	/// Get block head height.
+	async fn get_block_head_height(&self) -> Result<u64, anyhow::Error>;
 
-    /// Build block metadata for a timestamp
-    async fn build_block_metadata(
-        &self,
-        block_id: HashValue,
-        timestamp: u64,
-    ) -> Result<BlockMetadata, anyhow::Error>;
+	/// Build block metadata for a timestamp
+	async fn build_block_metadata(
+		&self,
+		block_id: HashValue,
+		timestamp: u64,
+	) -> Result<BlockMetadata, anyhow::Error>;
 }
diff --git a/protocol-units/execution/suzuka/executor/src/v1.rs b/protocol-units/execution/suzuka/executor/src/v1.rs
index 56b9329d0..6332533c9 100644
--- a/protocol-units/execution/suzuka/executor/src/v1.rs
+++ b/protocol-units/execution/suzuka/executor/src/v1.rs
@@ -9,215 +9,215 @@ use tracing::debug;
 
 #[derive(Clone)]
 pub struct SuzukaExecutorV1 {
-    // this rwlock may be somewhat redundant
-    pub executor: Executor,
-    pub transaction_channel: Sender<SignedTransaction>,
+	// this rwlock may be somewhat redundant
+	pub executor: Executor,
+	pub transaction_channel: Sender<SignedTransaction>,
 }
 
 impl SuzukaExecutorV1 {
-    pub fn new(executor: Executor, transaction_channel: Sender<SignedTransaction>) -> Self {
-        Self { executor, transaction_channel }
-    }
-
-    pub async fn try_from_env(
-        transaction_channel: Sender<SignedTransaction>,
-    ) -> Result<Self, anyhow::Error> {
-        let executor = Executor::try_from_env()?;
-        Ok(Self::new(executor, transaction_channel))
-    }
+	pub fn new(executor: Executor, transaction_channel: Sender<SignedTransaction>) -> Self {
+		Self { executor, transaction_channel }
+	}
+
+	pub async fn try_from_env(
+		transaction_channel: Sender<SignedTransaction>,
+	) -> Result<Self, anyhow::Error> {
+		let executor = Executor::try_from_env()?;
+		Ok(Self::new(executor, transaction_channel))
+	}
 }
 
 #[tonic::async_trait]
 impl SuzukaExecutor for SuzukaExecutorV1 {
-    /// Runs the service.
-    async fn run_service(&self) -> Result<(), anyhow::Error> {
-        self.executor.run_service().await
-    }
-
-    /// Runs the necessary background tasks.
-    async fn run_background_tasks(&self) -> Result<(), anyhow::Error> {
-        loop {
-            // readers should be able to run concurrently
-            self.executor.tick_transaction_pipe(self.transaction_channel.clone()).await?;
-        }
-
-        Ok(())
-    }
-
-    async fn execute_block_opt(
-        &self,
-        block: ExecutableBlock,
-    ) -> Result<BlockCommitment, anyhow::Error> {
-        debug!("Executing block: {:?}", block.block_id);
-        self.executor.execute_block(block).await
-    }
-
-    /// Sets the transaction channel.
-    fn set_tx_channel(&mut self, tx_channel: Sender<SignedTransaction>) {
-        self.transaction_channel = tx_channel;
-    }
-
-    /// Gets the API.
-    fn get_apis(&self) -> Apis {
-        self.executor.get_apis()
-    }
-
-    /// Get block head height.
-    async fn get_block_head_height(&self) -> Result<u64, anyhow::Error> {
-        self.executor.get_block_head_height()
-    }
-
-    /// Build block metadata for a timestamp
-    async fn build_block_metadata(
-        &self,
-        block_id: HashValue,
-        timestamp: u64,
-    ) -> Result<BlockMetadata, anyhow::Error> {
-        let (epoch, round) = self.executor.get_next_epoch_and_round().await?;
-        // Clone the signer from the executor for signing the metadata.
-        let signer = self.executor.signer.clone();
-
-        // Create a block metadata transaction.
-        Ok(BlockMetadata::new(block_id, epoch, round, signer.author(), vec![], vec![], timestamp))
-    }
+	/// Runs the service.
+	async fn run_service(&self) -> Result<(), anyhow::Error> {
+		self.executor.run_service().await
+	}
+
+	/// Runs the necessary background tasks.
+	async fn run_background_tasks(&self) -> Result<(), anyhow::Error> {
+		loop {
+			// readers should be able to run concurrently
+			self.executor.tick_transaction_pipe(self.transaction_channel.clone()).await?;
+		}
+
+		Ok(())
+	}
+
+	async fn execute_block_opt(
+		&self,
+		block: ExecutableBlock,
+	) -> Result<BlockCommitment, anyhow::Error> {
+		debug!("Executing block: {:?}", block.block_id);
+		self.executor.execute_block(block).await
+	}
+
+	/// Sets the transaction channel.
+	fn set_tx_channel(&mut self, tx_channel: Sender<SignedTransaction>) {
+		self.transaction_channel = tx_channel;
+	}
+
+	/// Gets the API.
+	fn get_apis(&self) -> Apis {
+		self.executor.get_apis()
+	}
+
+	/// Get block head height.
+	async fn get_block_head_height(&self) -> Result<u64, anyhow::Error> {
+		self.executor.get_block_head_height()
+	}
+
+	/// Build block metadata for a timestamp
+	async fn build_block_metadata(
+		&self,
+		block_id: HashValue,
+		timestamp: u64,
+	) -> Result<BlockMetadata, anyhow::Error> {
+		let (epoch, round) = self.executor.get_next_epoch_and_round().await?;
+		// Clone the signer from the executor for signing the metadata.
+		let signer = self.executor.signer.clone();
+
+		// Create a block metadata transaction.
+		Ok(BlockMetadata::new(block_id, epoch, round, signer.author(), vec![], vec![], timestamp))
+	}
 }
 
 #[cfg(test)]
 mod opt_tests {
-    use super::*;
-    use aptos_api::{accept_type::AcceptType, transactions::SubmitTransactionPost};
-    use aptos_crypto::{
-        ed25519::{Ed25519PrivateKey, Ed25519Signature},
-        HashValue, PrivateKey, Uniform,
-    };
-    use aptos_types::{
-        account_address::AccountAddress,
-        block_executor::partitioner::ExecutableTransactions,
-        chain_id::ChainId,
-        transaction::{
-            signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script,
-            SignedTransaction, Transaction, TransactionPayload,
-        },
-    };
-
-    fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction {
-        let private_key = Ed25519PrivateKey::generate_for_testing();
-        let public_key = private_key.public_key();
-        let transaction_payload = TransactionPayload::Script(Script::new(vec![0], vec![], vec![]));
-        let raw_transaction = RawTransaction::new(
-            AccountAddress::random(),
-            0,
-            transaction_payload,
-            0,
-            gas_unit_price,
-            0,
-            ChainId::test(), // This is the value used in aptos testing code.
#[cfg(test)]
mod opt_tests {
-    use super::*;
-    use aptos_api::{accept_type::AcceptType, transactions::SubmitTransactionPost};
-    use aptos_crypto::{
-        ed25519::{Ed25519PrivateKey, Ed25519Signature},
-        HashValue, PrivateKey, Uniform,
-    };
-    use aptos_types::{
-        account_address::AccountAddress,
-        block_executor::partitioner::ExecutableTransactions,
-        chain_id::ChainId,
-        transaction::{
-            signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script,
-            SignedTransaction, Transaction, TransactionPayload,
-        },
-    };
-
-    fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction {
-        let private_key = Ed25519PrivateKey::generate_for_testing();
-        let public_key = private_key.public_key();
-        let transaction_payload = TransactionPayload::Script(Script::new(vec![0], vec![], vec![]));
-        let raw_transaction = RawTransaction::new(
-            AccountAddress::random(),
-            0,
-            transaction_payload,
-            0,
-            gas_unit_price,
-            0,
-            ChainId::test(), // This is the value used in aptos testing code.
-        );
-        SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature())
-    }
-
-    #[tokio::test]
-    async fn test_execute_opt_block() -> Result<(), anyhow::Error> {
-        let (tx, _rx) = async_channel::unbounded();
-        let executor = SuzukaExecutorV1::try_from_env(tx).await?;
-        let block_id = HashValue::random();
-        let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(
-            create_signed_transaction(0),
-        ));
-        let txs = ExecutableTransactions::Unsharded(vec![tx]);
-        let block = ExecutableBlock::new(block_id.clone(), txs);
-        executor.execute_block_opt(block).await?;
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_pipe_transactions_from_api() -> Result<(), anyhow::Error> {
-        let (tx, rx) = async_channel::unbounded();
-        let executor = SuzukaExecutorV1::try_from_env(tx).await?;
-        let services_executor = executor.clone();
-        let background_executor = executor.clone();
-
-        let services_handle = tokio::spawn(async move {
-            services_executor.run_service().await?;
-            Ok(()) as Result<(), anyhow::Error>
-        });
-
-        let background_handle = tokio::spawn(async move {
-            background_executor.run_background_tasks().await?;
-            Ok(()) as Result<(), anyhow::Error>
-        });
-
-        // With the services running, submit a transaction through the API.
-        let user_transaction = create_signed_transaction(0);
-        let comparison_user_transaction = user_transaction.clone();
-        let bcs_user_transaction = bcs::to_bytes(&user_transaction)?;
-
-        let request = SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction));
-        let api = executor.get_apis();
-        api.transactions.submit_transaction(AcceptType::Bcs, request).await?;
-
-        services_handle.abort();
-        background_handle.abort();
-        let received_transaction = rx.recv().await?;
-        assert_eq!(received_transaction, comparison_user_transaction);
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_pipe_transactions_from_api_and_execute() -> Result<(), anyhow::Error> {
-        let (tx, rx) = async_channel::unbounded();
-        let executor = SuzukaExecutorV1::try_from_env(tx).await?;
-        let services_executor = executor.clone();
-        let background_executor = executor.clone();
-
-        let services_handle = tokio::spawn(async move {
-            services_executor.run_service().await?;
-            Ok(()) as Result<(), anyhow::Error>
-        });
-
-        let background_handle = tokio::spawn(async move {
-            background_executor.run_background_tasks().await?;
-            Ok(()) as Result<(), anyhow::Error>
-        });
-
-        // With the services running, submit a transaction through the API.
-        let user_transaction = create_signed_transaction(0);
-        let comparison_user_transaction = user_transaction.clone();
-        let bcs_user_transaction = bcs::to_bytes(&user_transaction)?;
-
-        let request = SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction));
-        let api = executor.get_apis();
-        api.transactions.submit_transaction(AcceptType::Bcs, request).await?;
-
-        let received_transaction = rx.recv().await?;
-        assert_eq!(received_transaction, comparison_user_transaction);
-
-        // Now execute the block
-        let block_id = HashValue::random();
-        let block_metadata = executor
-            .build_block_metadata(block_id.clone(), chrono::Utc::now().timestamp_micros() as u64)
-            .await
-            .unwrap();
-        let txs = ExecutableTransactions::Unsharded(
-            [
-                Transaction::BlockMetadata(block_metadata),
-                Transaction::UserTransaction(received_transaction),
-            ]
-            .into_iter()
-            .map(SignatureVerifiedTransaction::Valid)
-            .collect(),
-        );
-        let block = ExecutableBlock::new(block_id.clone(), txs);
-        let commitment = executor.execute_block_opt(block).await?;
-
-        assert_eq!(commitment.block_id.to_vec(), block_id.to_vec());
-        assert_eq!(commitment.height, 1);
-
-        services_handle.abort();
-        background_handle.abort();
-
-        Ok(())
-    }
+	use super::*;
+	use aptos_api::{accept_type::AcceptType, transactions::SubmitTransactionPost};
+	use aptos_crypto::{
+		ed25519::{Ed25519PrivateKey, Ed25519Signature},
+		HashValue, PrivateKey, Uniform,
+	};
+	use aptos_types::{
+		account_address::AccountAddress,
+		block_executor::partitioner::ExecutableTransactions,
+		chain_id::ChainId,
+		transaction::{
+			signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script,
+			SignedTransaction, Transaction, TransactionPayload,
+		},
+	};
+
+	fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction {
+		let private_key = Ed25519PrivateKey::generate_for_testing();
+		let public_key = private_key.public_key();
+		let transaction_payload = TransactionPayload::Script(Script::new(vec![0], vec![], vec![]));
+		let raw_transaction = RawTransaction::new(
+			AccountAddress::random(),
+			0,
+			transaction_payload,
+			0,
+			gas_unit_price,
+			0,
+			ChainId::test(), // This is the value used in aptos testing code.
+		);
+		SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature())
+	}
+
+	#[tokio::test]
+	async fn test_execute_opt_block() -> Result<(), anyhow::Error> {
+		let (tx, _rx) = async_channel::unbounded();
+		let executor = SuzukaExecutorV1::try_from_env(tx).await?;
+		let block_id = HashValue::random();
+		let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(
+			create_signed_transaction(0),
+		));
+		let txs = ExecutableTransactions::Unsharded(vec![tx]);
+		let block = ExecutableBlock::new(block_id.clone(), txs);
+		executor.execute_block_opt(block).await?;
+		Ok(())
+	}
+
+	#[tokio::test]
+	async fn test_pipe_transactions_from_api() -> Result<(), anyhow::Error> {
+		let (tx, rx) = async_channel::unbounded();
+		let executor = SuzukaExecutorV1::try_from_env(tx).await?;
+		let services_executor = executor.clone();
+		let background_executor = executor.clone();
+
+		let services_handle = tokio::spawn(async move {
+			services_executor.run_service().await?;
+			Ok(()) as Result<(), anyhow::Error>
+		});
+
+		let background_handle = tokio::spawn(async move {
+			background_executor.run_background_tasks().await?;
+			Ok(()) as Result<(), anyhow::Error>
+		});
+
+		// With the services running, submit a transaction through the API.
+		let user_transaction = create_signed_transaction(0);
+		let comparison_user_transaction = user_transaction.clone();
+		let bcs_user_transaction = bcs::to_bytes(&user_transaction)?;
+
+		let request = SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction));
+		let api = executor.get_apis();
+		api.transactions.submit_transaction(AcceptType::Bcs, request).await?;
+
+		services_handle.abort();
+		background_handle.abort();
+		let received_transaction = rx.recv().await?;
+		assert_eq!(received_transaction, comparison_user_transaction);
+
+		Ok(())
+	}
+
+	#[tokio::test]
+	async fn test_pipe_transactions_from_api_and_execute() -> Result<(), anyhow::Error> {
+		let (tx, rx) = async_channel::unbounded();
+		let executor = SuzukaExecutorV1::try_from_env(tx).await?;
+		let services_executor = executor.clone();
+		let background_executor = executor.clone();
+
+		let services_handle = tokio::spawn(async move {
+			services_executor.run_service().await?;
+			Ok(()) as Result<(), anyhow::Error>
+		});
+
+		let background_handle = tokio::spawn(async move {
+			background_executor.run_background_tasks().await?;
+			Ok(()) as Result<(), anyhow::Error>
+		});
+
+		// With the services running, submit a transaction through the API.
+		let user_transaction = create_signed_transaction(0);
+		let comparison_user_transaction = user_transaction.clone();
+		let bcs_user_transaction = bcs::to_bytes(&user_transaction)?;
+
+		let request = SubmitTransactionPost::Bcs(aptos_api::bcs_payload::Bcs(bcs_user_transaction));
+		let api = executor.get_apis();
+		api.transactions.submit_transaction(AcceptType::Bcs, request).await?;
+
+		let received_transaction = rx.recv().await?;
+		assert_eq!(received_transaction, comparison_user_transaction);
+
+		// Now execute the block
+		let block_id = HashValue::random();
+		let block_metadata = executor
+			.build_block_metadata(block_id.clone(), chrono::Utc::now().timestamp_micros() as u64)
+			.await
+			.unwrap();
+		let txs = ExecutableTransactions::Unsharded(
+			[
+				Transaction::BlockMetadata(block_metadata),
+				Transaction::UserTransaction(received_transaction),
+			]
+			.into_iter()
+			.map(SignatureVerifiedTransaction::Valid)
+			.collect(),
+		);
+		let block = ExecutableBlock::new(block_id.clone(), txs);
+		let commitment = executor.execute_block_opt(block).await?;
+
+		assert_eq!(commitment.block_id.to_vec(), block_id.to_vec());
+		assert_eq!(commitment.height, 1);
+
+		services_handle.abort();
+		background_handle.abort();
+
+		Ok(())
+	}
}
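Note: the last test above encodes the block layout the executor expects: a `BlockMetadata` transaction first, then user transactions, all wrapped as `SignatureVerifiedTransaction::Valid`. Under those assumptions, assembling a block from one received transaction looks like:

    let block_id = HashValue::random();
    let metadata = executor
        .build_block_metadata(block_id.clone(), chrono::Utc::now().timestamp_micros() as u64)
        .await?;
    let txs = ExecutableTransactions::Unsharded(
        [Transaction::BlockMetadata(metadata), Transaction::UserTransaction(received_transaction)]
            .into_iter()
            .map(SignatureVerifiedTransaction::Valid)
            .collect(),
    );
    let commitment = executor.execute_block_opt(ExecutableBlock::new(block_id, txs)).await?;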
diff --git a/protocol-units/execution/suzuka/fin-executor/src/executor.rs b/protocol-units/execution/suzuka/fin-executor/src/executor.rs
index ebd0b5c1c..a9dd050ef 100644
--- a/protocol-units/execution/suzuka/fin-executor/src/executor.rs
+++ b/protocol-units/execution/suzuka/fin-executor/src/executor.rs
@@ -1,221 +1,221 @@
use aptos_config::config::NodeConfig;
use aptos_db::AptosDB;
use aptos_executor::{
-    block_executor::BlockExecutor,
-    db_bootstrapper::{generate_waypoint, maybe_bootstrap},
+	block_executor::BlockExecutor,
+	db_bootstrapper::{generate_waypoint, maybe_bootstrap},
};
use aptos_executor_types::{state_checkpoint_output::StateCheckpointOutput, BlockExecutorTrait};
use aptos_mempool::core_mempool::CoreMempool;
use aptos_storage_interface::DbReaderWriter;
use aptos_types::{
-    block_executor::config::BlockExecutorConfigFromOnchain,
-    block_executor::partitioner::ExecutableBlock,
-    transaction::{Transaction, WriteSetPayload},
-    validator_signer::ValidatorSigner,
+	block_executor::config::BlockExecutorConfigFromOnchain,
+	block_executor::partitioner::ExecutableBlock,
+	transaction::{Transaction, WriteSetPayload},
+	validator_signer::ValidatorSigner,
};
use aptos_vm::AptosVM;
use std::{
-    path::PathBuf,
-    sync::{Arc, RwLock},
+	path::PathBuf,
+	sync::{Arc, RwLock},
};

/// The state of `movement-network` execution can exist in three states:
/// `Dynamic`, `Optimistic`, and `Final`. The `Dynamic` state is the least
/// finalized of the three.
pub enum FinalityState {
-    /// The dynamic state that is subject to change and is not
-    /// yet finalized. It is the state that is derived from the blocks
-    /// received before any finality is reached and simply represents a
-    /// local application of the fork-choice rule (longest chain)
-    /// of the gossiped blocks.
-    Dynamic,
-    /// The optimistic state that is derived from the blocks received after DA finality.
-    /// It is the state that is derived from the blocks that have been finalized by the DA.
-    Optimistic,
-    /// The final state that is derived from the blocks received after the finality is reached.
-    Final,
+	/// The dynamic state that is subject to change and is not
+	/// yet finalized. It is the state that is derived from the blocks
+	/// received before any finality is reached and simply represents a
+	/// local application of the fork-choice rule (longest chain)
+	/// of the gossiped blocks.
+	Dynamic,
+	/// The optimistic state that is derived from the blocks received after DA finality.
+	/// It is the state that is derived from the blocks that have been finalized by the DA.
+	Optimistic,
+	/// The final state that is derived from the blocks received after the finality is reached.
+	Final,
}

/// The current state of the executor and its execution of blocks.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum ExecutorState {
-    /// The executor is idle and waiting for a block to be executed.
-    Idle,
-    /// The block is executed in a speculative manner and its effects held in memory.
-    Speculate,
-    /// The network agrees on the block.
-    Consensus,
-    /// The block is committed to the state; at this point,
-    /// fork choices must be resolved, otherwise the commitment and subsequent execution will fail.
-    Commit,
+	/// The executor is idle and waiting for a block to be executed.
+	Idle,
+	/// The block is executed in a speculative manner and its effects held in memory.
+	Speculate,
+	/// The network agrees on the block.
+	Consensus,
+	/// The block is committed to the state; at this point,
+	/// fork choices must be resolved, otherwise the commitment and subsequent execution will fail.
+	Commit,
}

/// The `Executor` is responsible for executing blocks and managing the state of the execution
/// against the `AptosVM`.
pub struct Executor {
-    /// The underlying block executor.
-    pub block_executor: Arc<RwLock<BlockExecutor<AptosVM>>>,
-    /// The current state of the executor.
-    pub status: ExecutorState,
-    /// The access to db.
-    pub db: DbReaderWriter,
-    /// The signer of the executor's transactions.
-    pub signer: ValidatorSigner,
-    /// The access to the core mempool.
-    pub mempool: CoreMempool,
+	/// The underlying block executor.
+	pub block_executor: Arc<RwLock<BlockExecutor<AptosVM>>>,
+	/// The current state of the executor.
+	pub status: ExecutorState,
+	/// The access to db.
+	pub db: DbReaderWriter,
+	/// The signer of the executor's transactions.
+	pub signer: ValidatorSigner,
+	/// The access to the core mempool.
+	pub mempool: CoreMempool,
}
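Note: `ExecutorState` reads as a four-stage cycle. The enum itself carries no transition logic in this change; the sketch below is illustrative only, not code from the diff:

    fn advance(state: ExecutorState) -> ExecutorState {
        match state {
            ExecutorState::Idle => ExecutorState::Speculate,      // a block arrives
            ExecutorState::Speculate => ExecutorState::Consensus, // the network agrees
            ExecutorState::Consensus => ExecutorState::Commit,    // effects are persisted
            ExecutorState::Commit => ExecutorState::Idle,         // ready for the next block
        }
    }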
impl Executor {
-    const DB_PATH_ENV_VAR: &'static str = "DB_DIR";
-
-    /// Create a new `Executor` instance.
-    pub fn new(
-        db_dir: PathBuf,
-        block_executor: BlockExecutor<AptosVM>,
-        signer: ValidatorSigner,
-        mempool: CoreMempool,
-    ) -> Self {
-        let (_aptos_db, reader_writer) = DbReaderWriter::wrap(AptosDB::new_for_test(&db_dir));
-        Self {
-            block_executor: Arc::new(RwLock::new(block_executor)),
-            status: ExecutorState::Idle,
-            db: reader_writer,
-            signer,
-            mempool,
-        }
-    }
-
-    pub fn bootstrap_empty_db(db_dir: PathBuf) -> Result<DbReaderWriter, anyhow::Error> {
-        let genesis = aptos_vm_genesis::test_genesis_change_set_and_validators(Some(1));
-        let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
-        let db_rw = DbReaderWriter::new(AptosDB::new_for_test(&db_dir));
-        assert!(db_rw.reader.get_latest_ledger_info_option()?.is_none());
-
-        // Bootstrap empty DB.
-        let waypoint =
-            generate_waypoint::<AptosVM>(&db_rw, &genesis_txn).expect("Should not fail.");
-        maybe_bootstrap::<AptosVM>(&db_rw, &genesis_txn, waypoint)?;
-        assert!(db_rw.reader.get_latest_ledger_info_option()?.is_some());
-
-        Ok(db_rw)
-    }
-
-    pub fn bootstrap(
-        db_dir: PathBuf,
-        signer: ValidatorSigner,
-        mempool: CoreMempool,
-    ) -> Result<Self, anyhow::Error> {
-        let db = Self::bootstrap_empty_db(db_dir)?;
-
-        Ok(Self {
-            block_executor: Arc::new(RwLock::new(BlockExecutor::new(db.clone()))),
-            status: ExecutorState::Idle,
-            db,
-            signer,
-            mempool,
-        })
-    }
-
-    pub fn try_from_env() -> Result<Self, anyhow::Error> {
-        // read the db dir from env or use a tempfile
-        let db_dir = match std::env::var(Self::DB_PATH_ENV_VAR) {
-            Ok(dir) => PathBuf::from(dir),
-            Err(_) => {
-                let temp_dir = tempfile::tempdir()?;
-                temp_dir.path().to_path_buf()
-            }
-        };
-
-        // use the default signer, block executor, and mempool
-        let signer = ValidatorSigner::random(None);
-        let mempool = CoreMempool::new(&NodeConfig::default());
-
-        Self::bootstrap(db_dir, signer, mempool)
-    }
-
-    pub fn set_commit_state(&mut self) {
-        self.status = ExecutorState::Commit;
-    }
-
-    /// Execute a block which gets committed to the state.
-    /// `ExecutorState` must be set to `Commit` before calling this method.
-    pub async fn execute_block(
-        &mut self,
-        block: ExecutableBlock,
-    ) -> Result<StateCheckpointOutput, anyhow::Error> {
-        if self.status != ExecutorState::Commit {
-            return Err(anyhow::anyhow!("Executor is not in the Commit state"));
-        }
-
-        let parent_block_id = {
-            let block_executor = self.block_executor.read().map_err(|e| {
-                anyhow::anyhow!("Failed to acquire block executor read lock: {:?}", e)
-            })?; // acquire read lock
-            block_executor.committed_block_id()
-        };
-
-        let state_checkpoint = {
-            let block_executor = self.block_executor.write().map_err(|e| {
-                anyhow::anyhow!("Failed to acquire block executor write lock: {:?}", e)
-            })?; // acquire write lock
-            block_executor.execute_and_state_checkpoint(
-                block,
-                parent_block_id,
-                BlockExecutorConfigFromOnchain::new_no_block_limit(),
-            )?
-        };
-
-        // Update the executor state
-        self.status = ExecutorState::Idle;
-
-        Ok(state_checkpoint)
-    }
+	const DB_PATH_ENV_VAR: &'static str = "DB_DIR";
+
+	/// Create a new `Executor` instance.
+	pub fn new(
+		db_dir: PathBuf,
+		block_executor: BlockExecutor<AptosVM>,
+		signer: ValidatorSigner,
+		mempool: CoreMempool,
+	) -> Self {
+		let (_aptos_db, reader_writer) = DbReaderWriter::wrap(AptosDB::new_for_test(&db_dir));
+		Self {
+			block_executor: Arc::new(RwLock::new(block_executor)),
+			status: ExecutorState::Idle,
+			db: reader_writer,
+			signer,
+			mempool,
+		}
+	}
+
+	pub fn bootstrap_empty_db(db_dir: PathBuf) -> Result<DbReaderWriter, anyhow::Error> {
+		let genesis = aptos_vm_genesis::test_genesis_change_set_and_validators(Some(1));
+		let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis.0));
+		let db_rw = DbReaderWriter::new(AptosDB::new_for_test(&db_dir));
+		assert!(db_rw.reader.get_latest_ledger_info_option()?.is_none());
+
+		// Bootstrap empty DB.
+		let waypoint =
+			generate_waypoint::<AptosVM>(&db_rw, &genesis_txn).expect("Should not fail.");
+		maybe_bootstrap::<AptosVM>(&db_rw, &genesis_txn, waypoint)?;
+		assert!(db_rw.reader.get_latest_ledger_info_option()?.is_some());
+
+		Ok(db_rw)
+	}
+
+	pub fn bootstrap(
+		db_dir: PathBuf,
+		signer: ValidatorSigner,
+		mempool: CoreMempool,
+	) -> Result<Self, anyhow::Error> {
+		let db = Self::bootstrap_empty_db(db_dir)?;
+
+		Ok(Self {
+			block_executor: Arc::new(RwLock::new(BlockExecutor::new(db.clone()))),
+			status: ExecutorState::Idle,
+			db,
+			signer,
+			mempool,
+		})
+	}
+
+	pub fn try_from_env() -> Result<Self, anyhow::Error> {
+		// read the db dir from env or use a tempfile
+		let db_dir = match std::env::var(Self::DB_PATH_ENV_VAR) {
+			Ok(dir) => PathBuf::from(dir),
+			Err(_) => {
+				let temp_dir = tempfile::tempdir()?;
+				temp_dir.path().to_path_buf()
+			}
+		};
+
+		// use the default signer, block executor, and mempool
+		let signer = ValidatorSigner::random(None);
+		let mempool = CoreMempool::new(&NodeConfig::default());
+
+		Self::bootstrap(db_dir, signer, mempool)
+	}
+
+	pub fn set_commit_state(&mut self) {
+		self.status = ExecutorState::Commit;
+	}
+
+	/// Execute a block which gets committed to the state.
+	/// `ExecutorState` must be set to `Commit` before calling this method.
+	pub async fn execute_block(
+		&mut self,
+		block: ExecutableBlock,
+	) -> Result<StateCheckpointOutput, anyhow::Error> {
+		if self.status != ExecutorState::Commit {
+			return Err(anyhow::anyhow!("Executor is not in the Commit state"));
+		}
+
+		let parent_block_id = {
+			let block_executor = self.block_executor.read().map_err(|e| {
+				anyhow::anyhow!("Failed to acquire block executor read lock: {:?}", e)
+			})?; // acquire read lock
+			block_executor.committed_block_id()
+		};
+
+		let state_checkpoint = {
+			let block_executor = self.block_executor.write().map_err(|e| {
+				anyhow::anyhow!("Failed to acquire block executor write lock: {:?}", e)
+			})?; // acquire write lock
+			block_executor.execute_and_state_checkpoint(
+				block,
+				parent_block_id,
+				BlockExecutorConfigFromOnchain::new_no_block_limit(),
+			)?
+		};
+
+		// Update the executor state
+		self.status = ExecutorState::Idle;
+
+		Ok(state_checkpoint)
+	}
}

#[cfg(test)]
mod tests {
-    use super::*;
-    use aptos_crypto::{
-        ed25519::{Ed25519PrivateKey, Ed25519Signature},
-        HashValue, PrivateKey, Uniform,
-    };
-    use aptos_types::{
-        account_address::AccountAddress,
-        block_executor::partitioner::ExecutableTransactions,
-        chain_id::ChainId,
-        transaction::{
-            signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script,
-            SignedTransaction, Transaction, TransactionPayload,
-        },
-    };
-
-    fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction {
-        let private_key = Ed25519PrivateKey::generate_for_testing();
-        let public_key = private_key.public_key();
-
-        let transaction_payload = TransactionPayload::Script(Script::new(vec![], vec![], vec![]));
-        let raw_transaction = RawTransaction::new(
-            AccountAddress::random(),
-            0,
-            transaction_payload,
-            0,
-            gas_unit_price,
-            0,
-            ChainId::new(10), // This is the value used in aptos testing code.
-        );
-        SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature())
-    }
-
-    #[tokio::test]
-    async fn test_execute_block() -> Result<(), anyhow::Error> {
-        let mut executor = Executor::try_from_env()?;
-        executor.set_commit_state();
-        let block_id = HashValue::random();
-        let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(
-            create_signed_transaction(0),
-        ));
-        let txs = ExecutableTransactions::Unsharded(vec![tx]);
-        let block = ExecutableBlock::new(block_id.clone(), txs);
-        executor.execute_block(block).await?;
-        Ok(())
-    }
+	use super::*;
+	use aptos_crypto::{
+		ed25519::{Ed25519PrivateKey, Ed25519Signature},
+		HashValue, PrivateKey, Uniform,
+	};
+	use aptos_types::{
+		account_address::AccountAddress,
+		block_executor::partitioner::ExecutableTransactions,
+		chain_id::ChainId,
+		transaction::{
+			signature_verified_transaction::SignatureVerifiedTransaction, RawTransaction, Script,
+			SignedTransaction, Transaction, TransactionPayload,
+		},
+	};
+
+	fn create_signed_transaction(gas_unit_price: u64) -> SignedTransaction {
+		let private_key = Ed25519PrivateKey::generate_for_testing();
+		let public_key = private_key.public_key();
+
+		let transaction_payload = TransactionPayload::Script(Script::new(vec![], vec![], vec![]));
+		let raw_transaction = RawTransaction::new(
+			AccountAddress::random(),
+			0,
+			transaction_payload,
+			0,
+			gas_unit_price,
+			0,
+			ChainId::new(10), // This is the value used in aptos testing code.
+		);
+		SignedTransaction::new(raw_transaction, public_key, Ed25519Signature::dummy_signature())
+	}
+
+	#[tokio::test]
+	async fn test_execute_block() -> Result<(), anyhow::Error> {
+		let mut executor = Executor::try_from_env()?;
+		executor.set_commit_state();
+		let block_id = HashValue::random();
+		let tx = SignatureVerifiedTransaction::Valid(Transaction::UserTransaction(
+			create_signed_transaction(0),
+		));
+		let txs = ExecutableTransactions::Unsharded(vec![tx]);
+		let block = ExecutableBlock::new(block_id.clone(), txs);
+		executor.execute_block(block).await?;
+		Ok(())
+	}
}
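Note: `execute_block` is gated on `ExecutorState::Commit`, so the call sequence from the test above is the minimal correct usage; skipping `set_commit_state` makes `execute_block` return an error:

    let mut executor = Executor::try_from_env()?;
    executor.set_commit_state(); // required before execute_block
    let state_checkpoint = executor.execute_block(block).await?;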
diff --git a/protocol-units/mempool/move-rocks/src/lib.rs b/protocol-units/mempool/move-rocks/src/lib.rs
index 2e4d266c8..61b2e382c 100644
--- a/protocol-units/mempool/move-rocks/src/lib.rs
+++ b/protocol-units/mempool/move-rocks/src/lib.rs
@@ -8,263 +8,265 @@ use tokio::sync::RwLock;
#[derive(Debug, Clone)]
pub struct RocksdbMempool {
-    db: Arc<RwLock<DB>>,
+	db: Arc<RwLock<DB>>,
}

impl RocksdbMempool {
-    pub fn try_new(path: &str) -> Result<Self, Error> {
-        let mut options = Options::default();
-        options.create_if_missing(true);
-        options.create_missing_column_families(true);
-
-        let mempool_transactions_cf =
-            ColumnFamilyDescriptor::new("mempool_transactions", Options::default());
-        let transaction_truths_cf =
-            ColumnFamilyDescriptor::new("transaction_truths", Options::default());
-        let blocks_cf = ColumnFamilyDescriptor::new("blocks", Options::default());
-        let transaction_lookups_cf =
-            ColumnFamilyDescriptor::new("transaction_lookups", Options::default());
-
-        let db = DB::open_cf_descriptors(
-            &options,
-            path,
-            vec![mempool_transactions_cf, transaction_truths_cf, blocks_cf, transaction_lookups_cf],
-        )
-        .map_err(|e| Error::new(e))?;
-
-        Ok(RocksdbMempool { db: Arc::new(RwLock::new(db)) })
-    }
-
-    pub fn construct_mempool_transaction_key(transaction: &MempoolTransaction) -> String {
-        // pad to 32 characters
-        let slot_seconds_str = format!("{:032}", transaction.timestamp);
-
-        // Assuming transaction.transaction.id() returns a hex string of length 32
-        let transaction_id_hex = transaction.transaction.id(); // This should be a String of hex characters
-
-        // Concatenate the two parts, separated by ':', to form the key
-        let key = format!("{}:{}", slot_seconds_str, transaction_id_hex);
-
-        key
-    }
-
-    /// Helper function to retrieve the key for mempool transaction from the lookup table.
-    async fn get_mempool_transaction_key(
-        &self,
-        transaction_id: &Id,
-    ) -> Result<Option<Vec<u8>>, Error> {
-        let db = self.db.read().await;
-        let cf_handle =
-            db.cf_handle("transaction_lookups").ok_or_else(|| Error::msg("CF handle not found"))?;
-        db.get_cf(&cf_handle, transaction_id.to_vec()).map_err(|e| Error::new(e))
-    }
+	pub fn try_new(path: &str) -> Result<Self, Error> {
+		let mut options = Options::default();
+		options.create_if_missing(true);
+		options.create_missing_column_families(true);
+
+		let mempool_transactions_cf =
+			ColumnFamilyDescriptor::new("mempool_transactions", Options::default());
+		let transaction_truths_cf =
+			ColumnFamilyDescriptor::new("transaction_truths", Options::default());
+		let blocks_cf = ColumnFamilyDescriptor::new("blocks", Options::default());
+		let transaction_lookups_cf =
+			ColumnFamilyDescriptor::new("transaction_lookups", Options::default());
+
+		let db = DB::open_cf_descriptors(
+			&options,
+			path,
+			vec![mempool_transactions_cf, transaction_truths_cf, blocks_cf, transaction_lookups_cf],
+		)
+		.map_err(|e| Error::new(e))?;
+
+		Ok(RocksdbMempool { db: Arc::new(RwLock::new(db)) })
+	}
+
+	pub fn construct_mempool_transaction_key(transaction: &MempoolTransaction) -> String {
+		// pad to 32 characters
+		let slot_seconds_str = format!("{:032}", transaction.timestamp);
+
+		// Assuming transaction.transaction.id() returns a hex string of length 32
+		let transaction_id_hex = transaction.transaction.id(); // This should be a String of hex characters
+
+		// Concatenate the two parts, separated by ':', to form the key
+		let key = format!("{}:{}", slot_seconds_str, transaction_id_hex);
+
+		key
+	}
+
+	/// Helper function to retrieve the key for mempool transaction from the lookup table.
+	async fn get_mempool_transaction_key(
+		&self,
+		transaction_id: &Id,
+	) -> Result<Option<Vec<u8>>, Error> {
+		let db = self.db.read().await;
+		let cf_handle = db
+			.cf_handle("transaction_lookups")
+			.ok_or_else(|| Error::msg("CF handle not found"))?;
+		db.get_cf(&cf_handle, transaction_id.to_vec()).map_err(|e| Error::new(e))
+	}
}

impl MempoolTransactionOperations for RocksdbMempool {
-    async fn has_mempool_transaction(&self, transaction_id: Id) -> Result<bool, Error> {
-        let key = self.get_mempool_transaction_key(&transaction_id).await?;
-        match key {
-            Some(k) => {
-                let db = self.db.read().await;
-                let cf_handle = db
-                    .cf_handle("mempool_transactions")
-                    .ok_or_else(|| Error::msg("CF handle not found"))?;
-                Ok(db.get_cf(&cf_handle, k)?.is_some())
-            }
-            None => Ok(false),
-        }
-    }
-
-    async fn add_mempool_transaction(&self, tx: MempoolTransaction) -> Result<(), Error> {
-        let serialized_tx = serde_json::to_vec(&tx)?;
-        let db = self.db.write().await;
-        let mempool_transactions_cf_handle = db
-            .cf_handle("mempool_transactions")
-            .ok_or_else(|| Error::msg("CF handle not found"))?;
-        let transaction_lookups_cf_handle =
-            db.cf_handle("transaction_lookups").ok_or_else(|| Error::msg("CF handle not found"))?;
-
-        let key = Self::construct_mempool_transaction_key(&tx);
-        db.put_cf(&mempool_transactions_cf_handle, &key, &serialized_tx)?;
-        db.put_cf(&transaction_lookups_cf_handle, tx.transaction.id().to_vec(), &key)?;
-
-        Ok(())
-    }
-
-    async fn remove_mempool_transaction(&self, transaction_id: Id) -> Result<(), Error> {
-        let key = self.get_mempool_transaction_key(&transaction_id).await?;
-
-        match key {
-            Some(k) => {
-                let db = self.db.write().await;
-                let cf_handle = db
-                    .cf_handle("mempool_transactions")
-                    .ok_or_else(|| Error::msg("CF handle not found"))?;
-                db.delete_cf(&cf_handle, k)?;
-                let lookups_cf_handle = db
-                    .cf_handle("transaction_lookups")
-                    .ok_or_else(|| Error::msg("CF handle not found"))?;
-                db.delete_cf(&lookups_cf_handle, transaction_id.to_vec())?;
-            }
-            None => (),
-        }
-        Ok(())
-    }
-
-    // Updated method signatures and implementations go here
-    async fn get_mempool_transaction(
-        &self,
-        transaction_id: Id,
-    ) -> Result<Option<MempoolTransaction>, Error> {
-        let key = match self.get_mempool_transaction_key(&transaction_id).await? {
-            Some(k) => k,
-            None => return Ok(None), // If no key found in lookup, return None
-        };
-        let db = self.db.read().await;
-        let cf_handle = db
-            .cf_handle("mempool_transactions")
-            .ok_or_else(|| Error::msg("CF handle not found"))?;
-        match db.get_cf(&cf_handle, &key)? {
-            Some(serialized_tx) => {
-                let tx: MempoolTransaction = serde_json::from_slice(&serialized_tx)?;
-                Ok(Some(tx))
-            }
-            None => Ok(None),
-        }
-    }
-
-    async fn pop_mempool_transaction(&self) -> Result<Option<MempoolTransaction>, Error> {
-        let db = self.db.write().await;
-        let cf_handle = db
-            .cf_handle("mempool_transactions")
-            .ok_or_else(|| Error::msg("CF handle not found"))?;
-        let mut iter = db.iterator_cf(&cf_handle, rocksdb::IteratorMode::Start);
-
-        match iter.next() {
-            None => return Ok(None), // No transactions to pop
-            Some(res) => {
-                let (key, value) = res?;
-                let tx: MempoolTransaction = serde_json::from_slice(&value)?;
-                db.delete_cf(&cf_handle, &key)?;
-
-                // Optionally, remove from the lookup table as well
-                let lookups_cf_handle = db
-                    .cf_handle("transaction_lookups")
-                    .ok_or_else(|| Error::msg("CF handle not found"))?;
-                db.delete_cf(&lookups_cf_handle, tx.transaction.id().to_vec())?;
-
-                Ok(Some(tx))
-            }
-        }
-    }
+	async fn has_mempool_transaction(&self, transaction_id: Id) -> Result<bool, Error> {
+		let key = self.get_mempool_transaction_key(&transaction_id).await?;
+		match key {
+			Some(k) => {
+				let db = self.db.read().await;
+				let cf_handle = db
+					.cf_handle("mempool_transactions")
+					.ok_or_else(|| Error::msg("CF handle not found"))?;
+				Ok(db.get_cf(&cf_handle, k)?.is_some())
+			}
+			None => Ok(false),
+		}
+	}
+
+	async fn add_mempool_transaction(&self, tx: MempoolTransaction) -> Result<(), Error> {
+		let serialized_tx = serde_json::to_vec(&tx)?;
+		let db = self.db.write().await;
+		let mempool_transactions_cf_handle = db
+			.cf_handle("mempool_transactions")
+			.ok_or_else(|| Error::msg("CF handle not found"))?;
+		let transaction_lookups_cf_handle = db
+			.cf_handle("transaction_lookups")
+			.ok_or_else(|| Error::msg("CF handle not found"))?;
+
+		let key = Self::construct_mempool_transaction_key(&tx);
+		db.put_cf(&mempool_transactions_cf_handle, &key, &serialized_tx)?;
+		db.put_cf(&transaction_lookups_cf_handle, tx.transaction.id().to_vec(), &key)?;
+
+		Ok(())
+	}
+
+	async fn remove_mempool_transaction(&self, transaction_id: Id) -> Result<(), Error> {
+		let key = self.get_mempool_transaction_key(&transaction_id).await?;
+
+		match key {
+			Some(k) => {
+				let db = self.db.write().await;
+				let cf_handle = db
+					.cf_handle("mempool_transactions")
+					.ok_or_else(|| Error::msg("CF handle not found"))?;
+				db.delete_cf(&cf_handle, k)?;
+				let lookups_cf_handle = db
+					.cf_handle("transaction_lookups")
+					.ok_or_else(|| Error::msg("CF handle not found"))?;
+				db.delete_cf(&lookups_cf_handle, transaction_id.to_vec())?;
+			}
+			None => (),
+		}
+		Ok(())
+	}
+
+	// Updated method signatures and implementations go here
+	async fn get_mempool_transaction(
+		&self,
+		transaction_id: Id,
+	) -> Result<Option<MempoolTransaction>, Error> {
+		let key = match self.get_mempool_transaction_key(&transaction_id).await? {
+			Some(k) => k,
+			None => return Ok(None), // If no key found in lookup, return None
+		};
+		let db = self.db.read().await;
+		let cf_handle = db
+			.cf_handle("mempool_transactions")
+			.ok_or_else(|| Error::msg("CF handle not found"))?;
+		match db.get_cf(&cf_handle, &key)? {
+			Some(serialized_tx) => {
+				let tx: MempoolTransaction = serde_json::from_slice(&serialized_tx)?;
+				Ok(Some(tx))
+			}
+			None => Ok(None),
+		}
+	}
+
+	async fn pop_mempool_transaction(&self) -> Result<Option<MempoolTransaction>, Error> {
+		let db = self.db.write().await;
+		let cf_handle = db
+			.cf_handle("mempool_transactions")
+			.ok_or_else(|| Error::msg("CF handle not found"))?;
+		let mut iter = db.iterator_cf(&cf_handle, rocksdb::IteratorMode::Start);
+
+		match iter.next() {
+			None => return Ok(None), // No transactions to pop
+			Some(res) => {
+				let (key, value) = res?;
+				let tx: MempoolTransaction = serde_json::from_slice(&value)?;
+				db.delete_cf(&cf_handle, &key)?;
+
+				// Optionally, remove from the lookup table as well
+				let lookups_cf_handle = db
+					.cf_handle("transaction_lookups")
+					.ok_or_else(|| Error::msg("CF handle not found"))?;
+				db.delete_cf(&lookups_cf_handle, tx.transaction.id().to_vec())?;
+
+				Ok(Some(tx))
+			}
+		}
+	}
}
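Note: `add_mempool_transaction` performs two separate `put_cf` calls (the entry and its id-to-key lookup), so a crash between them can leave the lookup table inconsistent. A hedged suggestion, using rocksdb's atomic write batches with the same handles and values as above:

    let mut batch = rocksdb::WriteBatch::default();
    batch.put_cf(&mempool_transactions_cf_handle, &key, &serialized_tx);
    batch.put_cf(&transaction_lookups_cf_handle, tx.transaction.id().to_vec(), &key);
    db.write(batch)?; // both writes land or neither does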
impl MempoolBlockOperations for RocksdbMempool {
-    async fn has_block(&self, block_id: Id) -> Result<bool, Error> {
-        let db = self.db.read().await;
-        let cf_handle = db.cf_handle("blocks").ok_or_else(|| Error::msg("CF handle not found"))?;
-        Ok(db.get_cf(&cf_handle, block_id.to_vec())?.is_some())
-    }
-
-    async fn add_block(&self, block: Block) -> Result<(), Error> {
-        let serialized_block = serde_json::to_vec(&block)?;
-        let db = self.db.write().await;
-        let cf_handle = db.cf_handle("blocks").ok_or_else(|| Error::msg("CF handle not found"))?;
-        db.put_cf(&cf_handle, block.id().to_vec(), &serialized_block)?;
-        Ok(())
-    }
-
-    async fn remove_block(&self, block_id: Id) -> Result<(), Error> {
-        let db = self.db.write().await;
-        let cf_handle = db.cf_handle("blocks").ok_or_else(|| Error::msg("CF handle not found"))?;
-        db.delete_cf(&cf_handle, block_id.to_vec())?;
-        Ok(())
-    }
-
-    async fn get_block(&self, block_id: Id) -> Result<Option<Block>, Error> {
-        let db = self.db.read().await;
-        let cf_handle = db.cf_handle("blocks").ok_or_else(|| Error::msg("CF handle not found"))?;
-        let serialized_block = db.get_cf(&cf_handle, block_id.to_vec())?;
-        match serialized_block {
-            Some(serialized_block) => {
-                let block: Block = serde_json::from_slice(&serialized_block)?;
-                Ok(Some(block))
-            }
-            None => Ok(None),
-        }
-    }
+	async fn has_block(&self, block_id: Id) -> Result<bool, Error> {
+		let db = self.db.read().await;
+		let cf_handle = db.cf_handle("blocks").ok_or_else(|| Error::msg("CF handle not found"))?;
+		Ok(db.get_cf(&cf_handle, block_id.to_vec())?.is_some())
+	}
+
+	async fn add_block(&self, block: Block) -> Result<(), Error> {
+		let serialized_block = serde_json::to_vec(&block)?;
+		let db = self.db.write().await;
+		let cf_handle = db.cf_handle("blocks").ok_or_else(|| Error::msg("CF handle not found"))?;
+		db.put_cf(&cf_handle, block.id().to_vec(), &serialized_block)?;
+		Ok(())
+	}
+
+	async fn remove_block(&self, block_id: Id) -> Result<(), Error> {
+		let db = self.db.write().await;
+		let cf_handle = db.cf_handle("blocks").ok_or_else(|| Error::msg("CF handle not found"))?;
+		db.delete_cf(&cf_handle, block_id.to_vec())?;
+		Ok(())
+	}
+
+	async fn get_block(&self, block_id: Id) -> Result<Option<Block>, Error> {
+		let db = self.db.read().await;
+		let cf_handle = db.cf_handle("blocks").ok_or_else(|| Error::msg("CF handle not found"))?;
+		let serialized_block = db.get_cf(&cf_handle, block_id.to_vec())?;
+		match serialized_block {
+			Some(serialized_block) => {
+				let block: Block = serde_json::from_slice(&serialized_block)?;
+				Ok(Some(block))
+			}
+			None => Ok(None),
+		}
+	}
}

#[cfg(test)]
pub mod test {
-    use super::*;
-    use movement_types::Transaction;
-    use tempfile::tempdir;
-
-    #[tokio::test]
-    async fn test_rocksdb_mempool_basic_operations() -> Result<(), Error> {
-        let temp_dir = tempdir().unwrap();
-        let path = temp_dir.path().to_str().unwrap();
-        let mempool = RocksdbMempool::try_new(path)?;
-
-        let tx = MempoolTransaction::test();
-        let tx_id = tx.id();
-        mempool.add_mempool_transaction(tx.clone()).await?;
-        assert!(mempool.has_mempool_transaction(tx_id.clone()).await?);
-        let tx2 = mempool.get_mempool_transaction(tx_id.clone()).await?;
-        assert_eq!(Some(tx), tx2);
-        mempool.remove_mempool_transaction(tx_id.clone()).await?;
-        assert!(!mempool.has_mempool_transaction(tx_id.clone()).await?);
-
-        let block = Block::test();
-        let block_id = block.id();
-        mempool.add_block(block.clone()).await?;
-        assert!(mempool.has_block(block_id.clone()).await?);
-        let block2 = mempool.get_block(block_id.clone()).await?;
-        assert_eq!(Some(block), block2);
-        mempool.remove_block(block_id.clone()).await?;
-        assert!(!mempool.has_block(block_id.clone()).await?);
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_rocksdb_transaction_operations() -> Result<(), Error> {
-        let temp_dir = tempdir().unwrap();
-        let path = temp_dir.path().to_str().unwrap();
-        let mempool = RocksdbMempool::try_new(path)?;
-
-        let tx = Transaction::test();
-        let tx_id = tx.id();
-        mempool.add_transaction(tx.clone()).await?;
-        assert!(mempool.has_transaction(tx_id.clone()).await?);
-        let tx2 = mempool.get_transaction(tx_id.clone()).await?;
-        assert_eq!(Some(tx), tx2);
-        mempool.remove_transaction(tx_id.clone()).await?;
-        assert!(!mempool.has_transaction(tx_id.clone()).await?);
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_transaction_slot_based_ordering() -> Result<(), Error> {
-        let temp_dir = tempdir().unwrap();
-        let path = temp_dir.path().to_str().unwrap();
-        let mempool = RocksdbMempool::try_new(path)?;
-
-        let tx1 = MempoolTransaction::at_time(Transaction::new(vec![1]), 2);
-        let tx2 = MempoolTransaction::at_time(Transaction::new(vec![2]), 64);
-        let tx3 = MempoolTransaction::at_time(Transaction::new(vec![3]), 128);
-
-        mempool.add_mempool_transaction(tx2.clone()).await?;
-        mempool.add_mempool_transaction(tx1.clone()).await?;
-        mempool.add_mempool_transaction(tx3.clone()).await?;
-
-        let txs = mempool.pop_mempool_transactions(3).await?;
-        assert_eq!(txs[0], tx1);
-        assert_eq!(txs[1], tx2);
-        assert_eq!(txs[2], tx3);
-
-        Ok(())
-    }
+	use super::*;
+	use movement_types::Transaction;
+	use tempfile::tempdir;
+
+	#[tokio::test]
+	async fn test_rocksdb_mempool_basic_operations() -> Result<(), Error> {
+		let temp_dir = tempdir().unwrap();
+		let path = temp_dir.path().to_str().unwrap();
+		let mempool = RocksdbMempool::try_new(path)?;
+
+		let tx = MempoolTransaction::test();
+		let tx_id = tx.id();
+		mempool.add_mempool_transaction(tx.clone()).await?;
+		assert!(mempool.has_mempool_transaction(tx_id.clone()).await?);
+		let tx2 = mempool.get_mempool_transaction(tx_id.clone()).await?;
+		assert_eq!(Some(tx), tx2);
+		mempool.remove_mempool_transaction(tx_id.clone()).await?;
+		assert!(!mempool.has_mempool_transaction(tx_id.clone()).await?);
+
+		let block = Block::test();
+		let block_id = block.id();
+		mempool.add_block(block.clone()).await?;
+		assert!(mempool.has_block(block_id.clone()).await?);
+		let block2 = mempool.get_block(block_id.clone()).await?;
+		assert_eq!(Some(block), block2);
+		mempool.remove_block(block_id.clone()).await?;
+		assert!(!mempool.has_block(block_id.clone()).await?);
+
+		Ok(())
+	}
+
+	#[tokio::test]
+	async fn test_rocksdb_transaction_operations() -> Result<(), Error> {
+		let temp_dir = tempdir().unwrap();
+		let path = temp_dir.path().to_str().unwrap();
+		let mempool = RocksdbMempool::try_new(path)?;
+
+		let tx = Transaction::test();
+		let tx_id = tx.id();
+		mempool.add_transaction(tx.clone()).await?;
+		assert!(mempool.has_transaction(tx_id.clone()).await?);
+		let tx2 = mempool.get_transaction(tx_id.clone()).await?;
+		assert_eq!(Some(tx), tx2);
+		mempool.remove_transaction(tx_id.clone()).await?;
+		assert!(!mempool.has_transaction(tx_id.clone()).await?);
+
+		Ok(())
+	}
+
+	#[tokio::test]
+	async fn test_transaction_slot_based_ordering() -> Result<(), Error> {
+		let temp_dir = tempdir().unwrap();
+		let path = temp_dir.path().to_str().unwrap();
+		let mempool = RocksdbMempool::try_new(path)?;
+
+		let tx1 = MempoolTransaction::at_time(Transaction::new(vec![1]), 2);
+		let tx2 = MempoolTransaction::at_time(Transaction::new(vec![2]), 64);
+		let tx3 = MempoolTransaction::at_time(Transaction::new(vec![3]), 128);
+
+		mempool.add_mempool_transaction(tx2.clone()).await?;
+		mempool.add_mempool_transaction(tx1.clone()).await?;
+		mempool.add_mempool_transaction(tx3.clone()).await?;
+
+		let txs = mempool.pop_mempool_transactions(3).await?;
+		assert_eq!(txs[0], tx1);
+		assert_eq!(txs[1], tx2);
+		assert_eq!(txs[2], tx3);
+
+		Ok(())
+	}
}
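Note: the composite key is what makes `pop_mempool_transaction` return the oldest entry: RocksDB iterates keys lexicographically, and zero-padding the timestamp to a fixed width makes lexicographic order agree with numeric order, which the slot-ordering test above relies on. For example:

    assert_eq!(format!("{:032}", 64u64), "00000000000000000000000000000064");
    // "...0002:<id>" sorts before "...0064:<id>" sorts before "...0128:<id>"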
diff --git a/protocol-units/mempool/util/src/lib.rs b/protocol-units/mempool/util/src/lib.rs
index 9ce718c16..de563b224 100644
--- a/protocol-units/mempool/util/src/lib.rs
+++ b/protocol-units/mempool/util/src/lib.rs
@@ -4,156 +4,158 @@ use movement_types::{Block, Id, Transaction};
use std::cmp::Ordering;

pub trait MempoolTransactionOperations {
-    // todo: move mempool_transaction methods into separate trait
-
-    /// Checks whether a mempool transaction exists in the mempool.
-    async fn has_mempool_transaction(&self, transaction_id: Id) -> Result<bool, anyhow::Error>;
-
-    /// Adds a mempool transaction to the mempool.
-    async fn add_mempool_transaction(&self, tx: MempoolTransaction) -> Result<(), anyhow::Error>;
-
-    /// Removes a mempool transaction from the mempool.
-    async fn remove_mempool_transaction(&self, transaction_id: Id) -> Result<(), anyhow::Error>;
-
-    /// Pops mempool transaction from the mempool.
-    async fn pop_mempool_transaction(&self) -> Result<Option<MempoolTransaction>, anyhow::Error>;
-
-    /// Gets a mempool transaction from the mempool.
-    async fn get_mempool_transaction(
-        &self,
-        transaction_id: Id,
-    ) -> Result<Option<MempoolTransaction>, anyhow::Error>;
-
-    /// Pops the next n mempool transactions from the mempool.
-    async fn pop_mempool_transactions(
-        &self,
-        n: usize,
-    ) -> Result<Vec<MempoolTransaction>, anyhow::Error> {
-        let mut mempool_transactions = Vec::with_capacity(n);
-        for _ in 0..n {
-            if let Some(mempool_transaction) = self.pop_mempool_transaction().await? {
-                mempool_transactions.push(mempool_transaction);
-            } else {
-                break;
-            }
-        }
-        Ok(mempool_transactions)
-    }
-
-    /// Checks whether the mempool has the transaction.
-    async fn has_transaction(&self, transaction_id: Id) -> Result<bool, anyhow::Error> {
-        self.has_mempool_transaction(transaction_id).await
-    }
-
-    /// Adds a transaction to the mempool.
-    async fn add_transaction(&self, tx: Transaction) -> Result<(), anyhow::Error> {
-        if self.has_transaction(tx.id()).await? {
-            return Ok(());
-        }
-
-        let mempool_transaction = MempoolTransaction::slot_now(tx);
-        self.add_mempool_transaction(mempool_transaction).await
-    }
-
-    /// Removes a transaction from the mempool.
-    async fn remove_transaction(&self, transaction_id: Id) -> Result<(), anyhow::Error> {
-        self.remove_mempool_transaction(transaction_id).await
-    }
-
-    /// Pops transaction from the mempool.
-    async fn pop_transaction(&self) -> Result<Option<Transaction>, anyhow::Error> {
-        let mempool_transaction = self.pop_mempool_transaction().await?;
-        Ok(mempool_transaction.map(|mempool_transaction| mempool_transaction.transaction))
-    }
-
-    /// Gets a transaction from the mempool.
-    async fn get_transaction(
-        &self,
-        transaction_id: Id,
-    ) -> Result<Option<Transaction>, anyhow::Error> {
-        let mempool_transaction = self.get_mempool_transaction(transaction_id).await?;
-        Ok(mempool_transaction.map(|mempool_transaction| mempool_transaction.transaction))
-    }
-
-    /// Pops the next n transactions from the mempool.
-    async fn pop_transactions(&self, n: usize) -> Result<Vec<Transaction>, anyhow::Error> {
-        let mempool_transactions = self.pop_mempool_transactions(n).await?;
-        Ok(mempool_transactions
-            .into_iter()
-            .map(|mempool_transaction| mempool_transaction.transaction)
-            .collect())
-    }
+	// todo: move mempool_transaction methods into separate trait
+
+	/// Checks whether a mempool transaction exists in the mempool.
+	async fn has_mempool_transaction(&self, transaction_id: Id) -> Result<bool, anyhow::Error>;
+
+	/// Adds a mempool transaction to the mempool.
+	async fn add_mempool_transaction(&self, tx: MempoolTransaction) -> Result<(), anyhow::Error>;
+
+	/// Removes a mempool transaction from the mempool.
+	async fn remove_mempool_transaction(&self, transaction_id: Id) -> Result<(), anyhow::Error>;
+
+	/// Pops mempool transaction from the mempool.
+	async fn pop_mempool_transaction(&self) -> Result<Option<MempoolTransaction>, anyhow::Error>;
+
+	/// Gets a mempool transaction from the mempool.
+	async fn get_mempool_transaction(
+		&self,
+		transaction_id: Id,
+	) -> Result<Option<MempoolTransaction>, anyhow::Error>;
+
+	/// Pops the next n mempool transactions from the mempool.
+	async fn pop_mempool_transactions(
+		&self,
+		n: usize,
+	) -> Result<Vec<MempoolTransaction>, anyhow::Error> {
+		let mut mempool_transactions = Vec::with_capacity(n);
+		for _ in 0..n {
+			if let Some(mempool_transaction) = self.pop_mempool_transaction().await? {
+				mempool_transactions.push(mempool_transaction);
+			} else {
+				break;
+			}
+		}
+		Ok(mempool_transactions)
+	}
+
+	/// Checks whether the mempool has the transaction.
+	async fn has_transaction(&self, transaction_id: Id) -> Result<bool, anyhow::Error> {
+		self.has_mempool_transaction(transaction_id).await
+	}
+
+	/// Adds a transaction to the mempool.
+	async fn add_transaction(&self, tx: Transaction) -> Result<(), anyhow::Error> {
+		if self.has_transaction(tx.id()).await? {
+			return Ok(());
+		}
+
+		let mempool_transaction = MempoolTransaction::slot_now(tx);
+		self.add_mempool_transaction(mempool_transaction).await
+	}
+
+	/// Removes a transaction from the mempool.
+	async fn remove_transaction(&self, transaction_id: Id) -> Result<(), anyhow::Error> {
+		self.remove_mempool_transaction(transaction_id).await
+	}
+
+	/// Pops transaction from the mempool.
+	async fn pop_transaction(&self) -> Result<Option<Transaction>, anyhow::Error> {
+		let mempool_transaction = self.pop_mempool_transaction().await?;
+		Ok(mempool_transaction.map(|mempool_transaction| mempool_transaction.transaction))
+	}
+
+	/// Gets a transaction from the mempool.
+	async fn get_transaction(
+		&self,
+		transaction_id: Id,
+	) -> Result<Option<Transaction>, anyhow::Error> {
+		let mempool_transaction = self.get_mempool_transaction(transaction_id).await?;
+		Ok(mempool_transaction.map(|mempool_transaction| mempool_transaction.transaction))
+	}
+
+	/// Pops the next n transactions from the mempool.
+	async fn pop_transactions(&self, n: usize) -> Result<Vec<Transaction>, anyhow::Error> {
+		let mempool_transactions = self.pop_mempool_transactions(n).await?;
+		Ok(mempool_transactions
+			.into_iter()
+			.map(|mempool_transaction| mempool_transaction.transaction)
+			.collect())
+	}
}

pub trait MempoolBlockOperations {
-    /// Checks whether a block exists in the mempool.
-    async fn has_block(&self, block_id: Id) -> Result<bool, anyhow::Error>;
+	/// Checks whether a block exists in the mempool.
+	async fn has_block(&self, block_id: Id) -> Result<bool, anyhow::Error>;

-    /// Adds a block to the mempool.
-    async fn add_block(&self, block: Block) -> Result<(), anyhow::Error>;
+	/// Adds a block to the mempool.
+	async fn add_block(&self, block: Block) -> Result<(), anyhow::Error>;

-    /// Removes a block from the mempool.
-    async fn remove_block(&self, block_id: Id) -> Result<(), anyhow::Error>;
+	/// Removes a block from the mempool.
+	async fn remove_block(&self, block_id: Id) -> Result<(), anyhow::Error>;

-    /// Gets a block from the mempool.
-    async fn get_block(&self, block_id: Id) -> Result<Option<Block>, anyhow::Error>;
+	/// Gets a block from the mempool.
+	async fn get_block(&self, block_id: Id) -> Result<Option<Block>, anyhow::Error>;
}

/// Wraps a transaction with a timestamp to help with ordering.
#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub struct MempoolTransaction {
-    pub transaction: Transaction,
-    pub timestamp: u64,
-    pub slot_seconds: u64,
+	pub transaction: Transaction,
+	pub timestamp: u64,
+	pub slot_seconds: u64,
}

impl PartialOrd for MempoolTransaction {
-    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        Some(self.cmp(other))
-    }
+	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+		Some(self.cmp(other))
+	}
}

/// Ordered first by slot_seconds, then by transaction.
/// This allows us to use a BTreeSet to order transactions by slot_seconds, then by transaction, and pop them off in order.
impl Ord for MempoolTransaction {
-    fn cmp(&self, other: &Self) -> Ordering {
-        // First, compare by slot_seconds
-        match self.slot_seconds.cmp(&other.slot_seconds) {
-            Ordering::Equal => {}
-            non_equal => return non_equal,
-        }
-        // If slot_seconds are equal, then compare by transaction
-        self.transaction.cmp(&other.transaction)
-    }
+	fn cmp(&self, other: &Self) -> Ordering {
+		// First, compare by slot_seconds
+		match self.slot_seconds.cmp(&other.slot_seconds) {
+			Ordering::Equal => {}
+			non_equal => return non_equal,
+		}
+		// If slot_seconds are equal, then compare by transaction
+		self.transaction.cmp(&other.transaction)
+	}
}
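Note: as written, `Ord` compares `slot_seconds`, which is the constant slot width (2 in every constructor below), not the slot a transaction falls in; time ordering currently comes from the RocksDB key (built from `timestamp`) rather than from this impl. If the BTreeSet use case described above is the intent, a hedged fix would compare `timestamp` first:

    fn cmp(&self, other: &Self) -> Ordering {
        // compare by slot start time, then by transaction to break ties
        match self.timestamp.cmp(&other.timestamp) {
            Ordering::Equal => self.transaction.cmp(&other.transaction),
            non_equal => non_equal,
        }
    }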
impl MempoolTransaction {
-    const SLOT_SECONDS: u64 = 2;
-
-    /// Creates a test MempoolTransaction.
-    pub fn test() -> Self {
-        Self { transaction: Transaction::test(), timestamp: 0, slot_seconds: Self::SLOT_SECONDS }
-    }
-
-    pub fn at_time(transaction: Transaction, timestamp: u64) -> Self {
-        let floor = (timestamp / Self::SLOT_SECONDS) * Self::SLOT_SECONDS;
-        Self { transaction, timestamp: floor, slot_seconds: Self::SLOT_SECONDS }
-    }
-
-    pub fn new(transaction: Transaction, timestamp: u64, slot_seconds: u64) -> Self {
-        Self { transaction, timestamp, slot_seconds }
-    }
-
-    /// Creates a new MempoolTransaction with the current timestamp floored to the nearest slot.
-    /// todo: probably want to move this out to a factory.
-    pub fn slot_now(transaction: Transaction) -> MempoolTransaction {
-        let timestamp =
-            std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs();
-
-        Self::at_time(transaction, timestamp)
-    }
-
-    pub fn id(&self) -> Id {
-        self.transaction.id()
-    }
+	const SLOT_SECONDS: u64 = 2;
+
+	/// Creates a test MempoolTransaction.
+	pub fn test() -> Self {
+		Self { transaction: Transaction::test(), timestamp: 0, slot_seconds: Self::SLOT_SECONDS }
+	}
+
+	pub fn at_time(transaction: Transaction, timestamp: u64) -> Self {
+		let floor = (timestamp / Self::SLOT_SECONDS) * Self::SLOT_SECONDS;
+		Self { transaction, timestamp: floor, slot_seconds: Self::SLOT_SECONDS }
+	}
+
+	pub fn new(transaction: Transaction, timestamp: u64, slot_seconds: u64) -> Self {
+		Self { transaction, timestamp, slot_seconds }
+	}
+
+	/// Creates a new MempoolTransaction with the current timestamp floored to the nearest slot.
+	/// todo: probably want to move this out to a factory.
+	pub fn slot_now(transaction: Transaction) -> MempoolTransaction {
+		let timestamp = std::time::SystemTime::now()
+			.duration_since(std::time::UNIX_EPOCH)
+			.unwrap()
+			.as_secs();

+		Self::at_time(transaction, timestamp)
+	}
+
+	pub fn id(&self) -> Id {
+		self.transaction.id()
+	}
}
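Note: `at_time` floors a timestamp to the start of its slot. With `SLOT_SECONDS = 2`, 5 floors to 4 and 127 floors to 126, while 64 is already on a slot boundary:

    assert_eq!((5u64 / 2) * 2, 4);
    assert_eq!((64u64 / 2) * 2, 64);
    assert_eq!((127u64 / 2) * 2, 126);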
failed"))?, + )?; + let mempool = Arc::new(RwLock::new(mempool)); + let parent_block = Arc::new(RwLock::new(Id::default())); + Ok(Self::new(mempool, 10, parent_block, 1000)) + } + + pub fn try_move_rocks_from_env() -> Result { + let path = std::env::var("MOVE_ROCKS_PATH") + .or(Err(anyhow::anyhow!("MOVE_ROCKS_PATH not found")))?; + Self::try_move_rocks(PathBuf::from(path)) + } } impl Sequencer for Memseq { - async fn publish(&self, transaction: Transaction) -> Result<(), anyhow::Error> { - let mempool = self.mempool.read().await; - mempool.add_transaction(transaction).await?; - Ok(()) - } - - async fn wait_for_next_block(&self) -> Result, anyhow::Error> { - let mempool = self.mempool.read().await; - let mut transactions = Vec::new(); - - let mut now = std::time::Instant::now(); - let finish_by = now + std::time::Duration::from_millis(self.building_time_ms); - - loop { - let current_block_size = transactions.len() as u32; - if current_block_size >= self.block_size { - break; - } - - for _ in 0..self.block_size - current_block_size { - if let Some(transaction) = mempool.pop_transaction().await? { - transactions.push(transaction); - } else { - break; - } - } - - // sleep to yield to other tasks and wait for more transactions - tokio::time::sleep(std::time::Duration::from_millis(1)).await; - - now = std::time::Instant::now(); - if now > finish_by { - break; - } - } - - if transactions.is_empty() { - Ok(None) - } else { - Ok(Some(Block::new( - Default::default(), - self.parent_block.read().await.clone().to_vec(), - transactions, - ))) - } - } + async fn publish(&self, transaction: Transaction) -> Result<(), anyhow::Error> { + let mempool = self.mempool.read().await; + mempool.add_transaction(transaction).await?; + Ok(()) + } + + async fn wait_for_next_block(&self) -> Result, anyhow::Error> { + let mempool = self.mempool.read().await; + let mut transactions = Vec::new(); + + let mut now = std::time::Instant::now(); + let finish_by = now + std::time::Duration::from_millis(self.building_time_ms); + + loop { + let current_block_size = transactions.len() as u32; + if current_block_size >= self.block_size { + break; + } + + for _ in 0..self.block_size - current_block_size { + if let Some(transaction) = mempool.pop_transaction().await? 
{ + transactions.push(transaction); + } else { + break; + } + } + + // sleep to yield to other tasks and wait for more transactions + tokio::time::sleep(std::time::Duration::from_millis(1)).await; + + now = std::time::Instant::now(); + if now > finish_by { + break; + } + } + + if transactions.is_empty() { + Ok(None) + } else { + Ok(Some(Block::new( + Default::default(), + self.parent_block.read().await.clone().to_vec(), + transactions, + ))) + } + } } #[cfg(test)] pub mod test { - use super::*; - use futures::stream::FuturesUnordered; - use futures::StreamExt; - use mempool_util::MempoolTransaction; - use tempfile::tempdir; - - #[tokio::test] - async fn test_wait_for_next_block_building_time_expires() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - let memseq = Memseq::try_move_rocks(path)?.with_block_size(10).with_building_time_ms(500); - - // Add some transactions - for i in 0..5 { - let transaction = Transaction::new(vec![i as u8]); - memseq.publish(transaction).await?; - } - - // Wait for the block to be built, not enough transactions as such - // the building time should expire - let block = memseq.wait_for_next_block().await?; - assert!(block.is_some()); - - let block = block.ok_or(anyhow::anyhow!("Block not found"))?; - assert_eq!(block.transactions.len(), 5); - - Ok(()) - } - - #[tokio::test] - async fn test_publish_error_propagation() -> Result<(), anyhow::Error> { - let mempool = Arc::new(RwLock::new(MockMempool)); - let parent_block = Arc::new(RwLock::new(Id::default())); - let memseq = Memseq::new(mempool, 10, parent_block, 1000); - - let transaction = Transaction::new(vec![1, 2, 3]); - let result = memseq.publish(transaction).await; - assert!(result.is_err()); - assert_eq!(result.unwrap_err().to_string(), "Mock add_transaction"); - - let result = memseq.wait_for_next_block().await; - assert!(result.is_err()); - assert_eq!(result.unwrap_err().to_string(), "Mock pop_transaction"); - - Ok(()) - } - - #[tokio::test] - async fn test_concurrent_access_spawn() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - let memseq = Arc::new(Memseq::try_move_rocks(path)?); - - let mut handles = vec![]; - - for i in 0..100 { - let memseq_clone = Arc::clone(&memseq); - let handle = tokio::spawn(async move { - let transaction = Transaction::new(vec![i as u8]); - memseq_clone.publish(transaction).await.unwrap(); - }); - handles.push(handle); - } - - for handle in handles { - handle.await.expect("Task failed"); - } - - Ok(()) - } - - #[tokio::test] - async fn test_concurrent_access_futures() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - let memseq = Arc::new(Memseq::try_move_rocks(path)?); - - let futures = FuturesUnordered::new(); - - for i in 0..10 { - let memseq_clone = Arc::clone(&memseq); - let handle = async move { - for n in 0..10 { - let transaction = Transaction::new(vec![i * 10 + n as u8]); - memseq_clone.publish(transaction).await?; - } - Ok::<_, anyhow::Error>(()) - }; - futures.push(handle); - } - - let all_executed_correctly = futures.all(|result| async move { result.is_ok() }).await; - assert!(all_executed_correctly); - - Ok(()) - } - - #[tokio::test] - async fn test_try_move_rocks() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - let memseq = Memseq::try_move_rocks(path.clone())?; - - assert_eq!(memseq.block_size, 10); - assert_eq!(memseq.building_time_ms, 1000); - - // Test invalid path - let 
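Note: `wait_for_next_block` closes a block on whichever comes first, `block_size` transactions or the `building_time_ms` deadline, and returns `None` when nothing arrived. A minimal producer loop, assuming the rocksdb-backed constructor and a hypothetical `da_submit` sink:

    let memseq = Memseq::try_move_rocks(PathBuf::from("/tmp/memseq"))?; // hypothetical path
    loop {
        if let Some(block) = memseq.wait_for_next_block().await? {
            da_submit(block).await?; // hypothetical downstream submission
        }
    }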
invalid_path = PathBuf::from(""); - let result = Memseq::try_move_rocks(invalid_path); - assert!(result.is_err()); - - Ok(()) - } - - #[tokio::test] - async fn test_try_move_rocks_from_env() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - std::env::set_var("MOVE_ROCKS_PATH", path.to_str().unwrap()); - - let memseq = Memseq::try_move_rocks_from_env()?; - assert_eq!(memseq.block_size, 10); - assert_eq!(memseq.building_time_ms, 1000); - - // Test environment variable not set - std::env::remove_var("MOVE_ROCKS_PATH"); - let result = Memseq::try_move_rocks_from_env(); - assert!(result.is_err()); - - Ok(()) - } - - #[tokio::test] - async fn test_memseq_initialization() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - - let mem_pool = Arc::new(RwLock::new(RocksdbMempool::try_new( - path.to_str().ok_or(anyhow::anyhow!("PathBuf to str failed"))?, - )?)); - let block_size = 50; - let building_time_ms = 2000; - let parent_block = Arc::new(RwLock::new(Id::default())); + use super::*; + use futures::stream::FuturesUnordered; + use futures::StreamExt; + use mempool_util::MempoolTransaction; + use tempfile::tempdir; + + #[tokio::test] + async fn test_wait_for_next_block_building_time_expires() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + let memseq = Memseq::try_move_rocks(path)?.with_block_size(10).with_building_time_ms(500); + + // Add some transactions + for i in 0..5 { + let transaction = Transaction::new(vec![i as u8]); + memseq.publish(transaction).await?; + } + + // Wait for the block to be built, not enough transactions as such + // the building time should expire + let block = memseq.wait_for_next_block().await?; + assert!(block.is_some()); + + let block = block.ok_or(anyhow::anyhow!("Block not found"))?; + assert_eq!(block.transactions.len(), 5); + + Ok(()) + } + + #[tokio::test] + async fn test_publish_error_propagation() -> Result<(), anyhow::Error> { + let mempool = Arc::new(RwLock::new(MockMempool)); + let parent_block = Arc::new(RwLock::new(Id::default())); + let memseq = Memseq::new(mempool, 10, parent_block, 1000); + + let transaction = Transaction::new(vec![1, 2, 3]); + let result = memseq.publish(transaction).await; + assert!(result.is_err()); + assert_eq!(result.unwrap_err().to_string(), "Mock add_transaction"); + + let result = memseq.wait_for_next_block().await; + assert!(result.is_err()); + assert_eq!(result.unwrap_err().to_string(), "Mock pop_transaction"); + + Ok(()) + } + + #[tokio::test] + async fn test_concurrent_access_spawn() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + let memseq = Arc::new(Memseq::try_move_rocks(path)?); + + let mut handles = vec![]; + + for i in 0..100 { + let memseq_clone = Arc::clone(&memseq); + let handle = tokio::spawn(async move { + let transaction = Transaction::new(vec![i as u8]); + memseq_clone.publish(transaction).await.unwrap(); + }); + handles.push(handle); + } + + for handle in handles { + handle.await.expect("Task failed"); + } + + Ok(()) + } + + #[tokio::test] + async fn test_concurrent_access_futures() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + let memseq = Arc::new(Memseq::try_move_rocks(path)?); + + let futures = FuturesUnordered::new(); + + for i in 0..10 { + let memseq_clone = Arc::clone(&memseq); + let handle = async move { + for n in 0..10 { + let transaction = Transaction::new(vec![i * 10 
+ n as u8]); + memseq_clone.publish(transaction).await?; + } + Ok::<_, anyhow::Error>(()) + }; + futures.push(handle); + } + + let all_executed_correctly = futures.all(|result| async move { result.is_ok() }).await; + assert!(all_executed_correctly); + + Ok(()) + } + + #[tokio::test] + async fn test_try_move_rocks() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + let memseq = Memseq::try_move_rocks(path.clone())?; + + assert_eq!(memseq.block_size, 10); + assert_eq!(memseq.building_time_ms, 1000); + + // Test invalid path + let invalid_path = PathBuf::from(""); + let result = Memseq::try_move_rocks(invalid_path); + assert!(result.is_err()); + + Ok(()) + } + + #[tokio::test] + async fn test_try_move_rocks_from_env() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + std::env::set_var("MOVE_ROCKS_PATH", path.to_str().unwrap()); + + let memseq = Memseq::try_move_rocks_from_env()?; + assert_eq!(memseq.block_size, 10); + assert_eq!(memseq.building_time_ms, 1000); + + // Test environment variable not set + std::env::remove_var("MOVE_ROCKS_PATH"); + let result = Memseq::try_move_rocks_from_env(); + assert!(result.is_err()); + + Ok(()) + } + + #[tokio::test] + async fn test_memseq_initialization() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + + let mem_pool = Arc::new(RwLock::new(RocksdbMempool::try_new( + path.to_str().ok_or(anyhow::anyhow!("PathBuf to str failed"))?, + )?)); + let block_size = 50; + let building_time_ms = 2000; + let parent_block = Arc::new(RwLock::new(Id::default())); - let memseq = Memseq::new(mem_pool, block_size, Arc::clone(&parent_block), building_time_ms); - - assert_eq!(memseq.block_size, block_size); - assert_eq!(memseq.building_time_ms, building_time_ms); - assert_eq!(*memseq.parent_block.read().await, *parent_block.read().await); - - Ok(()) - } + let memseq = Memseq::new(mem_pool, block_size, Arc::clone(&parent_block), building_time_ms); + + assert_eq!(memseq.block_size, block_size); + assert_eq!(memseq.building_time_ms, building_time_ms); + assert_eq!(*memseq.parent_block.read().await, *parent_block.read().await); + + Ok(()) + } - #[tokio::test] - async fn test_memseq_with_methods() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - - let mem_pool = Arc::new(RwLock::new(RocksdbMempool::try_new( - path.to_str().ok_or(anyhow::anyhow!("PathBuf to str failed"))?, - )?)); - let block_size = 50; - let building_time_ms = 2000; - let parent_block = Arc::new(RwLock::new(Id::default())); + #[tokio::test] + async fn test_memseq_with_methods() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + + let mem_pool = Arc::new(RwLock::new(RocksdbMempool::try_new( + path.to_str().ok_or(anyhow::anyhow!("PathBuf to str failed"))?, + )?)); + let block_size = 50; + let building_time_ms = 2000; + let parent_block = Arc::new(RwLock::new(Id::default())); - let memseq = Memseq::new(mem_pool, block_size, Arc::clone(&parent_block), building_time_ms); + let memseq = Memseq::new(mem_pool, block_size, Arc::clone(&parent_block), building_time_ms); - // Test with_block_size - let new_block_size = 100; - let memseq = memseq.with_block_size(new_block_size); - assert_eq!(memseq.block_size, new_block_size); + // Test with_block_size + let new_block_size = 100; + let memseq = memseq.with_block_size(new_block_size); + assert_eq!(memseq.block_size, new_block_size); + + // Test 
with_building_time_ms + let new_building_time_ms = 5000; + let memseq = memseq.with_building_time_ms(new_building_time_ms); + assert_eq!(memseq.building_time_ms, new_building_time_ms); - // Test with_building_time_ms - let new_building_time_ms = 5000; - let memseq = memseq.with_building_time_ms(new_building_time_ms); - assert_eq!(memseq.building_time_ms, new_building_time_ms); + Ok(()) + } + + #[tokio::test] + async fn test_wait_for_next_block_no_transactions() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + let memseq = Memseq::try_move_rocks(path)?.with_block_size(10).with_building_time_ms(500); - Ok(()) - } - - #[tokio::test] - async fn test_wait_for_next_block_no_transactions() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - let memseq = Memseq::try_move_rocks(path)?.with_block_size(10).with_building_time_ms(500); + let block = memseq.wait_for_next_block().await?; + assert!(block.is_none()); - let block = memseq.wait_for_next_block().await?; - assert!(block.is_none()); + Ok(()) + } - Ok(()) - } + #[tokio::test] + async fn test_memseq() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + let memseq = Memseq::try_move_rocks(path)?; - #[tokio::test] - async fn test_memseq() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - let memseq = Memseq::try_move_rocks(path)?; + let transaction = Transaction::new(vec![1, 2, 3]); + memseq.publish(transaction.clone()).await?; - let transaction = Transaction::new(vec![1, 2, 3]); - memseq.publish(transaction.clone()).await?; + let block = memseq.wait_for_next_block().await?; - let block = memseq.wait_for_next_block().await?; + assert_eq!(block.ok_or(anyhow::anyhow!("Block not found"))?.transactions[0], transaction); - assert_eq!(block.ok_or(anyhow::anyhow!("Block not found"))?.transactions[0], transaction); + Ok(()) + } - Ok(()) - } + #[tokio::test] + async fn test_respects_size() -> Result<(), anyhow::Error> { + let dir = tempdir()?; + let path = dir.path().to_path_buf(); + let block_size = 100; + let memseq = Memseq::try_move_rocks(path)?.with_block_size(block_size); - #[tokio::test] - async fn test_respects_size() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - let block_size = 100; - let memseq = Memseq::try_move_rocks(path)?.with_block_size(block_size); + let mut transactions = Vec::new(); + for i in 0..block_size * 2 { + let transaction = Transaction::new(vec![i as u8]); + memseq.publish(transaction.clone()).await?; + transactions.push(transaction); + } - let mut transactions = Vec::new(); - for i in 0..block_size * 2 { - let transaction = Transaction::new(vec![i as u8]); - memseq.publish(transaction.clone()).await?; - transactions.push(transaction); - } + let block = memseq.wait_for_next_block().await?; - let block = memseq.wait_for_next_block().await?; + assert!(block.is_some()); + + let block = block.ok_or(anyhow::anyhow!("Block not found"))?; - assert!(block.is_some()); - - let block = block.ok_or(anyhow::anyhow!("Block not found"))?; - - assert_eq!(block.transactions.len(), block_size as usize); - - let second_block = memseq.wait_for_next_block().await?; - - assert!(second_block.is_some()); - - let second_block = second_block.ok_or(anyhow::anyhow!("Second block not found"))?; - - assert_eq!(second_block.transactions.len(), block_size as usize); - - Ok(()) - } - - #[tokio::test] - async fn 
test_wait_next_block_respects_time() -> Result<(), anyhow::Error> { - let dir = tempdir()?; - let path = dir.path().to_path_buf(); - let block_size = 100; - let memseq = - Memseq::try_move_rocks(path)?.with_block_size(block_size).with_building_time_ms(500); - - let building_memseq = Arc::new(memseq); - let waiting_memseq = Arc::clone(&building_memseq); - - let building_task = async move { - let memseq = building_memseq; - - // add half of the transactions - for i in 0..block_size / 2 { - let transaction = Transaction::new(vec![i as u8]); - memseq.publish(transaction.clone()).await?; - } - - tokio::time::sleep(std::time::Duration::from_millis(600)).await; - - // add the rest of the transactions - for i in block_size / 2..block_size - 2 { - let transaction = Transaction::new(vec![i as u8]); - memseq.publish(transaction.clone()).await?; - } - - Ok::<_, anyhow::Error>(()) - }; - - let waiting_task = async move { - let memseq = waiting_memseq; - - // first block - let block = memseq.wait_for_next_block().await?; - assert!(block.is_some()); - let block = block.ok_or(anyhow::anyhow!("Block not found"))?; - assert_eq!(block.transactions.len(), (block_size / 2) as usize); - - tokio::time::sleep(std::time::Duration::from_millis(200)).await; - - // second block - let block = memseq.wait_for_next_block().await?; - assert!(block.is_some()); - let block = block.ok_or(anyhow::anyhow!("Block not found"))?; - assert_eq!(block.transactions.len(), ((block_size / 2) - 2) as usize); - - Ok::<_, anyhow::Error>(()) - }; - - tokio::try_join!(building_task, waiting_task)?; - - Ok(()) - } - - /// Mock Mempool - struct MockMempool; - impl MempoolTransactionOperations for MockMempool { - async fn has_mempool_transaction( - &self, - _transaction_id: Id, - ) -> Result { - Err(anyhow::anyhow!("Mock has_mempool_transaction")) - } - - async fn add_mempool_transaction( - &self, - _tx: MempoolTransaction, - ) -> Result<(), anyhow::Error> { - Err(anyhow::anyhow!("Mock add_mempool_transaction")) - } - - async fn remove_mempool_transaction( - &self, - _transaction_id: Id, - ) -> Result<(), anyhow::Error> { - Err(anyhow::anyhow!("Mock remove_mempool_transaction")) - } - - async fn pop_mempool_transaction( - &self, - ) -> Result, anyhow::Error> { - Err(anyhow::anyhow!("Mock pop_mempool_transaction")) - } - - async fn get_mempool_transaction( - &self, - _transaction_id: Id, - ) -> Result, anyhow::Error> { - Err(anyhow::anyhow!("Mock get_mempool_transaction")) - } - - async fn add_transaction(&self, _transaction: Transaction) -> Result<(), anyhow::Error> { - Err(anyhow::anyhow!("Mock add_transaction")) - } - - async fn pop_transaction(&self) -> Result, anyhow::Error> { - Err(anyhow::anyhow!("Mock pop_transaction")) - } - } - - impl MempoolBlockOperations for MockMempool { - async fn has_block(&self, _block_id: Id) -> Result { - todo!() - } - - async fn add_block(&self, _block: Block) -> Result<(), anyhow::Error> { - todo!() - } - - async fn remove_block(&self, _block_id: Id) -> Result<(), anyhow::Error> { - todo!() - } - - async fn get_block(&self, _block_id: Id) -> Result, anyhow::Error> { - todo!() - } - } + assert_eq!(block.transactions.len(), block_size as usize); + + let second_block = memseq.wait_for_next_block().await?; + + assert!(second_block.is_some()); + + let second_block = second_block.ok_or(anyhow::anyhow!("Second block not found"))?; + + assert_eq!(second_block.transactions.len(), block_size as usize); + + Ok(()) + } + + #[tokio::test] + async fn test_wait_next_block_respects_time() -> Result<(), anyhow::Error> { + 
let dir = tempdir()?; + let path = dir.path().to_path_buf(); + let block_size = 100; + let memseq = Memseq::try_move_rocks(path)? + .with_block_size(block_size) + .with_building_time_ms(500); + + let building_memseq = Arc::new(memseq); + let waiting_memseq = Arc::clone(&building_memseq); + + let building_task = async move { + let memseq = building_memseq; + + // add half of the transactions + for i in 0..block_size / 2 { + let transaction = Transaction::new(vec![i as u8]); + memseq.publish(transaction.clone()).await?; + } + + tokio::time::sleep(std::time::Duration::from_millis(600)).await; + + // add the rest of the transactions + for i in block_size / 2..block_size - 2 { + let transaction = Transaction::new(vec![i as u8]); + memseq.publish(transaction.clone()).await?; + } + + Ok::<_, anyhow::Error>(()) + }; + + let waiting_task = async move { + let memseq = waiting_memseq; + + // first block + let block = memseq.wait_for_next_block().await?; + assert!(block.is_some()); + let block = block.ok_or(anyhow::anyhow!("Block not found"))?; + assert_eq!(block.transactions.len(), (block_size / 2) as usize); + + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + + // second block + let block = memseq.wait_for_next_block().await?; + assert!(block.is_some()); + let block = block.ok_or(anyhow::anyhow!("Block not found"))?; + assert_eq!(block.transactions.len(), ((block_size / 2) - 2) as usize); + + Ok::<_, anyhow::Error>(()) + }; + + tokio::try_join!(building_task, waiting_task)?; + + Ok(()) + } + + /// Mock Mempool + struct MockMempool; + impl MempoolTransactionOperations for MockMempool { + async fn has_mempool_transaction( + &self, + _transaction_id: Id, + ) -> Result { + Err(anyhow::anyhow!("Mock has_mempool_transaction")) + } + + async fn add_mempool_transaction( + &self, + _tx: MempoolTransaction, + ) -> Result<(), anyhow::Error> { + Err(anyhow::anyhow!("Mock add_mempool_transaction")) + } + + async fn remove_mempool_transaction( + &self, + _transaction_id: Id, + ) -> Result<(), anyhow::Error> { + Err(anyhow::anyhow!("Mock remove_mempool_transaction")) + } + + async fn pop_mempool_transaction( + &self, + ) -> Result, anyhow::Error> { + Err(anyhow::anyhow!("Mock pop_mempool_transaction")) + } + + async fn get_mempool_transaction( + &self, + _transaction_id: Id, + ) -> Result, anyhow::Error> { + Err(anyhow::anyhow!("Mock get_mempool_transaction")) + } + + async fn add_transaction(&self, _transaction: Transaction) -> Result<(), anyhow::Error> { + Err(anyhow::anyhow!("Mock add_transaction")) + } + + async fn pop_transaction(&self) -> Result, anyhow::Error> { + Err(anyhow::anyhow!("Mock pop_transaction")) + } + } + + impl MempoolBlockOperations for MockMempool { + async fn has_block(&self, _block_id: Id) -> Result { + todo!() + } + + async fn add_block(&self, _block: Block) -> Result<(), anyhow::Error> { + todo!() + } + + async fn remove_block(&self, _block_id: Id) -> Result<(), anyhow::Error> { + todo!() + } + + async fn get_block(&self, _block_id: Id) -> Result, anyhow::Error> { + todo!() + } + } } diff --git a/protocol-units/sequencing/util/src/lib.rs b/protocol-units/sequencing/util/src/lib.rs index 91d419054..e21260944 100644 --- a/protocol-units/sequencing/util/src/lib.rs +++ b/protocol-units/sequencing/util/src/lib.rs @@ -1,13 +1,13 @@ use movement_types::{AtomicTransactionBundle, Block, Transaction}; pub trait Sequencer { - async fn publish(&self, atb: Transaction) -> Result<(), anyhow::Error>; + async fn publish(&self, atb: Transaction) -> Result<(), anyhow::Error>; - async fn 
wait_for_next_block(&self) -> Result, anyhow::Error>; + async fn wait_for_next_block(&self) -> Result, anyhow::Error>; } pub trait SharedSequencer { - async fn publish(&self, atb: AtomicTransactionBundle) -> Result<(), anyhow::Error>; + async fn publish(&self, atb: AtomicTransactionBundle) -> Result<(), anyhow::Error>; - async fn wait_for_next_block(&self) -> Result, anyhow::Error>; + async fn wait_for_next_block(&self) -> Result, anyhow::Error>; } diff --git a/protocol-units/settlement/mcr/client/src/eth_client.rs b/protocol-units/settlement/mcr/client/src/eth_client.rs index dc8d7c809..7a9bc8318 100644 --- a/protocol-units/settlement/mcr/client/src/eth_client.rs +++ b/protocol-units/settlement/mcr/client/src/eth_client.rs @@ -41,526 +41,528 @@ const DEFAULT_TX_GAS_LIMIT: u128 = 10_000_000_000_000_000; #[derive(Clone, Debug)] pub struct McrEthSettlementConfig { - pub mrc_contract_address: String, - pub gas_limit: u128, - pub tx_send_nb_retry: usize, + pub mrc_contract_address: String, + pub gas_limit: u128, + pub tx_send_nb_retry: usize, } impl McrEthSettlementConfig { - fn get_from_env(env_var: &str) -> Result - where - ::Err: std::fmt::Display, - { - env::var(env_var) - .map_err(|err| { - McrEthConnectorError::BadlyDefineEnvVariable(format!( - "{env_var} env var is not defined :{err}" - )) - }) - .and_then(|v| { - T::from_str(&v).map_err(|err| { - McrEthConnectorError::BadlyDefineEnvVariable(format!( - "Parse error for {env_var} env var:{err}" - )) - }) - }) - } - pub fn try_from_env() -> Result { - Ok(McrEthSettlementConfig { - mrc_contract_address: env::var("MCR_CONTRACT_ADDRESS") - .unwrap_or(MRC_CONTRACT_ADDRESS.to_string()), - gas_limit: Self::get_from_env::("MCR_TXSEND_GASLIMIT")?, - tx_send_nb_retry: Self::get_from_env::("MCR_TXSEND_NBRETRY")?, - }) - } + fn get_from_env(env_var: &str) -> Result + where + ::Err: std::fmt::Display, + { + env::var(env_var) + .map_err(|err| { + McrEthConnectorError::BadlyDefineEnvVariable(format!( + "{env_var} env var is not defined :{err}" + )) + }) + .and_then(|v| { + T::from_str(&v).map_err(|err| { + McrEthConnectorError::BadlyDefineEnvVariable(format!( + "Parse error for {env_var} env var:{err}" + )) + }) + }) + } + pub fn try_from_env() -> Result { + Ok(McrEthSettlementConfig { + mrc_contract_address: env::var("MCR_CONTRACT_ADDRESS") + .unwrap_or(MRC_CONTRACT_ADDRESS.to_string()), + gas_limit: Self::get_from_env::("MCR_TXSEND_GASLIMIT")?, + tx_send_nb_retry: Self::get_from_env::("MCR_TXSEND_NBRETRY")?, + }) + } } impl Default for McrEthSettlementConfig { - fn default() -> Self { - McrEthSettlementConfig { - mrc_contract_address: MRC_CONTRACT_ADDRESS.to_string(), - gas_limit: DEFAULT_TX_GAS_LIMIT, - tx_send_nb_retry: MAX_TX_SEND_RETRY, - } - } + fn default() -> Self { + McrEthSettlementConfig { + mrc_contract_address: MRC_CONTRACT_ADDRESS.to_string(), + gas_limit: DEFAULT_TX_GAS_LIMIT, + tx_send_nb_retry: MAX_TX_SEND_RETRY, + } + } } #[derive(Error, Debug)] pub enum McrEthConnectorError { - #[error( - "MCR Settlement Tx fail because gaz estimation is to high. Estimated gaz:{0} gaz limit:{1}" - )] - GasLimitExceed(u128, u128), - #[error("MCR Settlement Tx fail because account funds are insufficient. 
error:{0}")] - InsufficientFunds(String), - #[error("MCR Settlement Tx send fail because :{0}")] - SendTxError(#[from] alloy_contract::Error), - #[error("MCR Settlement Tx send fail during its execution :{0}")] - RpcTxExecution(String), - #[error("MCR Settlement BlockAccepted event notification error :{0}")] - EventNotificationError(#[from] alloy_sol_types::Error), - #[error("MCR Settlement BlockAccepted event notification stream close")] - EventNotificationStreamClosed, - #[error("MCR Settlement Error environment variable:{0}")] - BadlyDefineEnvVariable(String), + #[error( + "MCR Settlement Tx fail because gaz estimation is to high. Estimated gaz:{0} gaz limit:{1}" + )] + GasLimitExceed(u128, u128), + #[error("MCR Settlement Tx fail because account funds are insufficient. error:{0}")] + InsufficientFunds(String), + #[error("MCR Settlement Tx send fail because :{0}")] + SendTxError(#[from] alloy_contract::Error), + #[error("MCR Settlement Tx send fail during its execution :{0}")] + RpcTxExecution(String), + #[error("MCR Settlement BlockAccepted event notification error :{0}")] + EventNotificationError(#[from] alloy_sol_types::Error), + #[error("MCR Settlement BlockAccepted event notification stream close")] + EventNotificationStreamClosed, + #[error("MCR Settlement Error environment variable:{0}")] + BadlyDefineEnvVariable(String), } // Codegen from artifact. sol!( - #[allow(missing_docs)] - #[sol(rpc)] - MCR, - "abi/MCR.json" + #[allow(missing_docs)] + #[sol(rpc)] + MCR, + "abi/MCR.json" ); pub struct McrEthSettlementClient { - rpc_provider: P, - signer_address: Address, - ws_provider: RootProvider, - config: McrEthSettlementConfig, - send_tx_error_rules: Vec>, - _markert: PhantomData, + rpc_provider: P, + signer_address: Address, + ws_provider: RootProvider, + config: McrEthSettlementConfig, + send_tx_error_rules: Vec>, + _markert: PhantomData, } impl - McrEthSettlementClient< - FillProvider< - JoinFill< - JoinFill< - JoinFill, NonceFiller>, - ChainIdFiller, - >, - SignerFiller, - >, - RootProvider, - BoxTransport, - Ethereum, - >, - BoxTransport, - > + McrEthSettlementClient< + FillProvider< + JoinFill< + JoinFill< + JoinFill, NonceFiller>, + ChainIdFiller, + >, + SignerFiller, + >, + RootProvider, + BoxTransport, + Ethereum, + >, + BoxTransport, + > { - pub async fn build_with_urls( - rpc: &str, - ws_url: S2, - signer_private_key: &str, - config: McrEthSettlementConfig, - ) -> Result - where - S2: Into, - { - let signer: LocalWallet = signer_private_key.parse()?; - let signer_address = signer.address(); - let rpc_provider = ProviderBuilder::new() - .with_recommended_fillers() - .signer(EthereumSigner::from(signer)) - .on_builtin(rpc) - .await?; - - McrEthSettlementClient::build_with_provider(rpc_provider, signer_address, ws_url, config) - .await - } + pub async fn build_with_urls( + rpc: &str, + ws_url: S2, + signer_private_key: &str, + config: McrEthSettlementConfig, + ) -> Result + where + S2: Into, + { + let signer: LocalWallet = signer_private_key.parse()?; + let signer_address = signer.address(); + let rpc_provider = ProviderBuilder::new() + .with_recommended_fillers() + .signer(EthereumSigner::from(signer)) + .on_builtin(rpc) + .await?; + + McrEthSettlementClient::build_with_provider(rpc_provider, signer_address, ws_url, config) + .await + } } impl + Clone, T: Transport + Clone> McrEthSettlementClient { - pub async fn build_with_provider( - rpc_provider: P, - signer_address: Address, - ws_url: S, - config: McrEthSettlementConfig, - ) -> Result - where - S: Into, - { - let ws = 
WsConnect::new(ws_url); - - let ws_provider = ProviderBuilder::new().on_ws(ws).await?; - - let rule1: Box = Box::new(SendTxErrorRule::::new()); - let rule2: Box = Box::new(SendTxErrorRule::::new()); - let send_tx_error_rules = vec![rule1, rule2]; - - Ok(McrEthSettlementClient { - rpc_provider, - signer_address, - ws_provider, - send_tx_error_rules, - config, - _markert: Default::default(), - }) - } + pub async fn build_with_provider( + rpc_provider: P, + signer_address: Address, + ws_url: S, + config: McrEthSettlementConfig, + ) -> Result + where + S: Into, + { + let ws = WsConnect::new(ws_url); + + let ws_provider = ProviderBuilder::new().on_ws(ws).await?; + + let rule1: Box = Box::new(SendTxErrorRule::::new()); + let rule2: Box = Box::new(SendTxErrorRule::::new()); + let send_tx_error_rules = vec![rule1, rule2]; + + Ok(McrEthSettlementClient { + rpc_provider, + signer_address, + ws_provider, + send_tx_error_rules, + config, + _markert: Default::default(), + }) + } } #[async_trait::async_trait] impl + Clone, T: Transport + Clone> McrSettlementClientOperations - for McrEthSettlementClient + for McrEthSettlementClient { - async fn post_block_commitment( - &self, - block_commitment: BlockCommitment, - ) -> Result<(), anyhow::Error> { - let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.rpc_provider); - - let eth_block_commitment = MCR::BlockCommitment { - // currently, to simplify the api, we'll say 0 is uncommitted all other numbers are legitimate heights - height: U256::from(block_commitment.height), - commitment: alloy_primitives::FixedBytes(block_commitment.commitment.0), - blockId: alloy_primitives::FixedBytes(block_commitment.block_id.0), - }; - - let call_builder = contract.submitBlockCommitment(eth_block_commitment); - - crate::send_eth_tx::send_tx( - call_builder, - &self.send_tx_error_rules, - self.config.tx_send_nb_retry, - self.config.gas_limit, - ) - .await - } - - async fn post_block_commitment_batch( - &self, - block_commitments: Vec, - ) -> Result<(), anyhow::Error> { - let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.rpc_provider); - - let eth_block_commitment: Vec<_> = block_commitments - .into_iter() - .map(|block_commitment| { - Ok(MCR::BlockCommitment { - // currently, to simplify the api, we'll say 0 is uncommitted all other numbers are legitimate heights - height: U256::from(block_commitment.height), - commitment: alloy_primitives::FixedBytes(block_commitment.commitment.0), - blockId: alloy_primitives::FixedBytes(block_commitment.block_id.0), - }) - }) - .collect::, TryFromSliceError>>()?; - - let call_builder = contract.submitBatchBlockCommitment(eth_block_commitment); - - crate::send_eth_tx::send_tx( - call_builder, - &self.send_tx_error_rules, - self.config.tx_send_nb_retry, - self.config.gas_limit, - ) - .await - } - - async fn stream_block_commitments(&self) -> Result { - //register to contract BlockCommitmentSubmitted event - - let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.ws_provider); - let event_filter = contract.BlockAccepted_filter().watch().await?; - - let stream = event_filter.into_stream().map(|event| { - event - .and_then(|(commitment, _)| { - let height = commitment.height.try_into().map_err( - |err: alloy::primitives::ruint::FromUintError| { - alloy_sol_types::Error::Other(err.to_string().into()) - }, - )?; - Ok(BlockCommitment { - height, - block_id: Id(commitment.blockHash.0), - commitment: Commitment(commitment.stateCommitment.0), - }) - }) - .map_err(|err| 
McrEthConnectorError::EventNotificationError(err).into()) - }); - Ok(Box::pin(stream) as CommitmentStream) - } - - async fn get_commitment_at_height( - &self, - height: u64, - ) -> Result, anyhow::Error> { - let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.ws_provider); - let MCR::getValidatorCommitmentAtBlockHeightReturn { _0: commitment } = contract - .getValidatorCommitmentAtBlockHeight(U256::from(height), self.signer_address) - .call() - .await?; - let return_height: u64 = commitment.height.try_into()?; - // Commitment with height 0 mean not found - Ok((return_height != 0).then_some(BlockCommitment { - height: commitment.height.try_into()?, - block_id: Id(commitment.blockId.into()), - commitment: Commitment(commitment.commitment.into()), - })) - } - - async fn get_max_tolerable_block_height(&self) -> Result { - let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.ws_provider); - let MCR::getMaxTolerableBlockHeightReturn { _0: block_height } = - contract.getMaxTolerableBlockHeight().call().await?; - let return_height: u64 = block_height.try_into()?; - Ok(return_height) - } + async fn post_block_commitment( + &self, + block_commitment: BlockCommitment, + ) -> Result<(), anyhow::Error> { + let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.rpc_provider); + + let eth_block_commitment = MCR::BlockCommitment { + // currently, to simplify the api, we'll say 0 is uncommitted all other numbers are legitimate heights + height: U256::from(block_commitment.height), + commitment: alloy_primitives::FixedBytes(block_commitment.commitment.0), + blockId: alloy_primitives::FixedBytes(block_commitment.block_id.0), + }; + + let call_builder = contract.submitBlockCommitment(eth_block_commitment); + + crate::send_eth_tx::send_tx( + call_builder, + &self.send_tx_error_rules, + self.config.tx_send_nb_retry, + self.config.gas_limit, + ) + .await + } + + async fn post_block_commitment_batch( + &self, + block_commitments: Vec, + ) -> Result<(), anyhow::Error> { + let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.rpc_provider); + + let eth_block_commitment: Vec<_> = block_commitments + .into_iter() + .map(|block_commitment| { + Ok(MCR::BlockCommitment { + // currently, to simplify the api, we'll say 0 is uncommitted all other numbers are legitimate heights + height: U256::from(block_commitment.height), + commitment: alloy_primitives::FixedBytes(block_commitment.commitment.0), + blockId: alloy_primitives::FixedBytes(block_commitment.block_id.0), + }) + }) + .collect::, TryFromSliceError>>()?; + + let call_builder = contract.submitBatchBlockCommitment(eth_block_commitment); + + crate::send_eth_tx::send_tx( + call_builder, + &self.send_tx_error_rules, + self.config.tx_send_nb_retry, + self.config.gas_limit, + ) + .await + } + + async fn stream_block_commitments(&self) -> Result { + //register to contract BlockCommitmentSubmitted event + + let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.ws_provider); + let event_filter = contract.BlockAccepted_filter().watch().await?; + + let stream = event_filter.into_stream().map(|event| { + event + .and_then(|(commitment, _)| { + let height = commitment.height.try_into().map_err( + |err: alloy::primitives::ruint::FromUintError| { + alloy_sol_types::Error::Other(err.to_string().into()) + }, + )?; + Ok(BlockCommitment { + height, + block_id: Id(commitment.blockHash.0), + commitment: Commitment(commitment.stateCommitment.0), + }) + }) + .map_err(|err| 
McrEthConnectorError::EventNotificationError(err).into()) + }); + Ok(Box::pin(stream) as CommitmentStream) + } + + async fn get_commitment_at_height( + &self, + height: u64, + ) -> Result, anyhow::Error> { + let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.ws_provider); + let MCR::getValidatorCommitmentAtBlockHeightReturn { _0: commitment } = contract + .getValidatorCommitmentAtBlockHeight(U256::from(height), self.signer_address) + .call() + .await?; + let return_height: u64 = commitment.height.try_into()?; + // Commitment with height 0 mean not found + Ok((return_height != 0).then_some(BlockCommitment { + height: commitment.height.try_into()?, + block_id: Id(commitment.blockId.into()), + commitment: Commitment(commitment.commitment.into()), + })) + } + + async fn get_max_tolerable_block_height(&self) -> Result { + let contract = MCR::new(self.config.mrc_contract_address.parse()?, &self.ws_provider); + let MCR::getMaxTolerableBlockHeightReturn { _0: block_height } = + contract.getMaxTolerableBlockHeight().call().await?; + let return_height: u64 = block_height.try_into()?; + Ok(return_height) + } } #[cfg(test)] pub mod test { - use super::*; - use alloy_provider::ProviderBuilder; - use alloy_signer_wallet::LocalWallet; - use movement_types::Commitment; - - // Define 2 validators (signer1 and signer2) with each a little more than 50% of stake. - // After genesis ceremony, 2 validator send the commitment for height 1. - // Validator2 send a commitment for height 2 to trigger next epoch and fire event. - // Wait the commitment accepted event. - //#[ignore] - #[tokio::test] - async fn test_send_commitment() -> Result<(), anyhow::Error> { - //Activate to debug the test. - // use tracing_subscriber::EnvFilter; - - // tracing_subscriber::fmt() - // .with_env_filter( - // EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), - // ) - // .init(); - - // Inititalize Test variables - let rpc_port = env::var("MCR_ANVIL_PORT").unwrap(); - let rpc_url = format!("http://localhost:{rpc_port}"); - let ws_url = format!("ws://localhost:{rpc_port}"); - - let anvil_address = read_anvil_json_file_address()?; - - //Do SC ceremony init stake calls. - do_genesis_ceremonial(&anvil_address, &rpc_url).await?; - - let mcr_address = read_mcr_sc_adress()?; - //Define Signers. Ceremony define 2 signers with half stake each. - let signer1: LocalWallet = anvil_address[1].1.parse()?; - let signer1_addr = signer1.address(); - - //Build client 1 and send first commitment. - let provider_client1 = ProviderBuilder::new() - .with_recommended_fillers() - .signer(EthereumSigner::from(signer1)) - .on_http(rpc_url.parse().unwrap()); - - let config = McrEthSettlementConfig { - mrc_contract_address: mcr_address.to_string(), - gas_limit: DEFAULT_TX_GAS_LIMIT, - tx_send_nb_retry: MAX_TX_SEND_RETRY, - }; - - let client1 = McrEthSettlementClient::build_with_provider( - provider_client1, - signer1_addr, - ws_url.clone(), - config.clone(), - ) - .await - .unwrap(); - - let mut client1_stream = client1.stream_block_commitments().await.unwrap(); - - //client post a new commitment - let commitment = - BlockCommitment { height: 1, block_id: Id([2; 32]), commitment: Commitment([3; 32]) }; - - let res = client1.post_block_commitment(commitment.clone()).await; - assert!(res.is_ok()); - - //no notification quorum is not reach - let res = - tokio::time::timeout(tokio::time::Duration::from_secs(5), client1_stream.next()).await; - assert!(res.is_err()); - - //Build client 2 and send the second commitment. 
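Taken together, `post_block_commitment` and `stream_block_commitments` support a settle-then-watch pattern. A hedged sketch with client construction elided; `settle_and_watch` is a hypothetical helper, and the awaited event only fires once enough validator stake has committed to the height:

use tokio_stream::StreamExt;

async fn settle_and_watch(
	client: &impl McrSettlementClientOperations,
	commitment: BlockCommitment,
) -> Result<BlockCommitment, anyhow::Error> {
	let mut stream = client.stream_block_commitments().await?;
	client.post_block_commitment(commitment).await?;
	// Blocks until a BlockAccepted event is observed on the stream.
	stream.next().await.ok_or_else(|| anyhow::anyhow!("commitment stream closed"))?
}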
- let client2 = - McrEthSettlementClient::build_with_urls(&rpc_url, ws_url, &anvil_address[2].1, config) - .await - .unwrap(); - - let mut client2_stream = client2.stream_block_commitments().await.unwrap(); - - //client post a new commitment - let res = client2.post_block_commitment(commitment).await; - assert!(res.is_ok()); - - // now we move to block 2 and make some commitment just to trigger the epochRollover - let commitment2 = - BlockCommitment { height: 2, block_id: Id([4; 32]), commitment: Commitment([5; 32]) }; - - let res = client2.post_block_commitment(commitment2.clone()).await; - assert!(res.is_ok()); - - //validate that the accept commitment stream get the event. - let event = - tokio::time::timeout(tokio::time::Duration::from_secs(5), client1_stream.next()) - .await - .unwrap() - .unwrap() - .unwrap(); - assert_eq!(event.commitment.0[0], 3); - assert_eq!(event.block_id.0[0], 2); - let event = - tokio::time::timeout(tokio::time::Duration::from_secs(5), client2_stream.next()) - .await - .unwrap() - .unwrap() - .unwrap(); - assert_eq!(event.commitment.0[0], 3); - assert_eq!(event.block_id.0[0], 2); - - //test post batch commitment - // post the complementary batch on height 2 and one on height 3 - let commitment3 = - BlockCommitment { height: 3, block_id: Id([6; 32]), commitment: Commitment([7; 32]) }; - let res = client1.post_block_commitment_batch(vec![commitment2, commitment3]).await; - assert!(res.is_ok()); - //validate that the accept commitment stream get the event. - let event = - tokio::time::timeout(tokio::time::Duration::from_secs(5), client1_stream.next()) - .await - .unwrap() - .unwrap() - .unwrap(); - assert_eq!(event.commitment.0[0], 5); - assert_eq!(event.block_id.0[0], 4); - let event = - tokio::time::timeout(tokio::time::Duration::from_secs(5), client2_stream.next()) - .await - .unwrap() - .unwrap() - .unwrap(); - assert_eq!(event.commitment.0[0], 5); - assert_eq!(event.block_id.0[0], 4); - - //test get_commitment_at_height - let commitment = client1.get_commitment_at_height(1).await?; - assert!(commitment.is_some()); - let commitment = commitment.unwrap(); - assert_eq!(commitment.commitment.0[0], 3); - assert_eq!(commitment.block_id.0[0], 2); - let commitment = client1.get_commitment_at_height(10).await?; - assert_eq!(commitment, None); - - Ok(()) - } - - use serde_json::{from_str, Value}; - use std::fs; - fn read_anvil_json_file_address() -> Result, anyhow::Error> { - let anvil_conf_file = env::var("ANVIL_JSON_PATH")?; - let file_content = fs::read_to_string(anvil_conf_file)?; - - let json_value: Value = from_str(&file_content)?; - - // Extract the available_accounts and private_keys fields - let available_accounts_iter = json_value["available_accounts"] - .as_array() - .expect("available_accounts should be an array") - .iter() - .map(|v| v.as_str().map(|s| s.to_string())) - .flatten(); - - let private_keys_iter = json_value["private_keys"] - .as_array() - .expect("private_keys should be an array") - .iter() - .map(|v| v.as_str().map(|s| s.to_string())) - .flatten(); - - let res = available_accounts_iter.zip(private_keys_iter).collect::>(); - Ok(res) - } - - fn read_mcr_sc_adress() -> Result { - let file_path = env::var("MCR_SC_ADDRESS_FILE")?; - let addr_str = fs::read_to_string(file_path)?; - let addr: Address = addr_str.trim().parse()?; - Ok(addr) - } - - // Do the Genesis ceremony in Rust because if node by forge script, - // it's never done from Rust call. 
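`get_commitment_at_height` returns `None` for unsettled heights (the contract encodes those as height 0), which makes a small verification helper natural. A sketch, assuming `BlockCommitment: PartialEq` as the tests in this patch rely on; `verify_settled` is a hypothetical name:

async fn verify_settled(
	client: &impl McrSettlementClientOperations,
	expected: &BlockCommitment,
) -> Result<bool, anyhow::Error> {
	// None means nothing has settled at this height yet.
	Ok(client
		.get_commitment_at_height(expected.height)
		.await?
		.map_or(false, |settled| settled == *expected))
}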
- use alloy_primitives::Bytes; - use alloy_rpc_types::TransactionRequest; - - async fn do_genesis_ceremonial( - anvil_address: &[(String, String)], - rpc_url: &str, - ) -> Result<(), anyhow::Error> { - let mcr_address = read_mcr_sc_adress()?; - //Define Signer. Signer1 is the MCRSettelement client - let signer1: LocalWallet = anvil_address[1].1.parse()?; - let signer1_addr: Address = anvil_address[1].0.parse()?; - let signer1_rpc_provider = ProviderBuilder::new() - .with_recommended_fillers() - .signer(EthereumSigner::from(signer1)) - .on_http(rpc_url.parse()?); - let signer1_contract = MCR::new(mcr_address, &signer1_rpc_provider); - - stake_genesis( - &signer1_rpc_provider, - &signer1_contract, - mcr_address, - signer1_addr, - 55_000_000_000_000_000_000, - ) - .await?; - - let signer2: LocalWallet = anvil_address[2].1.parse()?; - let signer2_addr: Address = anvil_address[2].0.parse()?; - let signer2_rpc_provider = ProviderBuilder::new() - .with_recommended_fillers() - .signer(EthereumSigner::from(signer2)) - .on_http(rpc_url.parse()?); - let signer2_contract = MCR::new(mcr_address, &signer2_rpc_provider); - - //init staking - // Build a transaction to set the values. - stake_genesis( - &signer2_rpc_provider, - &signer2_contract, - mcr_address, - signer2_addr, - 54_000_000_000_000_000_000, - ) - .await?; - - let MCR::hasGenesisCeremonyEndedReturn { _0: has_genesis_ceremony_ended } = - signer2_contract.hasGenesisCeremonyEnded().call().await?; - let ceremony: bool = has_genesis_ceremony_ended.try_into().unwrap(); - assert!(ceremony); - Ok(()) - } - - async fn stake_genesis, T: Transport + Clone>( - provider: &P, - contract: &MCR::MCRInstance, - contract_address: Address, - signer: Address, - amount: u128, - ) -> Result<(), anyhow::Error> { - let stake_genesis_call = contract.stakeGenesis(); - let calldata = stake_genesis_call.calldata().to_owned(); - sendtx_function(provider, calldata, contract_address, signer, amount).await - } - async fn sendtx_function, T: Transport + Clone>( - provider: &P, - call_data: Bytes, - contract_address: Address, - signer: Address, - amount: u128, - ) -> Result<(), anyhow::Error> { - let eip1559_fees = provider.estimate_eip1559_fees(None).await?; - let tx = TransactionRequest::default() - .from(signer) - .to(contract_address) - .value(U256::from(amount)) - .input(call_data.into()) - .max_fee_per_gas(eip1559_fees.max_fee_per_gas) - .max_priority_fee_per_gas(eip1559_fees.max_priority_fee_per_gas); - - provider.send_transaction(tx).await?.get_receipt().await?; - Ok(()) - } + use super::*; + use alloy_provider::ProviderBuilder; + use alloy_signer_wallet::LocalWallet; + use movement_types::Commitment; + + // Define 2 validators (signer1 and signer2) with each a little more than 50% of stake. + // After genesis ceremony, 2 validator send the commitment for height 1. + // Validator2 send a commitment for height 2 to trigger next epoch and fire event. + // Wait the commitment accepted event. + //#[ignore] + #[tokio::test] + async fn test_send_commitment() -> Result<(), anyhow::Error> { + //Activate to debug the test. 
+ // use tracing_subscriber::EnvFilter;
+
+ // tracing_subscriber::fmt()
+ // .with_env_filter(
+ // EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
+ // )
+ // .init();
+
+ // Initialize test variables.
+ let rpc_port = env::var("MCR_ANVIL_PORT").unwrap();
+ let rpc_url = format!("http://localhost:{rpc_port}");
+ let ws_url = format!("ws://localhost:{rpc_port}");
+
+ let anvil_address = read_anvil_json_file_address()?;
+
+ // Do the SC ceremony init stake calls.
+ do_genesis_ceremonial(&anvil_address, &rpc_url).await?;
+
+ let mcr_address = read_mcr_sc_adress()?;
+ // Define signers. The ceremony defines 2 signers with roughly half of the stake each.
+ let signer1: LocalWallet = anvil_address[1].1.parse()?;
+ let signer1_addr = signer1.address();
+
+ // Build client 1 and send the first commitment.
+ let provider_client1 = ProviderBuilder::new()
+ .with_recommended_fillers()
+ .signer(EthereumSigner::from(signer1))
+ .on_http(rpc_url.parse().unwrap());
+
+ let config = McrEthSettlementConfig {
+ mrc_contract_address: mcr_address.to_string(),
+ gas_limit: DEFAULT_TX_GAS_LIMIT,
+ tx_send_nb_retry: MAX_TX_SEND_RETRY,
+ };
+
+ let client1 = McrEthSettlementClient::build_with_provider(
+ provider_client1,
+ signer1_addr,
+ ws_url.clone(),
+ config.clone(),
+ )
+ .await
+ .unwrap();
+
+ let mut client1_stream = client1.stream_block_commitments().await.unwrap();
+
+ // The client posts a new commitment.
+ let commitment =
+ BlockCommitment { height: 1, block_id: Id([2; 32]), commitment: Commitment([3; 32]) };
+
+ let res = client1.post_block_commitment(commitment.clone()).await;
+ assert!(res.is_ok());
+
+ // No notification: quorum is not reached.
+ let res =
+ tokio::time::timeout(tokio::time::Duration::from_secs(5), client1_stream.next()).await;
+ assert!(res.is_err());
+
+ // Build client 2 and send the second commitment.
+ let client2 =
+ McrEthSettlementClient::build_with_urls(&rpc_url, ws_url, &anvil_address[2].1, config)
+ .await
+ .unwrap();
+
+ let mut client2_stream = client2.stream_block_commitments().await.unwrap();
+
+ // The client posts a new commitment.
+ let res = client2.post_block_commitment(commitment).await;
+ assert!(res.is_ok());
+
+ // Now we move to block 2 and make a commitment just to trigger the epoch rollover.
+ let commitment2 =
+ BlockCommitment { height: 2, block_id: Id([4; 32]), commitment: Commitment([5; 32]) };
+
+ let res = client2.post_block_commitment(commitment2.clone()).await;
+ assert!(res.is_ok());
+
+ // Validate that the accepted-commitment stream gets the event.
+ let event =
+ tokio::time::timeout(tokio::time::Duration::from_secs(5), client1_stream.next())
+ .await
+ .unwrap()
+ .unwrap()
+ .unwrap();
+ assert_eq!(event.commitment.0[0], 5);
+ assert_eq!(event.block_id.0[0], 4);
+ let event =
+ tokio::time::timeout(tokio::time::Duration::from_secs(5), client2_stream.next())
+ .await
+ .unwrap()
+ .unwrap()
+ .unwrap();
+ assert_eq!(event.commitment.0[0], 5);
+ assert_eq!(event.block_id.0[0], 4);
+
+ // Test get_commitment_at_height.
+ let commitment = client1.get_commitment_at_height(1).await?;
+ assert!(commitment.is_some());
+ let commitment = commitment.unwrap();
+ assert_eq!(commitment.commitment.0[0], 3);
+ assert_eq!(commitment.block_id.0[0], 2);
+ let commitment = client1.get_commitment_at_height(10).await?;
+ assert_eq!(commitment, None);
+
+ Ok(())
+ }
+
+ use serde_json::{from_str, Value};
+ use std::fs;
+ fn read_anvil_json_file_address() -> Result<Vec<(String, String)>, anyhow::Error> {
+ let anvil_conf_file = env::var("ANVIL_JSON_PATH")?;
+ let file_content = fs::read_to_string(anvil_conf_file)?;
+
+ let json_value: Value = from_str(&file_content)?;
+
+ // Extract the available_accounts and private_keys fields.
+ let available_accounts_iter = json_value["available_accounts"]
+ .as_array()
+ .expect("available_accounts should be an array")
+ .iter()
+ .map(|v| v.as_str().map(|s| s.to_string()))
+ .flatten();
+
+ let private_keys_iter = json_value["private_keys"]
+ .as_array()
+ .expect("private_keys should be an array")
+ .iter()
+ .map(|v| v.as_str().map(|s| s.to_string()))
+ .flatten();
+
+ let res = available_accounts_iter
+ .zip(private_keys_iter)
+ .collect::<Vec<(String, String)>>();
+ Ok(res)
+ }
+
+ fn read_mcr_sc_adress() -> Result<Address, anyhow::Error> {
+ let file_path = env::var("MCR_SC_ADDRESS_FILE")?;
+ let addr_str = fs::read_to_string(file_path)?;
+ let addr: Address = addr_str.trim().parse()?;
+ Ok(addr)
+ }
+
+ // Do the genesis ceremony in Rust because, when the node is set up by the
+ // forge script, the ceremony is never done from the Rust side.
+ use alloy_primitives::Bytes;
+ use alloy_rpc_types::TransactionRequest;
+
+ async fn do_genesis_ceremonial(
+ anvil_address: &[(String, String)],
+ rpc_url: &str,
+ ) -> Result<(), anyhow::Error> {
+ let mcr_address = read_mcr_sc_adress()?;
+ // Define the signer. Signer1 is the MCR settlement client.
+ let signer1: LocalWallet = anvil_address[1].1.parse()?;
+ let signer1_addr: Address = anvil_address[1].0.parse()?;
+ let signer1_rpc_provider = ProviderBuilder::new()
+ .with_recommended_fillers()
+ .signer(EthereumSigner::from(signer1))
+ .on_http(rpc_url.parse()?);
+ let signer1_contract = MCR::new(mcr_address, &signer1_rpc_provider);
+
+ stake_genesis(
+ &signer1_rpc_provider,
+ &signer1_contract,
+ mcr_address,
+ signer1_addr,
+ 55_000_000_000_000_000_000,
+ )
+ .await?;
+
+ let signer2: LocalWallet = anvil_address[2].1.parse()?;
+ let signer2_addr: Address = anvil_address[2].0.parse()?;
+ let signer2_rpc_provider = ProviderBuilder::new()
+ .with_recommended_fillers()
+ .signer(EthereumSigner::from(signer2))
+ .on_http(rpc_url.parse()?);
+ let signer2_contract = MCR::new(mcr_address, &signer2_rpc_provider);
+
+ // Init staking.
+ // Build a transaction to set the values.
+ stake_genesis( + &signer2_rpc_provider, + &signer2_contract, + mcr_address, + signer2_addr, + 54_000_000_000_000_000_000, + ) + .await?; + + let MCR::hasGenesisCeremonyEndedReturn { _0: has_genesis_ceremony_ended } = + signer2_contract.hasGenesisCeremonyEnded().call().await?; + let ceremony: bool = has_genesis_ceremony_ended.try_into().unwrap(); + assert!(ceremony); + Ok(()) + } + + async fn stake_genesis, T: Transport + Clone>( + provider: &P, + contract: &MCR::MCRInstance, + contract_address: Address, + signer: Address, + amount: u128, + ) -> Result<(), anyhow::Error> { + let stake_genesis_call = contract.stakeGenesis(); + let calldata = stake_genesis_call.calldata().to_owned(); + sendtx_function(provider, calldata, contract_address, signer, amount).await + } + async fn sendtx_function, T: Transport + Clone>( + provider: &P, + call_data: Bytes, + contract_address: Address, + signer: Address, + amount: u128, + ) -> Result<(), anyhow::Error> { + let eip1559_fees = provider.estimate_eip1559_fees(None).await?; + let tx = TransactionRequest::default() + .from(signer) + .to(contract_address) + .value(U256::from(amount)) + .input(call_data.into()) + .max_fee_per_gas(eip1559_fees.max_fee_per_gas) + .max_priority_fee_per_gas(eip1559_fees.max_priority_fee_per_gas); + + provider.send_transaction(tx).await?.get_receipt().await?; + Ok(()) + } } diff --git a/protocol-units/settlement/mcr/client/src/lib.rs b/protocol-units/settlement/mcr/client/src/lib.rs index 558bd5a3a..8e9bce72b 100644 --- a/protocol-units/settlement/mcr/client/src/lib.rs +++ b/protocol-units/settlement/mcr/client/src/lib.rs @@ -10,26 +10,26 @@ pub mod eth_client; mod send_eth_tx; type CommitmentStream = - std::pin::Pin> + Send>>; + std::pin::Pin> + Send>>; #[async_trait::async_trait] pub trait McrSettlementClientOperations { - async fn post_block_commitment( - &self, - block_commitment: BlockCommitment, - ) -> Result<(), anyhow::Error>; + async fn post_block_commitment( + &self, + block_commitment: BlockCommitment, + ) -> Result<(), anyhow::Error>; - async fn post_block_commitment_batch( - &self, - block_commitment: Vec, - ) -> Result<(), anyhow::Error>; + async fn post_block_commitment_batch( + &self, + block_commitment: Vec, + ) -> Result<(), anyhow::Error>; - async fn stream_block_commitments(&self) -> Result; + async fn stream_block_commitments(&self) -> Result; - async fn get_commitment_at_height( - &self, - height: u64, - ) -> Result, anyhow::Error>; + async fn get_commitment_at_height( + &self, + height: u64, + ) -> Result, anyhow::Error>; - async fn get_max_tolerable_block_height(&self) -> Result; + async fn get_max_tolerable_block_height(&self) -> Result; } diff --git a/protocol-units/settlement/mcr/client/src/mock.rs b/protocol-units/settlement/mcr/client/src/mock.rs index 8ae03d545..67f79a332 100644 --- a/protocol-units/settlement/mcr/client/src/mock.rs +++ b/protocol-units/settlement/mcr/client/src/mock.rs @@ -7,260 +7,260 @@ use tokio_stream::wrappers::ReceiverStream; #[derive(Clone)] pub struct MockMcrSettlementClient { - commitments: Arc>>, - stream_sender: mpsc::Sender>, - stream_receiver: Arc>>>>, - pub current_height: Arc>, - pub block_lead_tolerance: u64, - paused_at_height: Arc>>, + commitments: Arc>>, + stream_sender: mpsc::Sender>, + stream_receiver: Arc>>>>, + pub current_height: Arc>, + pub block_lead_tolerance: u64, + paused_at_height: Arc>>, } impl MockMcrSettlementClient { - pub fn new() -> Self { - let (stream_sender, receiver) = mpsc::channel(10); - MockMcrSettlementClient { - commitments: 
Arc::new(RwLock::new(BTreeMap::new())), - stream_sender, - stream_receiver: Arc::new(Mutex::new(Some(receiver))), - current_height: Arc::new(RwLock::new(0)), - block_lead_tolerance: 16, - paused_at_height: Arc::new(RwLock::new(None)), - } - } + pub fn new() -> Self { + let (stream_sender, receiver) = mpsc::channel(10); + MockMcrSettlementClient { + commitments: Arc::new(RwLock::new(BTreeMap::new())), + stream_sender, + stream_receiver: Arc::new(Mutex::new(Some(receiver))), + current_height: Arc::new(RwLock::new(0)), + block_lead_tolerance: 16, + paused_at_height: Arc::new(RwLock::new(None)), + } + } - /// Overrides the commitment to settle on at given height. - /// - /// To have effect, this method needs to be called before a commitment is - /// posted for this height with the `McrSettlementClientOperations` API. - pub async fn override_block_commitment(&self, commitment: BlockCommitment) { - let mut commitments = self.commitments.write().await; - commitments.insert(commitment.height, commitment); - } + /// Overrides the commitment to settle on at given height. + /// + /// To have effect, this method needs to be called before a commitment is + /// posted for this height with the `McrSettlementClientOperations` API. + pub async fn override_block_commitment(&self, commitment: BlockCommitment) { + let mut commitments = self.commitments.write().await; + commitments.insert(commitment.height, commitment); + } - /// Stop streaming commitments after the given height. - /// - /// Any posted commitments will be accumulated. - pub async fn pause_after(&self, height: u64) { - let mut paused_at_height = self.paused_at_height.write().await; - *paused_at_height = Some(height); - } + /// Stop streaming commitments after the given height. + /// + /// Any posted commitments will be accumulated. + pub async fn pause_after(&self, height: u64) { + let mut paused_at_height = self.paused_at_height.write().await; + *paused_at_height = Some(height); + } - /// Stream any commitments that have been posted following the height - /// at which `pause` was called, and resume streaming any newly posted - /// commitments - pub async fn resume(&self) { - let resume_height = { - let mut paused_at_height = self.paused_at_height.write().await; - paused_at_height.take().expect("not paused") - }; - { - let commitments = self.commitments.read().await; - for (_, commitment) in commitments.range(resume_height + 1..) { - println!("resume sends commitment for height {}", commitment.height); - self.stream_sender.send(Ok(commitment.clone())).await.unwrap(); - } - } - } + /// Stream any commitments that have been posted following the height + /// at which `pause` was called, and resume streaming any newly posted + /// commitments + pub async fn resume(&self) { + let resume_height = { + let mut paused_at_height = self.paused_at_height.write().await; + paused_at_height.take().expect("not paused") + }; + { + let commitments = self.commitments.read().await; + for (_, commitment) in commitments.range(resume_height + 1..) 
{ + println!("resume sends commitment for height {}", commitment.height); + self.stream_sender.send(Ok(commitment.clone())).await.unwrap(); + } + } + } } #[async_trait::async_trait] impl McrSettlementClientOperations for MockMcrSettlementClient { - async fn post_block_commitment( - &self, - block_commitment: BlockCommitment, - ) -> Result<(), anyhow::Error> { - let height = block_commitment.height; + async fn post_block_commitment( + &self, + block_commitment: BlockCommitment, + ) -> Result<(), anyhow::Error> { + let height = block_commitment.height; - let settled = { - let mut commitments = self.commitments.write().await; - commitments.entry(block_commitment.height).or_insert(block_commitment).clone() - }; - { - let paused_at_height = self.paused_at_height.read().await; - match *paused_at_height { - Some(ph) if ph < height => {} - _ => { - self.stream_sender.send(Ok(settled)).await?; - } - } - } + let settled = { + let mut commitments = self.commitments.write().await; + commitments.entry(block_commitment.height).or_insert(block_commitment).clone() + }; + { + let paused_at_height = self.paused_at_height.read().await; + match *paused_at_height { + Some(ph) if ph < height => {} + _ => { + self.stream_sender.send(Ok(settled)).await?; + } + } + } - { - let mut current_height = self.current_height.write().await; - if height > *current_height { - *current_height = height; - } - } + { + let mut current_height = self.current_height.write().await; + if height > *current_height { + *current_height = height; + } + } - Ok(()) - } + Ok(()) + } - async fn post_block_commitment_batch( - &self, - block_commitment: Vec, - ) -> Result<(), anyhow::Error> { - for commitment in block_commitment { - self.post_block_commitment(commitment).await?; - } - Ok(()) - } + async fn post_block_commitment_batch( + &self, + block_commitment: Vec, + ) -> Result<(), anyhow::Error> { + for commitment in block_commitment { + self.post_block_commitment(commitment).await?; + } + Ok(()) + } - async fn stream_block_commitments(&self) -> Result { - let receiver = self - .stream_receiver - .lock() - .unwrap() - .take() - .expect("stream_block_commitments already called"); - Ok(Box::pin(ReceiverStream::new(receiver))) - } + async fn stream_block_commitments(&self) -> Result { + let receiver = self + .stream_receiver + .lock() + .unwrap() + .take() + .expect("stream_block_commitments already called"); + Ok(Box::pin(ReceiverStream::new(receiver))) + } - async fn get_commitment_at_height( - &self, - height: u64, - ) -> Result, anyhow::Error> { - let guard = self.commitments.read().await; - Ok(guard.get(&height).cloned()) - } + async fn get_commitment_at_height( + &self, + height: u64, + ) -> Result, anyhow::Error> { + let guard = self.commitments.read().await; + Ok(guard.get(&height).cloned()) + } - async fn get_max_tolerable_block_height(&self) -> Result { - Ok(*self.current_height.read().await + self.block_lead_tolerance) - } + async fn get_max_tolerable_block_height(&self) -> Result { + Ok(*self.current_height.read().await + self.block_lead_tolerance) + } } #[cfg(test)] pub mod test { - use super::*; - use movement_types::Commitment; + use super::*; + use movement_types::Commitment; - use futures::future; - use tokio::select; - use tokio_stream::StreamExt; + use futures::future; + use tokio::select; + use tokio_stream::StreamExt; - #[tokio::test] - async fn test_post_block_commitment() -> Result<(), anyhow::Error> { - let client = MockMcrSettlementClient::new(); - let commitment = BlockCommitment { - height: 1, - block_id: 
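The `pause_after`/`resume` pair above is what lets tests simulate a settlement outage: commitments posted past the pause height are accumulated, then replayed on resume. A sketch of the intended call pattern, following the mock's semantics; the heights are illustrative and `Commitment::test()` is the helper the tests below use:

use tokio_stream::StreamExt;

async fn replay_after_outage(client: &MockMcrSettlementClient) -> Result<(), anyhow::Error> {
	let mut stream = client.stream_block_commitments().await?;
	client.pause_after(1).await;
	for height in 1..=3 {
		let commitment = BlockCommitment {
			height,
			block_id: Default::default(),
			commitment: Commitment::test(),
		};
		client.post_block_commitment(commitment).await?;
	}
	// Height 1 was streamed immediately; resume() replays heights 2 and 3.
	client.resume().await;
	for expected in 1..=3 {
		assert_eq!(stream.next().await.expect("stream ended")?.height, expected);
	}
	Ok(())
}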
Default::default(), - commitment: Commitment::test(), - }; - client.post_block_commitment(commitment.clone()).await.unwrap(); - let guard = client.commitments.write().await; - assert_eq!(guard.get(&1), Some(&commitment)); + #[tokio::test] + async fn test_post_block_commitment() -> Result<(), anyhow::Error> { + let client = MockMcrSettlementClient::new(); + let commitment = BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment::test(), + }; + client.post_block_commitment(commitment.clone()).await.unwrap(); + let guard = client.commitments.write().await; + assert_eq!(guard.get(&1), Some(&commitment)); - assert_eq!(*client.current_height.read().await, 1); - assert_eq!(client.get_max_tolerable_block_height().await?, 17); + assert_eq!(*client.current_height.read().await, 1); + assert_eq!(client.get_max_tolerable_block_height().await?, 17); - Ok(()) - } + Ok(()) + } - #[tokio::test] - async fn test_post_block_commitment_batch() -> Result<(), anyhow::Error> { - let client = MockMcrSettlementClient::new(); - let commitment = BlockCommitment { - height: 1, - block_id: Default::default(), - commitment: Commitment::test(), - }; - let commitment2 = BlockCommitment { - height: 2, - block_id: Default::default(), - commitment: Commitment::test(), - }; - client - .post_block_commitment_batch(vec![commitment.clone(), commitment2.clone()]) - .await - .unwrap(); - let guard = client.commitments.write().await; - assert_eq!(guard.get(&1), Some(&commitment)); - assert_eq!(guard.get(&2), Some(&commitment2)); - Ok(()) - } + #[tokio::test] + async fn test_post_block_commitment_batch() -> Result<(), anyhow::Error> { + let client = MockMcrSettlementClient::new(); + let commitment = BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment::test(), + }; + let commitment2 = BlockCommitment { + height: 2, + block_id: Default::default(), + commitment: Commitment::test(), + }; + client + .post_block_commitment_batch(vec![commitment.clone(), commitment2.clone()]) + .await + .unwrap(); + let guard = client.commitments.write().await; + assert_eq!(guard.get(&1), Some(&commitment)); + assert_eq!(guard.get(&2), Some(&commitment2)); + Ok(()) + } - #[tokio::test] - async fn test_stream_block_commitments() -> Result<(), anyhow::Error> { - let client = MockMcrSettlementClient::new(); - let commitment = BlockCommitment { - height: 1, - block_id: Default::default(), - commitment: Commitment::test(), - }; - client.post_block_commitment(commitment.clone()).await.unwrap(); - let mut stream = client.stream_block_commitments().await?; - assert_eq!(stream.next().await.unwrap().unwrap(), commitment); - Ok(()) - } + #[tokio::test] + async fn test_stream_block_commitments() -> Result<(), anyhow::Error> { + let client = MockMcrSettlementClient::new(); + let commitment = BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment::test(), + }; + client.post_block_commitment(commitment.clone()).await.unwrap(); + let mut stream = client.stream_block_commitments().await?; + assert_eq!(stream.next().await.unwrap().unwrap(), commitment); + Ok(()) + } - #[tokio::test] - async fn test_override_block_commitments() -> Result<(), anyhow::Error> { - let client = MockMcrSettlementClient::new(); - let commitment = BlockCommitment { - height: 1, - block_id: Default::default(), - commitment: Commitment::test(), - }; - client.override_block_commitment(commitment.clone()).await; - client - .post_block_commitment(BlockCommitment { - height: 1, - block_id: 
Default::default(), - commitment: Commitment([1; 32]), - }) - .await - .unwrap(); - let mut stream = client.stream_block_commitments().await?; - assert_eq!(stream.next().await.expect("stream has ended")?, commitment); - Ok(()) - } + #[tokio::test] + async fn test_override_block_commitments() -> Result<(), anyhow::Error> { + let client = MockMcrSettlementClient::new(); + let commitment = BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment::test(), + }; + client.override_block_commitment(commitment.clone()).await; + client + .post_block_commitment(BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment([1; 32]), + }) + .await + .unwrap(); + let mut stream = client.stream_block_commitments().await?; + assert_eq!(stream.next().await.expect("stream has ended")?, commitment); + Ok(()) + } - #[tokio::test] - async fn test_pause() -> Result<(), anyhow::Error> { - let client = MockMcrSettlementClient::new(); - let commitment = BlockCommitment { - height: 1, - block_id: Default::default(), - commitment: Commitment([1; 32]), - }; - client.pause_after(1).await; - client.post_block_commitment(commitment.clone()).await?; - let commitment2 = BlockCommitment { - height: 2, - block_id: Default::default(), - commitment: Commitment([1; 32]), - }; - client.post_block_commitment(commitment2).await?; - let mut stream = client.stream_block_commitments().await?; - assert_eq!(stream.next().await.expect("stream has ended")?, commitment); - select! { - biased; - _ = stream.next() => panic!("stream should be paused"), - _ = future::ready(()) => {} - } - Ok(()) - } + #[tokio::test] + async fn test_pause() -> Result<(), anyhow::Error> { + let client = MockMcrSettlementClient::new(); + let commitment = BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment([1; 32]), + }; + client.pause_after(1).await; + client.post_block_commitment(commitment.clone()).await?; + let commitment2 = BlockCommitment { + height: 2, + block_id: Default::default(), + commitment: Commitment([1; 32]), + }; + client.post_block_commitment(commitment2).await?; + let mut stream = client.stream_block_commitments().await?; + assert_eq!(stream.next().await.expect("stream has ended")?, commitment); + select! 
{
+			biased;
+			_ = stream.next() => panic!("stream should be paused"),
+			_ = future::ready(()) => {}
+		}
+		Ok(())
+	}

-	#[tokio::test]
-	async fn test_resume() -> Result<(), anyhow::Error> {
-		let client = MockMcrSettlementClient::new();
-		let commitment = BlockCommitment {
-			height: 1,
-			block_id: Default::default(),
-			commitment: Commitment([1; 32]),
-		};
-		client.pause_after(1).await;
-		client.post_block_commitment(commitment.clone()).await?;
-		let commitment2 = BlockCommitment {
-			height: 2,
-			block_id: Default::default(),
-			commitment: Commitment([1; 32]),
-		};
-		client.post_block_commitment(commitment2.clone()).await?;
-		let mut stream = client.stream_block_commitments().await?;
-		assert_eq!(stream.next().await.expect("stream has ended")?, commitment);
-		client.resume().await;
-		assert_eq!(stream.next().await.expect("stream has ended")?, commitment2);
-		Ok(())
-	}
+	#[tokio::test]
+	async fn test_resume() -> Result<(), anyhow::Error> {
+		let client = MockMcrSettlementClient::new();
+		let commitment = BlockCommitment {
+			height: 1,
+			block_id: Default::default(),
+			commitment: Commitment([1; 32]),
+		};
+		client.pause_after(1).await;
+		client.post_block_commitment(commitment.clone()).await?;
+		let commitment2 = BlockCommitment {
+			height: 2,
+			block_id: Default::default(),
+			commitment: Commitment([1; 32]),
+		};
+		client.post_block_commitment(commitment2.clone()).await?;
+		let mut stream = client.stream_block_commitments().await?;
+		assert_eq!(stream.next().await.expect("stream has ended")?, commitment);
+		client.resume().await;
+		assert_eq!(stream.next().await.expect("stream has ended")?, commitment2);
+		Ok(())
+	}
 }
diff --git a/protocol-units/settlement/mcr/client/src/send_eth_tx.rs b/protocol-units/settlement/mcr/client/src/send_eth_tx.rs
index 95d3e0654..69bd36f22 100644
--- a/protocol-units/settlement/mcr/client/src/send_eth_tx.rs
+++ b/protocol-units/settlement/mcr/client/src/send_eth_tx.rs
@@ -11,17 +11,17 @@ use std::marker::PhantomData;
 // * a specific error must be returned: return Err(McrEthConnectorError::xxx);
 // * the rule doesn't apply: return Ok(false)
 pub trait VerifyRule: Sync + Send {
-	fn verify(&self, error: &alloy_contract::Error) -> Result<bool, McrEthConnectorError>;
+	fn verify(&self, error: &alloy_contract::Error) -> Result<bool, McrEthConnectorError>;
 }

 pub struct SendTxErrorRule<Kind> {
-	_kind: PhantomData<Kind>,
+	_kind: PhantomData<Kind>,
 }

 impl<Kind> SendTxErrorRule<Kind> {
-	pub fn new() -> Self {
-		SendTxErrorRule { _kind: PhantomData }
-	}
+	pub fn new() -> Self {
+		SendTxErrorRule { _kind: PhantomData }
+	}
 }

 // Define the 2 error kinds currently managed.
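Reviewer note: each retry rule in this file is a zero-sized marker type plus a `VerifyRule` impl on `SendTxErrorRule<Kind>`, returning `Ok(true)` to retry with more gas, `Ok(false)` to pass to the next rule, or `Err(...)` to abort. A minimal sketch of how a third rule would slot into this module; the `NonceTooLow` marker and its error message are hypothetical assumptions for illustration, not part of this patch:

pub struct NonceTooLow;

impl VerifyRule for SendTxErrorRule<NonceTooLow> {
	fn verify(&self, error: &alloy_contract::Error) -> Result<bool, McrEthConnectorError> {
		// Only JSON-RPC error responses are inspected; everything else passes through.
		let alloy_contract::Error::TransportError(TransportError::ErrorResp(payload)) = error
		else {
			return Ok(false);
		};
		// Hypothetical: retry when the node reports a stale nonce. The exact
		// message is node-dependent and would need to be confirmed in testing.
		if payload.code == -32000 && payload.message.contains("nonce too low") {
			Ok(true)
		} else {
			Ok(false)
		}
	}
}

A boxed instance of the rule would then be appended to the `send_tx_error_rules` slice passed to `send_tx` below.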
@@ -29,109 +29,109 @@ pub struct UnderPriced;
 pub struct InsufficentFunds;

 impl VerifyRule for SendTxErrorRule<UnderPriced> {
-	fn verify(&self, error: &alloy_contract::Error) -> Result<bool, McrEthConnectorError> {
-		let alloy_contract::Error::TransportError(TransportError::ErrorResp(payload)) = error
-		else {
-			return Ok(false);
-		};
+	fn verify(&self, error: &alloy_contract::Error) -> Result<bool, McrEthConnectorError> {
+		let alloy_contract::Error::TransportError(TransportError::ErrorResp(payload)) = error
+		else {
+			return Ok(false);
+		};

-		if payload.code == -32000 && payload.message.contains("transaction underpriced") {
-			Ok(true)
-		} else {
-			Ok(false)
-		}
-	}
+		if payload.code == -32000 && payload.message.contains("transaction underpriced") {
+			Ok(true)
+		} else {
+			Ok(false)
+		}
+	}
 }

 impl VerifyRule for SendTxErrorRule<InsufficentFunds> {
-	fn verify(&self, error: &alloy_contract::Error) -> Result<bool, McrEthConnectorError> {
-		let alloy_contract::Error::TransportError(TransportError::ErrorResp(payload)) = error
-		else {
-			return Ok(false);
-		};
+	fn verify(&self, error: &alloy_contract::Error) -> Result<bool, McrEthConnectorError> {
+		let alloy_contract::Error::TransportError(TransportError::ErrorResp(payload)) = error
+		else {
+			return Ok(false);
+		};

-		if payload.code == -32000 && payload.message.contains("insufficient funds") {
-			Err(McrEthConnectorError::InsufficientFunds(payload.message.clone()))
-		} else {
-			Ok(false)
-		}
-	}
+		if payload.code == -32000 && payload.message.contains("insufficient funds") {
+			Err(McrEthConnectorError::InsufficientFunds(payload.message.clone()))
+		} else {
+			Ok(false)
+		}
+	}
 }

 pub async fn send_tx<
-	P: Provider<T> + Clone,
-	T: Transport + Clone,
-	D: CallDecoder + Clone,
+	P: Provider<T> + Clone,
+	T: Transport + Clone,
+	D: CallDecoder + Clone,
 >(
-	base_call_builder: CallBuilder<T, &P, D>,
-	send_tx_error_rules: &[Box<dyn VerifyRule>],
-	nb_retry: usize,
-	gas_limit: u128,
+	base_call_builder: CallBuilder<T, &P, D>,
+	send_tx_error_rules: &[Box<dyn VerifyRule>],
+	nb_retry: usize,
+	gas_limit: u128,
 ) -> Result<(), anyhow::Error> {
-	//validate gaz price.
-	let mut estimate_gas = base_call_builder.estimate_gas().await?;
-	// Add 20% because initial gas estimate are too low.
-	estimate_gas += (estimate_gas * 20) / 100;
+	// Validate gas price.
+	let mut estimate_gas = base_call_builder.estimate_gas().await?;
+	// Add 20% because initial gas estimates are too low.
+	estimate_gas += (estimate_gas * 20) / 100;

-	// Sending Tx automatically can lead to errors that depend on the state for Eth.
-	// It's convenient to manage some of them automatically to avoid to fail commitment Tx.
-	// I define a first one but other should be added depending on the test with mainnet.
-	for _ in 0..nb_retry {
-		let call_builder = base_call_builder.clone().gas(estimate_gas);
+	// Sending a Tx automatically can lead to errors that depend on the Eth node's state.
+	// It's convenient to handle some of them automatically so that commitment Txs don't fail.
+	// A first rule is defined here; others should be added based on testing against mainnet.
+	for _ in 0..nb_retry {
+		let call_builder = base_call_builder.clone().gas(estimate_gas);

-		//detect if the gas price doesn't execeed the limit.
-		let gas_price = call_builder.provider.get_gas_price().await?;
-		let tx_fee_wei = estimate_gas * gas_price;
-		if tx_fee_wei > gas_limit {
-			return Err(McrEthConnectorError::GasLimitExceed(tx_fee_wei, gas_limit).into());
-		}
+		// Detect whether the gas price exceeds the limit.
+		let gas_price = call_builder.provider.get_gas_price().await?;
+		let tx_fee_wei = estimate_gas * gas_price;
+		if tx_fee_wei > gas_limit {
+			return Err(McrEthConnectorError::GasLimitExceed(tx_fee_wei, gas_limit).into());
+		}

-		//send the Tx and detect send error.
-		let pending_tx = match call_builder.send().await {
-			Ok(pending_tx) => pending_tx,
-			Err(err) => {
-				//apply defined rules.
-				for rule in send_tx_error_rules {
-					// Verify all rules. If one rule return true or an error stop verification.
-					// If true retry with more gas else return the error.
-					if rule.verify(&err)? {
-						//increase gas of 10% and retry
-						estimate_gas += (estimate_gas * 10) / 100;
-						tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
-						continue;
-					}
-				}
+		// Send the Tx and detect send errors.
+		let pending_tx = match call_builder.send().await {
+			Ok(pending_tx) => pending_tx,
+			Err(err) => {
+				// Apply the defined rules.
+				for rule in send_tx_error_rules {
+					// Verify the rules in turn. If a rule returns true or an error, stop verification.
+					// If true, retry with more gas; otherwise return the error.
+					if rule.verify(&err)? {
+						// Increase gas by 10% and retry.
+						estimate_gas += (estimate_gas * 10) / 100;
+						tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+						continue;
+					}
+				}

-				return Err(McrEthConnectorError::from(err).into());
-			}
-		};
+				return Err(McrEthConnectorError::from(err).into());
+			}
+		};

-		match pending_tx.get_receipt().await {
-			// Tx execution fail
-			Ok(tx_receipt) if !tx_receipt.status() => {
-				tracing::debug!(
-					"tx_receipt.gas_used: {} / estimate_gas: {estimate_gas}",
-					tx_receipt.gas_used
-				);
-				if tx_receipt.gas_used == estimate_gas {
-					tracing::warn!("Send commitment Tx fail because of insufficient gas, receipt:{tx_receipt:?} ");
-					estimate_gas += (estimate_gas * 10) / 100;
-					continue;
-				} else {
-					return Err(McrEthConnectorError::RpcTxExecution(format!(
-						"Send commitment Tx fail, abort Tx, receipt:{tx_receipt:?}"
-					))
-					.into());
-				}
-			}
-			Ok(_) => return Ok(()),
-			Err(err) => return Err(McrEthConnectorError::RpcTxExecution(err.to_string()).into()),
-		};
-	}
+		match pending_tx.get_receipt().await {
+			// Tx execution failed.
+			Ok(tx_receipt) if !tx_receipt.status() => {
+				tracing::debug!(
+					"tx_receipt.gas_used: {} / estimate_gas: {estimate_gas}",
+					tx_receipt.gas_used
+				);
+				if tx_receipt.gas_used == estimate_gas {
+					tracing::warn!("Send commitment Tx failed because of insufficient gas, receipt: {tx_receipt:?}");
+					estimate_gas += (estimate_gas * 10) / 100;
+					continue;
+				} else {
+					return Err(McrEthConnectorError::RpcTxExecution(format!(
+						"Send commitment Tx failed, aborting Tx, receipt: {tx_receipt:?}"
+					))
+					.into());
+				}
+			}
+			Ok(_) => return Ok(()),
+			Err(err) => return Err(McrEthConnectorError::RpcTxExecution(err.to_string()).into()),
+		};
+	}

-	//Max retry exceed
-	Err(McrEthConnectorError::RpcTxExecution(
-		"Send commitment Tx fail because of exceed max retry".to_string(),
-	)
-	.into())
+	// Max retries exceeded.
+	Err(McrEthConnectorError::RpcTxExecution(
+		"Send commitment Tx failed: exceeded max retries".to_string(),
+	)
+	.into())
 }
diff --git a/protocol-units/settlement/mcr/manager/src/lib.rs b/protocol-units/settlement/mcr/manager/src/lib.rs
index 738c7db72..72583be58 100644
--- a/protocol-units/settlement/mcr/manager/src/lib.rs
+++ b/protocol-units/settlement/mcr/manager/src/lib.rs
@@ -6,13 +6,13 @@ mod manager;
 pub use manager::Manager as McrSettlementManager;

 pub type CommitmentEventStream =
-	std::pin::Pin<Box<dyn Stream<Item = Result<BlockCommitmentEvent, anyhow::Error>> + Send>>;
+	std::pin::Pin<Box<dyn Stream<Item = Result<BlockCommitmentEvent, anyhow::Error>> + Send>>;

 #[async_trait::async_trait]
 pub trait 
McrSettlementManagerOperations {
-	/// Adds a block commitment to the manager queue.
-	async fn post_block_commitment(
-		&self,
-		block_commitment: BlockCommitment,
-	) -> Result<(), anyhow::Error>;
+	/// Adds a block commitment to the manager queue.
+	async fn post_block_commitment(
+		&self,
+		block_commitment: BlockCommitment,
+	) -> Result<(), anyhow::Error>;
 }
diff --git a/protocol-units/settlement/mcr/manager/src/manager.rs b/protocol-units/settlement/mcr/manager/src/manager.rs
index 19e60baa4..91d406d31 100644
--- a/protocol-units/settlement/mcr/manager/src/manager.rs
+++ b/protocol-units/settlement/mcr/manager/src/manager.rs
@@ -13,234 +13,234 @@ use std::mem;

 /// Public handle for the MCR settlement manager.
 pub struct Manager {
-	sender: mpsc::Sender<BlockCommitment>,
+	sender: mpsc::Sender<BlockCommitment>,
 }

 impl Manager {
-	/// Creates a new MCR settlement manager.
-	///
-	/// Returns the handle with the public API and the stream to receive commitment events.
-	/// The stream needs to be polled to drive the MCR settlement client and
-	/// process the commitments.
-	pub fn new<C: McrSettlementClientOperations + Send + 'static>(
-		client: C,
-	) -> (Self, CommitmentEventStream) {
-		let (sender, receiver) = mpsc::channel(16);
-		let event_stream = process_commitments(receiver, client);
-		(Self { sender }, event_stream)
-	}
+	/// Creates a new MCR settlement manager.
+	///
+	/// Returns the handle with the public API and the stream to receive commitment events.
+	/// The stream needs to be polled to drive the MCR settlement client and
+	/// process the commitments.
+	pub fn new<C: McrSettlementClientOperations + Send + 'static>(
+		client: C,
+	) -> (Self, CommitmentEventStream) {
+		let (sender, receiver) = mpsc::channel(16);
+		let event_stream = process_commitments(receiver, client);
+		(Self { sender }, event_stream)
+	}
 }

 #[async_trait]
 impl McrSettlementManagerOperations for Manager {
-	async fn post_block_commitment(
-		&self,
-		block_commitment: BlockCommitment,
-	) -> Result<(), anyhow::Error> {
-		self.sender.send(block_commitment).await?;
-		Ok(())
-	}
+	async fn post_block_commitment(
+		&self,
+		block_commitment: BlockCommitment,
+	) -> Result<(), anyhow::Error> {
+		self.sender.send(block_commitment).await?;
+		Ok(())
+	}
 }

 fn process_commitments<C: McrSettlementClientOperations + Send + 'static>(
-	mut receiver: mpsc::Receiver<BlockCommitment>,
-	client: C,
+	mut receiver: mpsc::Receiver<BlockCommitment>,
+	client: C,
 ) -> CommitmentEventStream {
-	// Can't mix try_stream! and select!, see https://github.com/tokio-rs/async-stream/issues/63
-	Box::pin(stream! {
-		let mut settlement_stream = client.stream_block_commitments().await?;
-		let mut max_height = client.get_max_tolerable_block_height().await?;
-		let mut ahead_of_settlement = false;
-		let mut commitments_to_settle = BTreeMap::new();
-		let mut batch_acc = Vec::new();
-		loop {
-			tokio::select!
{
-				Some(block_commitment) = receiver.recv(), if !ahead_of_settlement => {
-					commitments_to_settle.insert(
-						block_commitment.height,
-						block_commitment.commitment.clone(),
-					);
-					if block_commitment.height > max_height {
-						ahead_of_settlement = true;
-						let batch = mem::replace(&mut batch_acc, Vec::new());
-						if let Err(e) = client.post_block_commitment_batch(batch).await {
-							yield Err(e);
-							break;
-						}
-					}
-					batch_acc.push(block_commitment);
-				}
-				Some(res) = settlement_stream.next() => {
-					let settled_commitment = match res {
-						Ok(commitment) => commitment,
-						Err(e) => {
-							yield Err(e);
-							break;
-						}
-					};
-
-					let height = settled_commitment.height;
-					if let Some(commitment) = commitments_to_settle.remove(&height) {
-						let event = if commitment == settled_commitment.commitment {
-							BlockCommitmentEvent::Accepted(settled_commitment)
-						} else {
-							BlockCommitmentEvent::Rejected {
-								height,
-								reason: BlockCommitmentRejectionReason::InvalidCommitment,
-							}
-						};
-						yield Ok(event);
-					} else if let Some((&lh, _)) = commitments_to_settle.last_key_value() {
-						if lh < height {
-							// Settlement has left some commitments behind, but the client could
-							// deliver them of order?
-							todo!("Handle falling behind on settlement")
-						}
-					}
-					// Remove back-pressure if we can proceed settling new blocks.
-					if ahead_of_settlement {
-						let new_max_height = match client.get_max_tolerable_block_height().await {
-							Ok(h) => h,
-							Err(e) => {
-								yield Err(e);
-								break;
-							}
-						};
-						if new_max_height > max_height {
-							max_height = new_max_height;
-							ahead_of_settlement = false;
-						}
-					}
-				}
-				else => break
-			}
-		}
-	})
+	// Can't mix try_stream! and select!, see https://github.com/tokio-rs/async-stream/issues/63
+	Box::pin(stream! {
+		let mut settlement_stream = client.stream_block_commitments().await?;
+		let mut max_height = client.get_max_tolerable_block_height().await?;
+		let mut ahead_of_settlement = false;
+		let mut commitments_to_settle = BTreeMap::new();
+		let mut batch_acc = Vec::new();
+		loop {
+			tokio::select! {
+				Some(block_commitment) = receiver.recv(), if !ahead_of_settlement => {
+					commitments_to_settle.insert(
+						block_commitment.height,
+						block_commitment.commitment.clone(),
+					);
+					if block_commitment.height > max_height {
+						ahead_of_settlement = true;
+						let batch = mem::replace(&mut batch_acc, Vec::new());
+						if let Err(e) = client.post_block_commitment_batch(batch).await {
+							yield Err(e);
+							break;
+						}
+					}
+					batch_acc.push(block_commitment);
+				}
+				Some(res) = settlement_stream.next() => {
+					let settled_commitment = match res {
+						Ok(commitment) => commitment,
+						Err(e) => {
+							yield Err(e);
+							break;
+						}
+					};
+
+					let height = settled_commitment.height;
+					if let Some(commitment) = commitments_to_settle.remove(&height) {
+						let event = if commitment == settled_commitment.commitment {
+							BlockCommitmentEvent::Accepted(settled_commitment)
+						} else {
+							BlockCommitmentEvent::Rejected {
+								height,
+								reason: BlockCommitmentRejectionReason::InvalidCommitment,
+							}
+						};
+						yield Ok(event);
+					} else if let Some((&lh, _)) = commitments_to_settle.last_key_value() {
+						if lh < height {
+							// Settlement has left some commitments behind, but the client could
+							// deliver them out of order?
+							todo!("Handle falling behind on settlement")
+						}
+					}
+					// Remove back-pressure if we can proceed settling new blocks.
+ if ahead_of_settlement { + let new_max_height = match client.get_max_tolerable_block_height().await { + Ok(h) => h, + Err(e) => { + yield Err(e); + break; + } + }; + if new_max_height > max_height { + max_height = new_max_height; + ahead_of_settlement = false; + } + } + } + else => break + } + } + }) } #[cfg(test)] mod tests { - use super::*; - use mcr_settlement_client::mock::MockMcrSettlementClient; - use movement_types::{BlockCommitment, Commitment}; - - #[tokio::test] - async fn test_block_commitment_accepted() -> Result<(), anyhow::Error> { - let mut client = MockMcrSettlementClient::new(); - client.block_lead_tolerance = 1; - let (manager, mut event_stream) = Manager::new(client.clone()); - let commitment = BlockCommitment { - height: 1, - block_id: Default::default(), - commitment: Commitment([1; 32]), - }; - manager.post_block_commitment(commitment.clone()).await?; - let commitment2 = BlockCommitment { - height: 2, - block_id: Default::default(), - commitment: Commitment([2; 32]), - }; - manager.post_block_commitment(commitment2).await?; - let item = event_stream.next().await; - let res = item.unwrap(); - let event = res.unwrap(); - assert_eq!(event, BlockCommitmentEvent::Accepted(commitment)); - Ok(()) - } - - #[tokio::test] - async fn test_block_commitment_rejected() -> Result<(), anyhow::Error> { - let mut client = MockMcrSettlementClient::new(); - client.block_lead_tolerance = 1; - let (manager, mut event_stream) = Manager::new(client.clone()); - let commitment = BlockCommitment { - height: 1, - block_id: Default::default(), - commitment: Commitment([1; 32]), - }; - client - .override_block_commitment(BlockCommitment { - height: 1, - block_id: Default::default(), - commitment: Commitment([3; 32]), - }) - .await; - manager.post_block_commitment(commitment.clone()).await?; - let commitment2 = BlockCommitment { - height: 2, - block_id: Default::default(), - commitment: Commitment([2; 32]), - }; - manager.post_block_commitment(commitment2).await?; - let item = event_stream.next().await; - let res = item.unwrap(); - let event = res.unwrap(); - assert_eq!( - event, - BlockCommitmentEvent::Rejected { - height: 1, - reason: BlockCommitmentRejectionReason::InvalidCommitment, - } - ); - Ok(()) - } - - #[tokio::test] - async fn test_back_pressure() -> Result<(), anyhow::Error> { - let mut client = MockMcrSettlementClient::new(); - client.block_lead_tolerance = 2; - client.pause_after(2).await; - let (manager, mut event_stream) = Manager::new(client.clone()); - - let commitment1 = BlockCommitment { - height: 1, - block_id: Default::default(), - commitment: Commitment([1; 32]), - }; - manager.post_block_commitment(commitment1.clone()).await?; - let commitment2 = BlockCommitment { - height: 2, - block_id: Default::default(), - commitment: Commitment([2; 32]), - }; - manager.post_block_commitment(commitment2.clone()).await?; - let commitment3 = BlockCommitment { - height: 3, - block_id: Default::default(), - commitment: Commitment([3; 32]), - }; - manager.post_block_commitment(commitment3.clone()).await?; - - let event = event_stream.next().await.expect("stream has ended")?; - assert_eq!(event, BlockCommitmentEvent::Accepted(commitment1.clone())); - let event = event_stream.next().await.expect("stream has ended")?; - assert_eq!(event, BlockCommitmentEvent::Accepted(commitment2.clone())); - - // The batch of first two should have been posted, - // the third commitment is batched in the manager. 
- assert_eq!(client.get_commitment_at_height(1).await?, Some(commitment1.clone())); - assert_eq!(client.get_commitment_at_height(2).await?, Some(commitment2.clone())); - assert_eq!(client.get_commitment_at_height(3).await?, None); - - // Unblock the client, allowing processing of commitments to resume. - client.resume().await; - - let commitment4 = BlockCommitment { - height: 4, - block_id: Default::default(), - commitment: Commitment([4; 32]), - }; - manager.post_block_commitment(commitment4).await?; - let commitment5 = BlockCommitment { - height: 5, - block_id: Default::default(), - commitment: Commitment([5; 32]), - }; - manager.post_block_commitment(commitment5).await?; - - let event = event_stream.next().await.expect("stream has ended")?; - assert_eq!(event, BlockCommitmentEvent::Accepted(commitment3.clone())); - - Ok(()) - } + use super::*; + use mcr_settlement_client::mock::MockMcrSettlementClient; + use movement_types::{BlockCommitment, Commitment}; + + #[tokio::test] + async fn test_block_commitment_accepted() -> Result<(), anyhow::Error> { + let mut client = MockMcrSettlementClient::new(); + client.block_lead_tolerance = 1; + let (manager, mut event_stream) = Manager::new(client.clone()); + let commitment = BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment([1; 32]), + }; + manager.post_block_commitment(commitment.clone()).await?; + let commitment2 = BlockCommitment { + height: 2, + block_id: Default::default(), + commitment: Commitment([2; 32]), + }; + manager.post_block_commitment(commitment2).await?; + let item = event_stream.next().await; + let res = item.unwrap(); + let event = res.unwrap(); + assert_eq!(event, BlockCommitmentEvent::Accepted(commitment)); + Ok(()) + } + + #[tokio::test] + async fn test_block_commitment_rejected() -> Result<(), anyhow::Error> { + let mut client = MockMcrSettlementClient::new(); + client.block_lead_tolerance = 1; + let (manager, mut event_stream) = Manager::new(client.clone()); + let commitment = BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment([1; 32]), + }; + client + .override_block_commitment(BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment([3; 32]), + }) + .await; + manager.post_block_commitment(commitment.clone()).await?; + let commitment2 = BlockCommitment { + height: 2, + block_id: Default::default(), + commitment: Commitment([2; 32]), + }; + manager.post_block_commitment(commitment2).await?; + let item = event_stream.next().await; + let res = item.unwrap(); + let event = res.unwrap(); + assert_eq!( + event, + BlockCommitmentEvent::Rejected { + height: 1, + reason: BlockCommitmentRejectionReason::InvalidCommitment, + } + ); + Ok(()) + } + + #[tokio::test] + async fn test_back_pressure() -> Result<(), anyhow::Error> { + let mut client = MockMcrSettlementClient::new(); + client.block_lead_tolerance = 2; + client.pause_after(2).await; + let (manager, mut event_stream) = Manager::new(client.clone()); + + let commitment1 = BlockCommitment { + height: 1, + block_id: Default::default(), + commitment: Commitment([1; 32]), + }; + manager.post_block_commitment(commitment1.clone()).await?; + let commitment2 = BlockCommitment { + height: 2, + block_id: Default::default(), + commitment: Commitment([2; 32]), + }; + manager.post_block_commitment(commitment2.clone()).await?; + let commitment3 = BlockCommitment { + height: 3, + block_id: Default::default(), + commitment: Commitment([3; 32]), + }; + 
manager.post_block_commitment(commitment3.clone()).await?;
+
+		let event = event_stream.next().await.expect("stream has ended")?;
+		assert_eq!(event, BlockCommitmentEvent::Accepted(commitment1.clone()));
+		let event = event_stream.next().await.expect("stream has ended")?;
+		assert_eq!(event, BlockCommitmentEvent::Accepted(commitment2.clone()));
+
+		// The batch of the first two should have been posted;
+		// the third commitment is batched in the manager.
+		assert_eq!(client.get_commitment_at_height(1).await?, Some(commitment1.clone()));
+		assert_eq!(client.get_commitment_at_height(2).await?, Some(commitment2.clone()));
+		assert_eq!(client.get_commitment_at_height(3).await?, None);
+
+		// Unblock the client, allowing processing of commitments to resume.
+		client.resume().await;
+
+		let commitment4 = BlockCommitment {
+			height: 4,
+			block_id: Default::default(),
+			commitment: Commitment([4; 32]),
+		};
+		manager.post_block_commitment(commitment4).await?;
+		let commitment5 = BlockCommitment {
+			height: 5,
+			block_id: Default::default(),
+			commitment: Commitment([5; 32]),
+		};
+		manager.post_block_commitment(commitment5).await?;
+
+		let event = event_stream.next().await.expect("stream has ended")?;
+		assert_eq!(event, BlockCommitmentEvent::Accepted(commitment3.clone()));
+
+		Ok(())
+	}
 }
diff --git a/rustfmt.toml b/rustfmt.toml
index c34215391..ac90e59de 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -3,22 +3,12 @@ edition = "2021"
 hard_tabs = true
 max_width = 100
 use_small_heuristics = "Max"
+
 # Imports
-imports_granularity = "Crate"
 reorder_imports = true
+
 # Consistency
 newline_style = "Unix"
+
 # Misc
 chain_width = 80
-spaces_around_ranges = false
-binop_separator = "Back"
-reorder_impl_items = false
-match_arm_leading_pipes = "Preserve"
-match_arm_blocks = false
-match_block_trailing_comma = true
-trailing_comma = "Vertical"
-trailing_semicolon = false
-use_field_init_shorthand = true
-# Format comments
-comment_width = 100
-wrap_comments = true
diff --git a/util/buildtime/buildtime-helpers/src/cargo.rs b/util/buildtime/buildtime-helpers/src/cargo.rs
index 9e1c89947..30e759593 100644
--- a/util/buildtime/buildtime-helpers/src/cargo.rs
+++ b/util/buildtime/buildtime-helpers/src/cargo.rs
@@ -5,38 +5,39 @@ use std::str; // You will need the `serde_json` crate

 /// Gets the current cargo workspace root using `cargo metadata`
 pub fn cargo_workspace() -> Result<PathBuf, anyhow::Error> {
-	let output =
-		Command::new("cargo").args(["metadata", "--format-version=1", "--no-deps"]).output()?;
+	let output = Command::new("cargo")
+		.args(["metadata", "--format-version=1", "--no-deps"])
+		.output()?;

-	let metadata = str::from_utf8(&output.stdout)?;
-	let json: Value = serde_json::from_str(metadata)?;
-	let workspace_root = json["workspace_root"]
-		.as_str()
-		.ok_or(anyhow::anyhow!("Could not get workspace root from cargo metadata"))?;
+	let metadata = str::from_utf8(&output.stdout)?;
+	let json: Value = serde_json::from_str(metadata)?;
+	let workspace_root = json["workspace_root"]
+		.as_str()
+		.ok_or(anyhow::anyhow!("Could not get workspace root from cargo metadata"))?;

-	Ok(PathBuf::from(workspace_root))
+	Ok(PathBuf::from(workspace_root))
 }

 #[cfg(test)]
 pub mod test {
-	use super::*;
-	use std::fs;
+	use super::*;
+	use std::fs;

-	#[test]
-	fn test_cargo_workspace() -> Result<(), anyhow::Error> {
-		// Get the cargo workspace
-		let workspace = cargo_workspace()?;
+	#[test]
+	fn test_cargo_workspace() -> Result<(), anyhow::Error> {
+		// Get the cargo workspace
+		let workspace = cargo_workspace()?;

-		// Check that a Cargo.toml file exists in the workspace
-		assert_eq!(workspace.join("Cargo.toml").exists(), true);
+		// Check that a Cargo.toml file exists in the workspace
+		assert_eq!(workspace.join("Cargo.toml").exists(), true);

-		// Parse the toml and check that workspace.package.authors is ["Movement Labs"]
-		let toml = fs::read_to_string(workspace.join("Cargo.toml"))?;
-		let toml: toml::Value = toml::from_str(&toml)?;
-		let authors = toml["workspace"]["package"]["authors"].as_array();
-		assert_eq!(authors, Some(&vec![toml::Value::String("Movement Labs".to_string())]));
+		// Parse the toml and check that workspace.package.authors is ["Movement Labs"]
+		let toml = fs::read_to_string(workspace.join("Cargo.toml"))?;
+		let toml: toml::Value = toml::from_str(&toml)?;
+		let authors = toml["workspace"]["package"]["authors"].as_array();
+		assert_eq!(authors, Some(&vec![toml::Value::String("Movement Labs".to_string())]));

-		Ok(())
-	}
+		Ok(())
+	}
 }
diff --git a/util/buildtime/buildtime-helpers/src/proto.rs b/util/buildtime/buildtime-helpers/src/proto.rs
index e7b5f893e..5af8860f1 100644
--- a/util/buildtime/buildtime-helpers/src/proto.rs
+++ b/util/buildtime/buildtime-helpers/src/proto.rs
@@ -2,24 +2,24 @@ use crate::cargo::cargo_workspace;
 use std::path::PathBuf;

 pub fn proto() -> Result<PathBuf, anyhow::Error> {
-	let workspace = cargo_workspace()?;
-	let proto_dir = workspace.join("proto");
-	Ok(proto_dir)
+	let workspace = cargo_workspace()?;
+	let proto_dir = workspace.join("proto");
+	Ok(proto_dir)
 }

 #[cfg(test)]
 pub mod test {
-	use super::*;
+	use super::*;

-	#[test]
-	fn test_proto() -> Result<(), anyhow::Error> {
-		// Get the proto directory
-		let proto = proto()?;
+	#[test]
+	fn test_proto() -> Result<(), anyhow::Error> {
+		// Get the proto directory
+		let proto = proto()?;

-		// check that it exists
-		assert_eq!(proto.exists(), true);
+		// check that it exists
+		assert_eq!(proto.exists(), true);

-		Ok(())
-	}
+		Ok(())
+	}
 }
diff --git a/util/buildtime/buildtime-macros/src/lib.rs b/util/buildtime/buildtime-macros/src/lib.rs
index 026b46b3c..6a80329ed 100644
--- a/util/buildtime/buildtime-macros/src/lib.rs
+++ b/util/buildtime/buildtime-macros/src/lib.rs
@@ -6,87 +6,87 @@ use syn::{parse::Parse, parse_macro_input, punctuated::Punctuated, LitStr, Result};

 #[proc_macro]
 pub fn cargo_workspace(_input: TokenStream) -> TokenStream {
-	let workspace = buildtime_helpers::cargo::cargo_workspace().unwrap();
-	let workspace_str = workspace.to_str().unwrap();
-	let code = quote! {
-		std::path::PathBuf::from(#workspace_str)
-	};
-	code.into()
+	let workspace = buildtime_helpers::cargo::cargo_workspace().unwrap();
+	let workspace_str = workspace.to_str().unwrap();
+	let code = quote! {
+		std::path::PathBuf::from(#workspace_str)
+	};
+	code.into()
 }

 #[proc_macro]
 pub fn proto(_input: TokenStream) -> TokenStream {
-	let proto = buildtime_helpers::proto::proto().unwrap();
-	let proto_str = proto.to_str().unwrap();
-	let code = quote! {
-		std::path::PathBuf::from(#proto_str)
-	};
-	code.into()
+	let proto = buildtime_helpers::proto::proto().unwrap();
+	let proto_str = proto.to_str().unwrap();
+	let code = quote!
{
+		std::path::PathBuf::from(#proto_str)
+	};
+	code.into()
 }

 // Define a custom struct that holds a Punctuated list
 struct ParsablePuncuated {
-	list: Punctuated<LitStr, syn::token::Comma>,
+	list: Punctuated<LitStr, syn::token::Comma>,
 }

 // Implement Parse for ParsablePuncuated
 impl Parse for ParsablePuncuated {
-	fn parse(input: syn::parse::ParseStream) -> Result<Self> {
-		let list = Punctuated::parse_terminated(input)?;
-		Ok(ParsablePuncuated { list })
-	}
+	fn parse(input: syn::parse::ParseStream) -> Result<Self> {
+		let list = Punctuated::parse_terminated(input)?;
+		Ok(ParsablePuncuated { list })
+	}
 }

 #[proc_macro]
 pub fn proto_build_main(input: TokenStream) -> TokenStream {
-	// Use custom parsing struct
-	let ParsablePuncuated { list: inputs } = parse_macro_input!(input as ParsablePuncuated);
+	// Use custom parsing struct
+	let ParsablePuncuated { list: inputs } = parse_macro_input!(input as ParsablePuncuated);

-	// Assume proto_dir is provided by a runtime function and convert it to a string
-	let proto_dir = buildtime_helpers::proto::proto().unwrap();
-	let proto_dir_str = proto_dir.to_str().unwrap();
+	// Assume proto_dir is provided by a runtime function and convert it to a string
+	let proto_dir = buildtime_helpers::proto::proto().unwrap();
+	let proto_dir_str = proto_dir.to_str().unwrap();

-	// Collect input files into a Rust array expression
-	let proto_files: Vec<_> = inputs
-		.iter()
-		.map(|lit_str| {
-			let file = lit_str.value();
-			// Combine proto_dir with the relative path
-			let full_path = PathBuf::from(proto_dir_str).join(file).display().to_string();
-			quote! { #full_path }
-		})
-		.collect();
+	// Collect input files into a Rust array expression
+	let proto_files: Vec<_> = inputs
+		.iter()
+		.map(|lit_str| {
+			let file = lit_str.value();
+			// Combine proto_dir with the relative path
+			let full_path = PathBuf::from(proto_dir_str).join(file).display().to_string();
+			quote! { #full_path }
+		})
+		.collect();

-	// Generate the code
-	let expanded = quote! {
-		fn main() -> Result<(), Box<dyn std::error::Error>> {
-			let proto_files = &[#(#proto_files),*];
-			let proto_include_dirs = &[#proto_dir_str];
+	// Generate the code
+	let expanded = quote!
{
+		fn main() -> Result<(), Box<dyn std::error::Error>> {
+			let proto_files = &[#(#proto_files),*];
+			let proto_include_dirs = &[#proto_dir_str];

-			// Set up file descriptors for reflection
-			let out_dir = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap());
-			let crate_name = std::env::var("CARGO_PKG_NAME").unwrap();
-			let proto_descriptor_filename = format!("{}-descriptor.bin", crate_name);
-			let descriptor_file_path = out_dir.join(proto_descriptor_filename);
+			// Set up file descriptors for reflection
+			let out_dir = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap());
+			let crate_name = std::env::var("CARGO_PKG_NAME").unwrap();
+			let proto_descriptor_filename = format!("{}-descriptor.bin", crate_name);
+			let descriptor_file_path = out_dir.join(proto_descriptor_filename);

-			// Check if specific features are enabled and default to enabling both if neither is enabled
-			let client_enabled = cfg!(feature = "client");
-			let server_enabled = cfg!(feature = "server");
+			// Check if specific features are enabled and default to enabling both if neither is enabled
+			let client_enabled = cfg!(feature = "client");
+			let server_enabled = cfg!(feature = "server");

-			let mut config = tonic_build::configure()
-				.file_descriptor_set_path(descriptor_file_path)
-				.include_file("all.rs")
-				.build_client(client_enabled)
-				.build_server(server_enabled);
+			let mut config = tonic_build::configure()
+				.file_descriptor_set_path(descriptor_file_path)
+				.include_file("all.rs")
+				.build_client(client_enabled)
+				.build_server(server_enabled);

-			// Compile the proto files based on the configuration
-			config.compile(proto_files, proto_include_dirs)?;
+			// Compile the proto files based on the configuration
+			config.compile(proto_files, proto_include_dirs)?;

-			Ok(())
-		}
-	};
+			Ok(())
+		}
+	};

-	// Convert the generated code back into a token stream
-	TokenStream::from(expanded)
+	// Convert the generated code back into a token stream
+	TokenStream::from(expanded)
 }
diff --git a/util/buildtime/src/macro_tests.rs b/util/buildtime/src/macro_tests.rs
index cee1f6e48..0c223841d 100644
--- a/util/buildtime/src/macro_tests.rs
+++ b/util/buildtime/src/macro_tests.rs
@@ -1,19 +1,19 @@
 #[test]
 pub fn test_cargo_workspace() -> Result<(), anyhow::Error> {
-	let macro_result = buildtime_macros::cargo_workspace!();
-	let runtime_result = buildtime_helpers::cargo::cargo_workspace()?;
+	let macro_result = buildtime_macros::cargo_workspace!();
+	let runtime_result = buildtime_helpers::cargo::cargo_workspace()?;

-	assert_eq!(macro_result, runtime_result);
+	assert_eq!(macro_result, runtime_result);

-	Ok(())
+	Ok(())
 }

 #[test]
 pub fn test_proto() -> Result<(), anyhow::Error> {
-	let macro_result = buildtime_macros::proto!();
-	let runtime_result = buildtime_helpers::proto::proto()?;
+	let macro_result = buildtime_macros::proto!();
+	let runtime_result = buildtime_helpers::proto::proto()?;

-	assert_eq!(macro_result, runtime_result);
+	assert_eq!(macro_result, runtime_result);

-	Ok(())
+	Ok(())
 }
diff --git a/util/movement-types/src/lib.rs b/util/movement-types/src/lib.rs
index d4a44b24b..b418adccc 100644
--- a/util/movement-types/src/lib.rs
+++ b/util/movement-types/src/lib.rs
@@ -9,182 +9,182 @@ use core::fmt;
 pub struct Id(pub [u8; 32]);

 impl Id {
-	pub fn test() -> Self {
-		Self([0; 32])
-	}
+	pub fn test() -> Self {
+		Self([0; 32])
+	}

-	pub fn to_vec(&self) -> Vec<u8> {
-		self.0.into()
-	}
+	pub fn to_vec(&self) -> Vec<u8> {
+		self.0.into()
+	}

-	pub fn genesis_block() -> Self {
-		Self([0; 32])
-	}
+	pub fn genesis_block() -> Self {
+		Self([0; 32])
+	}
 }

 impl fmt::Display for Id {
-	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-		write!(f, "{:?}", &self.0)
-	}
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		write!(f, "{:?}", &self.0)
+	}
 }

 #[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub struct Transaction(pub Vec<u8>);

 impl From<Vec<u8>> for Transaction {
-	fn from(data: Vec<u8>) -> Self {
-		Self(data)
-	}
+	fn from(data: Vec<u8>) -> Self {
+		Self(data)
+	}
 }

 impl Transaction {
-	pub fn new(data: Vec<u8>) -> Self {
-		Self(data)
-	}
+	pub fn new(data: Vec<u8>) -> Self {
+		Self(data)
+	}

-	pub fn id(&self) -> Id {
-		let mut hasher = sha2::Sha256::new();
-		hasher.update(&self.0);
-		Id(hasher.finalize().into())
-	}
+	pub fn id(&self) -> Id {
+		let mut hasher = sha2::Sha256::new();
+		hasher.update(&self.0);
+		Id(hasher.finalize().into())
+	}

-	pub fn test() -> Self {
-		Self(vec![0])
-	}
+	pub fn test() -> Self {
+		Self(vec![0])
+	}
 }

 #[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub struct TransactionEntry {
-	pub consumer_id: Id,
-	pub data: Transaction,
+	pub consumer_id: Id,
+	pub data: Transaction,
 }

 #[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub struct AtomicTransactionBundle {
-	pub sequencer_id: Id,
-	pub transactions: Vec<TransactionEntry>,
+	pub sequencer_id: Id,
+	pub transactions: Vec<TransactionEntry>,
 }

 impl TryFrom<AtomicTransactionBundle> for Transaction {
-	type Error = anyhow::Error;
+	type Error = anyhow::Error;

-	fn try_from(value: AtomicTransactionBundle) -> Result<Self, Self::Error> {
-		if value.transactions.len() == 1 {
-			Ok(value.transactions[0].data.clone())
-		} else {
-			Err(anyhow::anyhow!("AtomicTransactionBundle must contain exactly one transaction"))
-		}
-	}
+	fn try_from(value: AtomicTransactionBundle) -> Result<Self, Self::Error> {
+		if value.transactions.len() == 1 {
+			Ok(value.transactions[0].data.clone())
+		} else {
+			Err(anyhow::anyhow!("AtomicTransactionBundle must contain exactly one transaction"))
+		}
+	}
 }

 impl From<Transaction> for AtomicTransactionBundle {
-	fn from(transaction: Transaction) -> Self {
-		Self {
-			sequencer_id: Id::default(),
-			transactions: vec![TransactionEntry { consumer_id: Id::default(), data: transaction }],
-		}
-	}
+	fn from(transaction: Transaction) -> Self {
+		Self {
+			sequencer_id: Id::default(),
+			transactions: vec![TransactionEntry { consumer_id: Id::default(), data: transaction }],
+		}
+	}
 }

 #[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub enum BlockMetadata {
-	#[default]
-	BlockMetadata,
+	#[default]
+	BlockMetadata,
 }

 #[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub struct Block {
-	pub metadata: BlockMetadata,
-	pub parent: Vec<u8>,
-	pub transactions: Vec<Transaction>,
+	pub metadata: BlockMetadata,
+	pub parent: Vec<u8>,
+	pub transactions: Vec<Transaction>,
 }

 impl Block {
-	pub fn new(metadata: BlockMetadata, parent: Vec<u8>, transactions: Vec<Transaction>) -> Self {
-		Self { metadata, parent, transactions }
-	}
-
-	pub fn id(&self) -> Id {
-		let mut hasher = sha2::Sha256::new();
-		hasher.update(&self.parent);
-		for transaction in &self.transactions {
-			hasher.update(&transaction.0);
-		}
-		Id(hasher.finalize().into())
-	}
-
-	pub fn test() -> Self {
-		Self {
-			metadata: BlockMetadata::BlockMetadata,
-			parent: vec![0],
-			transactions: vec![Transaction::test()],
-		}
-	}
-
-	pub fn add_transaction(&mut self, transaction: Transaction) {
-		self.transactions.push(transaction);
-	}
+	pub fn new(metadata: BlockMetadata, parent: Vec<u8>, transactions: Vec<Transaction>) -> Self {
+		Self { metadata, parent, transactions }
+	}
+
+	pub fn id(&self) -> Id {
+		let mut hasher = sha2::Sha256::new();
+		hasher.update(&self.parent);
+		for transaction in &self.transactions {
+			hasher.update(&transaction.0);
+		}
+		Id(hasher.finalize().into())
+	}
+
+	pub fn test() -> Self {
+		Self {
+			metadata: BlockMetadata::BlockMetadata,
+			parent: vec![0],
+			transactions: vec![Transaction::test()],
+		}
+	}
+
+	pub fn add_transaction(&mut self, transaction: Transaction) {
+		self.transactions.push(transaction);
+	}
 }

 #[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub struct Commitment(pub [u8; 32]);

 impl Commitment {
-	pub fn test() -> Self {
-		Self([0; 32])
-	}
+	pub fn test() -> Self {
+		Self([0; 32])
+	}

-	/// Creates a commitment by making a cryptographic digest of the state proof.
-	pub fn digest_state_proof(state_proof: &StateProof) -> Self {
-		let mut hasher = sha2::Sha256::new();
-		bcs::serialize_into(&mut hasher, &state_proof).expect("unexpected serialization error");
-		Self(hasher.finalize().into())
-	}
+	/// Creates a commitment by making a cryptographic digest of the state proof.
+	pub fn digest_state_proof(state_proof: &StateProof) -> Self {
+		let mut hasher = sha2::Sha256::new();
+		bcs::serialize_into(&mut hasher, &state_proof).expect("unexpected serialization error");
+		Self(hasher.finalize().into())
+	}
 }

 impl TryFrom<Vec<u8>> for Commitment {
-	type Error = std::array::TryFromSliceError;
+	type Error = std::array::TryFromSliceError;

-	fn try_from(data: Vec<u8>) -> Result<Self, Self::Error> {
-		Ok(Self(data[..32].try_into()?))
-	}
+	fn try_from(data: Vec<u8>) -> Result<Self, Self::Error> {
+		Ok(Self(data[..32].try_into()?))
+	}
 }

 impl From<[u8; 32]> for Commitment {
-	fn from(data: [u8; 32]) -> Self {
-		Self(data)
-	}
+	fn from(data: [u8; 32]) -> Self {
+		Self(data)
+	}
 }

 impl From<Commitment> for [u8; 32] {
-	fn from(commitment: Commitment) -> [u8; 32] {
-		commitment.0
-	}
+	fn from(commitment: Commitment) -> [u8; 32] {
+		commitment.0
+	}
 }

 impl From<Commitment> for Vec<u8> {
-	fn from(commitment: Commitment) -> Vec<u8> {
-		commitment.0.into()
-	}
+	fn from(commitment: Commitment) -> Vec<u8> {
+		commitment.0.into()
+	}
 }

 #[derive(Serialize, Deserialize, Clone, Default, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub struct BlockCommitment {
-	pub height: u64,
-	pub block_id: Id,
-	pub commitment: Commitment,
+	pub height: u64,
+	pub block_id: Id,
+	pub commitment: Commitment,
 }

 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub enum BlockCommitmentRejectionReason {
-	InvalidBlockId,
-	InvalidCommitment,
-	InvalidHeight,
-	ContractError,
+	InvalidBlockId,
+	InvalidCommitment,
+	InvalidHeight,
+	ContractError,
 }

 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub enum BlockCommitmentEvent {
-	Accepted(BlockCommitment),
-	Rejected { height: u64, reason: BlockCommitmentRejectionReason },
+	Accepted(BlockCommitment),
+	Rejected { height: u64, reason: BlockCommitmentRejectionReason },
 }
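Reviewer note: to see how the pieces in this patch fit together, here is a small end-to-end sketch mirroring `test_block_commitment_accepted`: the mock client drives the manager, and the event stream must be polled for settlement to progress. The crate name `mcr_settlement_manager` is assumed from the package path; everything else is taken from the code above. Note that the manager only flushes its batch once a posted height exceeds the max tolerable height, so with `block_lead_tolerance = 1` a second commitment is needed before the first event arrives.

use mcr_settlement_client::mock::MockMcrSettlementClient;
use mcr_settlement_manager::{McrSettlementManager, McrSettlementManagerOperations};
use movement_types::{BlockCommitment, BlockCommitmentEvent, Commitment};
use tokio_stream::StreamExt;

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
	let mut client = MockMcrSettlementClient::new();
	client.block_lead_tolerance = 1;
	let (manager, mut events) = McrSettlementManager::new(client);

	let commitment = BlockCommitment {
		height: 1,
		block_id: Default::default(),
		commitment: Commitment::test(),
	};
	manager.post_block_commitment(commitment.clone()).await?;
	// Posting height 2 exceeds the tolerance and flushes the batch to the client.
	manager
		.post_block_commitment(BlockCommitment { height: 2, ..commitment.clone() })
		.await?;

	// Polling the stream drives settlement; the mock echoes the first
	// commitment back as accepted.
	let event = events.next().await.expect("stream has ended")?;
	assert_eq!(event, BlockCommitmentEvent::Accepted(commitment));
	Ok(())
}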