diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bbd8091..803f7156 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/). ## Fixed +- Fixes all unwraps() in code to improve error logging - Simplified Update_Job for Database. - Simplified otel setup. - Added new_with_settings to SharpClient. diff --git a/Cargo.lock b/Cargo.lock index 91867e97..9466c0f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4527,6 +4527,7 @@ dependencies = [ "serde", "settlement-client-interface", "starknet-os", + "thiserror", "tokio", "tokio-test", "tracing", @@ -9672,7 +9673,6 @@ dependencies = [ name = "starknet-settlement-client" version = "0.1.0" dependencies = [ - "anyhow", "appchain-core-contract-client", "async-std", "async-trait", diff --git a/crates/orchestrator/src/config.rs b/crates/orchestrator/src/config.rs index a64e4027..927bf243 100644 --- a/crates/orchestrator/src/config.rs +++ b/crates/orchestrator/src/config.rs @@ -90,7 +90,7 @@ pub async fn get_aws_config(settings_provider: &impl Settings) -> SdkConfig { } /// Initializes the app config -pub async fn init_config() -> Arc { +pub async fn init_config() -> color_eyre::Result> { dotenv().ok(); let settings_provider = EnvSettingsProvider {}; @@ -104,7 +104,7 @@ pub async fn init_config() -> Arc { // init database let database = build_database_client(&settings_provider).await; let da_client = build_da_client(&settings_provider).await; - let settlement_client = build_settlement_client(&settings_provider).await; + let settlement_client = build_settlement_client(&settings_provider).await?; let prover_client = build_prover_service(&settings_provider); let storage_client = build_storage_client(&settings_provider, provider_config.clone()).await; let alerts_client = build_alert_client(&settings_provider, provider_config.clone()).await; @@ -115,7 +115,7 @@ pub async fn init_config() -> Arc { // us stop using the generic omniqueue abstractions for 
message ack/nack let queue = build_queue_client(); - Arc::new(Config::new( + Ok(Arc::new(Config::new( rpc_url, snos_url, Arc::new(provider), @@ -126,7 +126,7 @@ pub async fn init_config() -> Arc { queue, storage_client, alerts_client, - )) + ))) } impl Config { @@ -230,24 +230,26 @@ pub fn build_prover_service(settings_provider: &impl Settings) -> Box Box { +pub async fn build_settlement_client( + settings_provider: &impl Settings, +) -> color_eyre::Result> { match get_env_var_or_panic("SETTLEMENT_LAYER").as_str() { "ethereum" => { #[cfg(not(feature = "testing"))] { - Box::new(EthereumSettlementClient::new_with_settings(settings_provider)) + Ok(Box::new(EthereumSettlementClient::new_with_settings(settings_provider))) } #[cfg(feature = "testing")] { - Box::new(EthereumSettlementClient::with_test_settings( - RootProvider::new_http(get_env_var_or_panic("SETTLEMENT_RPC_URL").as_str().parse().unwrap()), - Address::from_str(&get_env_var_or_panic("L1_CORE_CONTRACT_ADDRESS")).unwrap(), - Url::from_str(get_env_var_or_panic("SETTLEMENT_RPC_URL").as_str()).unwrap(), - Some(Address::from_str(get_env_var_or_panic("STARKNET_OPERATOR_ADDRESS").as_str()).unwrap()), - )) + Ok(Box::new(EthereumSettlementClient::with_test_settings( + RootProvider::new_http(get_env_var_or_panic("SETTLEMENT_RPC_URL").as_str().parse()?), + Address::from_str(&get_env_var_or_panic("L1_CORE_CONTRACT_ADDRESS"))?, + Url::from_str(get_env_var_or_panic("SETTLEMENT_RPC_URL").as_str())?, + Some(Address::from_str(get_env_var_or_panic("STARKNET_OPERATOR_ADDRESS").as_str())?), + ))) } } - "starknet" => Box::new(StarknetSettlementClient::new_with_settings(settings_provider).await), + "starknet" => Ok(Box::new(StarknetSettlementClient::new_with_settings(settings_provider).await)), _ => panic!("Unsupported Settlement layer"), } } diff --git a/crates/orchestrator/src/jobs/conversion.rs b/crates/orchestrator/src/jobs/conversion.rs new file mode 100644 index 00000000..63a4a723 --- /dev/null +++ 
b/crates/orchestrator/src/jobs/conversion.rs @@ -0,0 +1,10 @@ +use std::str::FromStr; + +use crate::jobs::{JobError, OtherError}; + +pub fn parse_string(value: &str) -> Result +where + T::Err: std::fmt::Display, +{ + value.parse::().map_err(|e| JobError::Other(OtherError::from(format!("Could not parse string: {e}")))) +} diff --git a/crates/orchestrator/src/jobs/da_job/mod.rs b/crates/orchestrator/src/jobs/da_job/mod.rs index cc36a114..2c51ace8 100644 --- a/crates/orchestrator/src/jobs/da_job/mod.rs +++ b/crates/orchestrator/src/jobs/da_job/mod.rs @@ -31,13 +31,13 @@ lazy_static! { pub static ref GENERATOR: BigUint = BigUint::from_str( "39033254847818212395286706435128746857159659164139250548781411570340225835782", ) - .unwrap(); + .expect("Failed to convert to biguint"); pub static ref BLS_MODULUS: BigUint = BigUint::from_str( "52435875175126190479447740508185965837690552500527637822603658699938581184513", ) - .unwrap(); - pub static ref TWO: BigUint = 2u32.to_biguint().unwrap(); + .expect("Failed to convert to biguint"); + pub static ref TWO: BigUint = 2u32.to_biguint().expect("Failed to convert to biguint"); pub static ref BLOB_LEN: usize = 4096; } diff --git a/crates/orchestrator/src/jobs/mod.rs b/crates/orchestrator/src/jobs/mod.rs index 20565f94..52d53b96 100644 --- a/crates/orchestrator/src/jobs/mod.rs +++ b/crates/orchestrator/src/jobs/mod.rs @@ -5,6 +5,7 @@ use std::time::Duration; use async_trait::async_trait; use color_eyre::eyre::{eyre, Context}; +use conversion::parse_string; use da_job::DaError; use mockall::automock; use mockall_double::double; @@ -25,6 +26,7 @@ use crate::metrics::ORCHESTRATOR_METRICS; use crate::queue::job_queue::{add_job_to_process_queue, add_job_to_verification_queue, ConsumptionError}; pub mod constants; +pub mod conversion; pub mod da_job; pub mod job_handler_factory; pub mod proving_job; @@ -173,7 +175,7 @@ pub async fn create_job( KeyValue::new("job", format!("{:?}", job_item)), ]; - 
ORCHESTRATOR_METRICS.block_gauge.record(internal_id.parse::().unwrap(), &attributes); + ORCHESTRATOR_METRICS.block_gauge.record(parse_string(&internal_id)?, &attributes); tracing::info!(log_type = "completed", category = "general", function_type = "create_job", block_no = %internal_id, "General create job completed for block"); Ok(()) } @@ -260,8 +262,7 @@ pub async fn process_job(id: Uuid, config: Arc) -> Result<(), JobError> KeyValue::new("job", format!("{:?}", job)), ]; - ORCHESTRATOR_METRICS.block_gauge.record(job.internal_id.parse::().unwrap(), &attributes); - + ORCHESTRATOR_METRICS.block_gauge.record(parse_string(&job.internal_id)?, &attributes); tracing::info!(log_type = "completed", category = "general", function_type = "process_job", block_no = %internal_id, "General process job completed for block"); Ok(()) } @@ -394,8 +395,7 @@ pub async fn verify_job(id: Uuid, config: Arc) -> Result<(), JobError> { KeyValue::new("job", format!("{:?}", job)), ]; - ORCHESTRATOR_METRICS.block_gauge.record(job.internal_id.parse::().unwrap(), &attributes); - + ORCHESTRATOR_METRICS.block_gauge.record(parse_string(&job.internal_id)?, &attributes); tracing::info!(log_type = "completed", category = "general", function_type = "verify_job", block_no = %internal_id, "General verify job completed for block"); Ok(()) } @@ -460,7 +460,10 @@ pub fn increment_key_in_metadata( let attempt = get_u64_from_metadata(metadata, key).map_err(|e| JobError::Other(OtherError(e)))?; let incremented_value = attempt.checked_add(1); incremented_value.ok_or_else(|| JobError::KeyOutOfBounds { key: key.to_string() })?; - new_metadata.insert(key.to_string(), incremented_value.unwrap().to_string()); + new_metadata.insert( + key.to_string(), + incremented_value.ok_or(JobError::Other(OtherError(eyre!("Overflow while incrementing attempt"))))?.to_string(), + ); Ok(new_metadata) } diff --git a/crates/orchestrator/src/jobs/snos_job/fact_topology.rs b/crates/orchestrator/src/jobs/snos_job/fact_topology.rs index 
0128db74..e56a9d8c 100644 --- a/crates/orchestrator/src/jobs/snos_job/fact_topology.rs +++ b/crates/orchestrator/src/jobs/snos_job/fact_topology.rs @@ -104,7 +104,7 @@ pub fn get_page_sizes(pages: &HashMap, output_size: usi tracing::trace!("FactTopology Processing non-first page"); ensure!( Some(page_start) == expected_page_start, - FactError::OutputPagesUnexpectedStart(page_id, page_start, expected_page_start.unwrap_or_default(),) + FactError::OutputPagesUnexpectedStart(page_id, page_start, expected_page_start.unwrap_or_default(),) /* The unwrap here is fine as the assert is exactly for this reason */ ); } @@ -123,7 +123,7 @@ pub fn get_page_sizes(pages: &HashMap, output_size: usi ensure!( pages.is_empty() || expected_page_start == Some(output_size), - FactError::OutputPagesUncoveredOutput(expected_page_start.unwrap_or_default(), output_size) + FactError::OutputPagesUncoveredOutput(expected_page_start.unwrap_or_default(), output_size) /* The unwrap here is fine as the assert is exactly for this reason */ ); tracing::debug!( diff --git a/crates/orchestrator/src/jobs/state_update_job/mod.rs b/crates/orchestrator/src/jobs/state_update_job/mod.rs index be659d47..9d288c52 100644 --- a/crates/orchestrator/src/jobs/state_update_job/mod.rs +++ b/crates/orchestrator/src/jobs/state_update_job/mod.rs @@ -171,7 +171,11 @@ impl Job for StateUpdateJob { // number : 0 let metadata_tx_hashes = job .metadata - .get(&format!("{}{}", JOB_METADATA_STATE_UPDATE_ATTEMPT_PREFIX, attempt_no.parse::().unwrap() - 1)) + .get(&format!( + "{}{}", + JOB_METADATA_STATE_UPDATE_ATTEMPT_PREFIX, + attempt_no.parse::().map_err(|e| JobError::Other(OtherError(eyre!(e))))? - 1 + )) .ok_or_else(|| StateUpdateError::TxnHashMetadataNotFound)? 
.clone() .replace(' ', ""); diff --git a/crates/orchestrator/src/jobs/state_update_job/utils.rs b/crates/orchestrator/src/jobs/state_update_job/utils.rs index af43870b..ab8bb7fb 100644 --- a/crates/orchestrator/src/jobs/state_update_job/utils.rs +++ b/crates/orchestrator/src/jobs/state_update_job/utils.rs @@ -23,7 +23,7 @@ pub async fn fetch_program_data_for_block(block_number: u64, config: Arc let storage_client = config.storage(); let key = block_number.to_string() + "/" + PROGRAM_OUTPUT_FILE_NAME; let blob_data = storage_client.get_data(&key).await?; - let transformed_blob_vec_u8 = bytes_to_vec_u8(blob_data.as_ref()); + let transformed_blob_vec_u8 = bytes_to_vec_u8(blob_data.as_ref())?; Ok(transformed_blob_vec_u8) } @@ -48,7 +48,7 @@ pub fn hex_string_to_u8_vec(hex_str: &str) -> color_eyre::Result> { Ok(result) } -pub fn bytes_to_vec_u8(bytes: &[u8]) -> Vec<[u8; 32]> { +pub fn bytes_to_vec_u8(bytes: &[u8]) -> color_eyre::Result> { let cursor = Cursor::new(bytes); let reader = std::io::BufReader::new(cursor); @@ -62,11 +62,13 @@ pub fn bytes_to_vec_u8(bytes: &[u8]) -> Vec<[u8; 32]> { let result = U256::from_str(trimmed).expect("Unable to convert line"); let res_vec = result.to_be_bytes_vec(); let hex = to_padded_hex(res_vec.as_slice()); - let vec_hex = hex_string_to_u8_vec(&hex).unwrap(); - program_output.push(vec_hex.try_into().unwrap()); + let vec_hex = hex_string_to_u8_vec(&hex) + .map_err(|e| eyre!(format!("Failed converting hex string to Vec, {:?}", e)))?; + program_output + .push(vec_hex.try_into().map_err(|e| eyre!(format!("Failed to convert Vec to [u8; 32] : {:?}", e)))?); } - program_output + Ok(program_output) } fn to_padded_hex(slice: &[u8]) -> String { diff --git a/crates/orchestrator/src/main.rs b/crates/orchestrator/src/main.rs index 92b697bb..7298209d 100644 --- a/crates/orchestrator/src/main.rs +++ b/crates/orchestrator/src/main.rs @@ -16,8 +16,10 @@ async fn main() { let meter_provider = setup_analytics(); tracing::info!(service = "orchestrator", 
"Starting orchestrator service"); + color_eyre::install().expect("Unable to install color_eyre"); + // initial config setup - let config = init_config().await; + let config = init_config().await.expect("Config instantiation failed"); tracing::debug!(service = "orchestrator", "Configuration initialized"); let host = get_env_var_or_default("HOST", "127.0.0.1"); diff --git a/crates/orchestrator/src/tests/config.rs b/crates/orchestrator/src/tests/config.rs index d565c2da..8fe11648 100644 --- a/crates/orchestrator/src/tests/config.rs +++ b/crates/orchestrator/src/tests/config.rs @@ -285,7 +285,9 @@ pub mod implement_client { ) -> Box { match service { ConfigType::Mock(client) => client.into(), - ConfigType::Actual => build_settlement_client(settings_provider).await, + ConfigType::Actual => { + build_settlement_client(settings_provider).await.expect("Failed to initialise settlement_client") + } ConfigType::Dummy => Box::new(MockSettlementClient::new()), } } diff --git a/crates/orchestrator/src/tests/jobs/state_update_job/mod.rs b/crates/orchestrator/src/tests/jobs/state_update_job/mod.rs index 1783779c..9a74e35f 100644 --- a/crates/orchestrator/src/tests/jobs/state_update_job/mod.rs +++ b/crates/orchestrator/src/tests/jobs/state_update_job/mod.rs @@ -30,7 +30,7 @@ use crate::tests::common::default_job_item; use crate::tests::config::TestConfigBuilder; lazy_static! 
{ - pub static ref CURRENT_PATH: PathBuf = std::env::current_dir().unwrap(); + pub static ref CURRENT_PATH: PathBuf = std::env::current_dir().expect("Failed to get Current Path"); } pub const X_0_FILE_NAME: &str = "x_0.txt"; diff --git a/crates/orchestrator/src/workers/data_submission_worker.rs b/crates/orchestrator/src/workers/data_submission_worker.rs index d1f0f937..bdba3458 100644 --- a/crates/orchestrator/src/workers/data_submission_worker.rs +++ b/crates/orchestrator/src/workers/data_submission_worker.rs @@ -24,8 +24,7 @@ impl Worker for DataSubmissionWorker { let latest_proven_job_id = config .database() .get_latest_job_by_type_and_status(JobType::ProofCreation, JobStatus::Completed) - .await - .unwrap() + .await? .map(|item| item.internal_id) .unwrap_or("0".to_string()); @@ -35,8 +34,7 @@ impl Worker for DataSubmissionWorker { let latest_data_submission_job_id = config .database() .get_latest_job_by_type(JobType::DataSubmission) - .await - .unwrap() + .await? .map(|item| item.internal_id) .unwrap_or("0".to_string()); diff --git a/crates/orchestrator/src/workers/snos.rs b/crates/orchestrator/src/workers/snos.rs index 186c0aad..4673b459 100644 --- a/crates/orchestrator/src/workers/snos.rs +++ b/crates/orchestrator/src/workers/snos.rs @@ -27,8 +27,7 @@ impl Worker for SnosWorker { let latest_block_processed_data = config .database() .get_latest_job_by_type_and_status(JobType::SnosRun, JobStatus::Completed) - .await - .unwrap() + .await? 
.map(|item| item.internal_id) .unwrap_or("0".to_string()); tracing::debug!(latest_processed_block = %latest_block_processed_data, "Fetched latest processed block from database"); @@ -39,8 +38,7 @@ impl Worker for SnosWorker { let job_in_db = config .database() .get_job_by_internal_id_and_type(&latest_block_number.to_string(), &JobType::SnosRun) - .await - .unwrap(); + .await?; if job_in_db.is_some() { tracing::trace!(block_number = %latest_block_number, "SNOS job already exists for the latest block"); diff --git a/crates/prover-services/sharp-service/src/client.rs b/crates/prover-services/sharp-service/src/client.rs index 505d2800..c652e23d 100644 --- a/crates/prover-services/sharp-service/src/client.rs +++ b/crates/prover-services/sharp-service/src/client.rs @@ -31,9 +31,15 @@ impl SharpClient { pub fn new_with_settings(url: Url, settings: &impl Settings) -> Self { // Getting the cert files from the .env and then decoding it from base64 - let cert = general_purpose::STANDARD.decode(settings.get_settings_or_panic("SHARP_USER_CRT")).unwrap(); - let key = general_purpose::STANDARD.decode(settings.get_settings_or_panic("SHARP_USER_KEY")).unwrap(); - let server_cert = general_purpose::STANDARD.decode(settings.get_settings_or_panic("SHARP_SERVER_CRT")).unwrap(); + let cert = general_purpose::STANDARD + .decode(settings.get_settings_or_panic("SHARP_USER_CRT")) + .expect("Failed to decode certificate"); + let key = general_purpose::STANDARD + .decode(settings.get_settings_or_panic("SHARP_USER_KEY")) + .expect("Failed to decode sharp user key"); + let server_cert = general_purpose::STANDARD + .decode(settings.get_settings_or_panic("SHARP_SERVER_CRT")) + .expect("Failed to decode sharp server certificate"); // Adding Customer ID to the url @@ -44,10 +50,15 @@ impl SharpClient { Self { base_url: url_mut, client: ClientBuilder::new() - .identity(Identity::from_pkcs8_pem(&cert, &key).unwrap()) - .add_root_certificate(Certificate::from_pem(server_cert.as_slice()).unwrap()) + 
.identity( + Identity::from_pkcs8_pem(&cert, &key) + .expect("Failed to build the identity from certificate and key"), + ) + .add_root_certificate( + Certificate::from_pem(server_cert.as_slice()).expect("Failed to add root certificate"), + ) .build() - .unwrap(), + .expect("Failed to build the reqwest client with provided configs"), } } diff --git a/crates/prover-services/sharp-service/src/config.rs b/crates/prover-services/sharp-service/src/config.rs index 475dee2d..b4e2cc8d 100644 --- a/crates/prover-services/sharp-service/src/config.rs +++ b/crates/prover-services/sharp-service/src/config.rs @@ -17,9 +17,18 @@ pub struct SharpConfig { impl SharpConfig { pub fn new_with_settings(settings: &impl Settings) -> color_eyre::Result { Ok(Self { - service_url: settings.get_settings_or_panic("SHARP_URL").parse().unwrap(), - rpc_node_url: settings.get_settings_or_panic("SETTLEMENT_RPC_URL").parse().unwrap(), - verifier_address: settings.get_settings_or_panic("GPS_VERIFIER_CONTRACT_ADDRESS").parse().unwrap(), + service_url: settings + .get_settings_or_panic("SHARP_URL") + .parse() + .expect("Failed to parse to service url for SharpConfig"), + rpc_node_url: settings + .get_settings_or_panic("SETTLEMENT_RPC_URL") + .parse() + .expect("Failed to parse to rpc_node_url for SharpConfig"), + verifier_address: settings + .get_settings_or_panic("GPS_VERIFIER_CONTRACT_ADDRESS") + .parse() + .expect("Failed to parse to verifier_address for SharpConfig"), }) } } diff --git a/crates/prover-services/sharp-service/src/lib.rs b/crates/prover-services/sharp-service/src/lib.rs index f97a7af5..6cdb8cbb 100644 --- a/crates/prover-services/sharp-service/src/lib.rs +++ b/crates/prover-services/sharp-service/src/lib.rs @@ -66,6 +66,9 @@ impl ProverClient for SharpProverService { // TODO : We would need to remove the FAILED, UNKNOWN, NOT_CREATED status as it is not in the sharp client // response specs : https://docs.google.com/document/d/1-9ggQoYmjqAtLBGNNR2Z5eLreBmlckGYjbVl0khtpU0 // We are 
waiting for the official public API spec before making changes + + // The `unwrap_or_default`s below is safe as it is just returning if any error logs + // present CairoJobStatus::FAILED => { tracing::error!( log_type = "failed", @@ -142,8 +145,10 @@ impl SharpProverService { pub fn with_test_settings(settings: &impl Settings, port: u16) -> Self { let sharp_config = SharpConfig::new_with_settings(settings) .expect("Not able to create SharpProverService from given settings."); - let sharp_client = - SharpClient::new_with_settings(format!("http://127.0.0.1:{}", port).parse().unwrap(), settings); + let sharp_client = SharpClient::new_with_settings( + format!("http://127.0.0.1:{}", port).parse().expect("Failed to create sharp client with the given params"), + settings, + ); let fact_checker = FactChecker::new(sharp_config.rpc_node_url, sharp_config.verifier_address); Self::new(sharp_client, fact_checker) } diff --git a/crates/settlement-clients/ethereum/Cargo.toml b/crates/settlement-clients/ethereum/Cargo.toml index 230664f9..a458bb1d 100644 --- a/crates/settlement-clients/ethereum/Cargo.toml +++ b/crates/settlement-clients/ethereum/Cargo.toml @@ -35,6 +35,7 @@ opentelemetry-otlp = { workspace = true, features = [ ] } opentelemetry-semantic-conventions = { workspace = true } opentelemetry_sdk = { workspace = true, features = ["rt-tokio", "logs"] } +thiserror = { workspace = true } tracing = { workspace = true } tracing-core = { workspace = true, default-features = false } tracing-opentelemetry = "0.26.0" diff --git a/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs b/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs index 3cdf7ab0..81826413 100644 --- a/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs +++ b/crates/settlement-clients/ethereum/src/clients/interfaces/validity_interface.rs @@ -58,6 +58,14 @@ sol! 
{ } } +#[derive(Debug, thiserror::Error)] +pub enum StarknetValidityContractError { + #[error("RPC error: {0}")] + RpcError(#[from] RpcError), + #[error("Failed to estimate gas: {0}")] + EstimateGasError(#[from] alloy::contract::Error), +} + #[async_trait] pub trait StarknetValidityContractTrait { /// Retrieves the last block number settled @@ -69,13 +77,13 @@ pub trait StarknetValidityContractTrait { program_output: Vec, onchain_data_hash: U256, onchain_data_size: U256, - ) -> Result>; + ) -> Result; async fn update_state_kzg( &self, program_output: Vec, kzg_proof: [u8; 48], - ) -> Result>; + ) -> Result; } #[async_trait] @@ -99,25 +107,33 @@ where program_output: Vec, onchain_data_hash: U256, onchain_data_size: U256, - ) -> Result> { - let base_fee = self.as_ref().provider().as_ref().get_gas_price().await.unwrap(); - let from_address = self.as_ref().provider().as_ref().get_accounts().await.unwrap()[0]; + ) -> Result { + let base_fee = self.as_ref().provider().as_ref().get_gas_price().await?; + let from_address = self.as_ref().provider().as_ref().get_accounts().await?[0]; let gas = self .as_ref() .updateState(program_output.clone(), onchain_data_hash, onchain_data_size) .from(from_address) .estimate_gas() - .await - .unwrap(); + .await?; let builder = self.as_ref().updateState(program_output, onchain_data_hash, onchain_data_size); - builder.from(from_address).nonce(2).gas(gas).gas_price(base_fee).send().await.unwrap().get_receipt().await + builder + .from(from_address) + .nonce(2) + .gas(gas) + .gas_price(base_fee) + .send() + .await? 
+ .get_receipt() + .await + .map_err(StarknetValidityContractError::RpcError) } async fn update_state_kzg( &self, program_output: Vec, kzg_proof: [u8; 48], - ) -> Result> { + ) -> Result { let base_fee = self.as_ref().provider().as_ref().get_gas_price().await?; let from_address = self.as_ref().provider().as_ref().get_accounts().await?[0]; let proof_vec = vec![Bytes::from(kzg_proof.to_vec())]; @@ -126,9 +142,17 @@ where .updateStateKzgDA(program_output.clone(), proof_vec.clone()) .from(from_address) .estimate_gas() - .await - .unwrap(); + .await?; let builder = self.as_ref().updateStateKzgDA(program_output, proof_vec); - builder.from(from_address).nonce(2).gas(gas).gas_price(base_fee).send().await.unwrap().get_receipt().await + builder + .from(from_address) + .nonce(2) + .gas(gas) + .gas_price(base_fee) + .send() + .await? + .get_receipt() + .await + .map_err(StarknetValidityContractError::RpcError) } } diff --git a/crates/settlement-clients/starknet/Cargo.toml b/crates/settlement-clients/starknet/Cargo.toml index e06dd646..861df735 100644 --- a/crates/settlement-clients/starknet/Cargo.toml +++ b/crates/settlement-clients/starknet/Cargo.toml @@ -4,7 +4,6 @@ version.workspace = true edition.workspace = true [dependencies] -anyhow = "1.0.89" appchain-core-contract-client = { workspace = true } async-trait = { workspace = true } c-kzg = { workspace = true } diff --git a/crates/settlement-clients/starknet/src/conversion.rs b/crates/settlement-clients/starknet/src/conversion.rs index f0bdd5b6..1b0f12ae 100644 --- a/crates/settlement-clients/starknet/src/conversion.rs +++ b/crates/settlement-clients/starknet/src/conversion.rs @@ -17,14 +17,14 @@ pub(crate) fn u64_from_felt(number: Felt) -> Result { return Err(color_eyre::Report::msg("byte should be zero, cannot convert to Felt")); } } - Ok(u64::from_be_bytes(bytes[24..32].try_into().unwrap())) + Ok(u64::from_be_bytes(bytes[24..32].try_into()?)) } #[test] fn test_u64_from_from_felt_ok() { let number = 10.into(); let 
converted = u64_from_felt(number); - assert!(converted.unwrap() == 10u64, "Should be able to convert"); + assert!(converted.expect("Failed to convert to u64") == 10u64, "Incorrect value conversion"); } #[test] diff --git a/crates/settlement-clients/starknet/src/lib.rs b/crates/settlement-clients/starknet/src/lib.rs index b2c8cc99..d220c105 100644 --- a/crates/settlement-clients/starknet/src/lib.rs +++ b/crates/settlement-clients/starknet/src/lib.rs @@ -66,7 +66,7 @@ impl StarknetSettlementClient { provider.clone(), signer.clone(), signer_address, - provider.chain_id().await.unwrap(), + provider.chain_id().await.expect("Failed to get chain id"), ExecutionEncoding::New, )); @@ -163,7 +163,7 @@ impl SettlementClient for StarknetSettlementClient { Ok(SettlementVerificationStatus::Rejected(format!( "Transaction {} has been reverted: {}", tx_hash, - execution_result.revert_reason().unwrap() + execution_result.revert_reason().unwrap_or_default() ))) } TransactionExecutionStatus::Succeeded => { diff --git a/crates/settlement-clients/starknet/src/tests/setup.rs b/crates/settlement-clients/starknet/src/tests/setup.rs index 84ec7ded..ae75a622 100644 --- a/crates/settlement-clients/starknet/src/tests/setup.rs +++ b/crates/settlement-clients/starknet/src/tests/setup.rs @@ -20,10 +20,10 @@ pub struct MadaraCmd { pub _port: MadaraPortNum, } -pub async fn wait_for_cond>>( +pub async fn wait_for_cond>>( mut cond: impl FnMut() -> F, duration: Duration, -) -> Result { +) -> color_eyre::Result { let mut attempt = 0; loop { let err = match cond().await { @@ -49,12 +49,14 @@ impl MadaraCmd { } pub async fn wait_for_ready(&mut self) -> &mut Self { - let endpoint = self.rpc_url.join("/health").unwrap(); + // We are fine with `expect` here as this function is called in the initial phases of the + // program execution + let endpoint = self.rpc_url.join("/health").expect("Failed to construct health endpoint URL"); wait_for_cond( || async { let res = reqwest::get(endpoint.clone()).await?; 
res.error_for_status()?; - anyhow::Ok(true) + Ok(true) }, Duration::from_millis(1000), ) @@ -71,12 +73,20 @@ impl Drop for MadaraCmd { let kill = || { let mut kill = Command::new("kill").args(["-s", "TERM", &child.id().to_string()]).spawn()?; kill.wait()?; - anyhow::Ok(()) + Ok::<_, color_eyre::Report>(()) }; if let Err(_err) = kill() { - child.kill().unwrap() + match child.kill() { + Ok(kill) => kill, + Err(e) => { + log::error!("{}", format!("Failed to kill Madara {:?}", e)); + } + } + } + match child.wait() { + Ok(exit_status) => log::debug!("{}", exit_status), + Err(e) => log::error!("failed to exit madara {:?}", e), } - child.wait().unwrap(); } } diff --git a/crates/settlement-clients/starknet/src/tests/test.rs b/crates/settlement-clients/starknet/src/tests/test.rs index e68f3d11..f5eedad2 100644 --- a/crates/settlement-clients/starknet/src/tests/test.rs +++ b/crates/settlement-clients/starknet/src/tests/test.rs @@ -3,6 +3,7 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; +use color_eyre::eyre::eyre; use rstest::{fixture, rstest}; use settlement_client_interface::SettlementClient; use starknet::accounts::{Account, ConnectedAccount, ExecutionEncoding, SingleOwnerAccount}; @@ -46,13 +47,13 @@ async fn wait_for_tx(account: &LocalWalletSignerMiddleware, transaction_hash: Fe let receipt = match account.provider().get_transaction_status(transaction_hash).await { Ok(receipt) => Ok(receipt), Err(ProviderError::StarknetError(StarknetError::TransactionHashNotFound)) => { - Err(anyhow::anyhow!("Transaction not yet received")) + Err(eyre!("Transaction not yet received")) } - _ => Err(anyhow::anyhow!("Unknown error")), + _ => Err(eyre!("Unknown error")), }; match receipt { - Ok(TransactionStatus::Received) => Err(anyhow::anyhow!("Transaction not yet received")), + Ok(TransactionStatus::Received) => Err(eyre!("Transaction not yet received")), Ok(TransactionStatus::Rejected) => Ok(false), Ok(TransactionStatus::AcceptedOnL2(status)) => match status { 
TransactionExecutionStatus::Succeeded => Ok(true), @@ -62,7 +63,7 @@ async fn wait_for_tx(account: &LocalWalletSignerMiddleware, transaction_hash: Fe TransactionExecutionStatus::Succeeded => Ok(true), TransactionExecutionStatus::Reverted => Ok(false), }, - Err(e) => Err(anyhow::anyhow!("Unknown error: {}", e)), + Err(e) => Err(eyre!("Unknown error: {}", e)), } }, duration,